Add local version of JDK-8351500 for early interim release before 21.0.8

- Resolves: RHEL-90309
This commit is contained in:
Andrew Hughes 2025-05-08 19:07:47 +01:00
parent 3f11c93f7c
commit fa21a3fa7d
2 changed files with 286 additions and 2 deletions

View File

@@ -366,9 +366,9 @@
%global top_level_dir_name %{vcstag}
%global top_level_dir_name_backup %{top_level_dir_name}-backup
%global buildver 6
%global rpmrelease 1
%global rpmrelease 2
# Settings used by the portable build
%global portablerelease 1
%global portablerelease 2
# Portable suffix differs between RHEL and CentOS
%if 0%{?centos} == 0
%global portablesuffix %{?pkgos:el7_9}%{!?pkgos:el8}
@@ -1450,6 +1450,10 @@ Patch1001: fips-%{featurever}u-%{fipsver}.patch
#
#############################################
# JDK-8351500: G1: NUMA migrations cause crashes in region allocation
# Upstream in 21.0.8+1
Patch2001: jdk8351500-numa_migration_crashes.patch
#############################################
#
# Portable build specific patches
@@ -1917,6 +1921,8 @@ sh %{SOURCE12} %{top_level_dir_name}
pushd %{top_level_dir_name}
# Add crypto policy and FIPS support
%patch -P1001 -p1
# Add early JDK-8351500
%patch -P2001 -p1
popd # openjdk
@@ -2561,6 +2567,10 @@ cjc.mainProgram(args)
%endif
%changelog
* Thu May 08 2025 Andrew Hughes <gnu.andrew@redhat.com> - 1:21.0.7.0.6-2
- Add local version of JDK-8351500 for early interim release before 21.0.8
- Resolves: RHEL-90309
* Fri Apr 11 2025 Andrew Hughes <gnu.andrew@redhat.com> - 1:21.0.7.0.6-1
- Update to jdk-21.0.7+6 (GA)
- Update release notes to 21.0.7+6

View File

@@ -0,0 +1,274 @@
commit c5c0ac6140ec5241d949bc66f54469bfa516c60c
Author: Thomas Stuefe <stuefe@openjdk.org>
Date: Thu Mar 27 17:37:44 2025 +0000
8351500: G1: NUMA migrations cause crashes in region allocation
Reviewed-by: sjohanss
Backport-of: 37ec796255ae857588a5c7e0d572407dd81cbec9
diff --git a/src/hotspot/share/gc/g1/g1Allocator.cpp b/src/hotspot/share/gc/g1/g1Allocator.cpp
index 5b33e24de75..23c7ee839d8 100644
--- a/src/hotspot/share/gc/g1/g1Allocator.cpp
+++ b/src/hotspot/share/gc/g1/g1Allocator.cpp
@@ -212,10 +212,10 @@ size_t G1Allocator::used_in_alloc_regions() {
HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
- size_t word_size,
- uint node_index) {
+ uint node_index,
+ size_t word_size) {
size_t temp = 0;
- HeapWord* result = par_allocate_during_gc(dest, word_size, word_size, &temp, node_index);
+ HeapWord* result = par_allocate_during_gc(dest, node_index, word_size, word_size, &temp);
assert(result == nullptr || temp == word_size,
"Requested " SIZE_FORMAT " words, but got " SIZE_FORMAT " at " PTR_FORMAT,
word_size, temp, p2i(result));
@@ -223,13 +223,13 @@ HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
}
HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
+ uint node_index,
size_t min_word_size,
size_t desired_word_size,
- size_t* actual_word_size,
- uint node_index) {
+ size_t* actual_word_size) {
switch (dest.type()) {
case G1HeapRegionAttr::Young:
- return survivor_attempt_allocation(min_word_size, desired_word_size, actual_word_size, node_index);
+ return survivor_attempt_allocation(node_index, min_word_size, desired_word_size, actual_word_size);
case G1HeapRegionAttr::Old:
return old_attempt_allocation(min_word_size, desired_word_size, actual_word_size);
default:
@@ -238,10 +238,10 @@ HeapWord* G1Allocator::par_allocate_during_gc(G1HeapRegionAttr dest,
}
}
-HeapWord* G1Allocator::survivor_attempt_allocation(size_t min_word_size,
+HeapWord* G1Allocator::survivor_attempt_allocation(uint node_index,
+ size_t min_word_size,
size_t desired_word_size,
- size_t* actual_word_size,
- uint node_index) {
+ size_t* actual_word_size) {
assert(!_g1h->is_humongous(desired_word_size),
"we should not be seeing humongous-size allocations in this path");
@@ -396,10 +396,10 @@ HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
size_t actual_plab_size = 0;
HeapWord* buf = _allocator->par_allocate_during_gc(dest,
+ node_index,
required_in_plab,
plab_word_size,
- &actual_plab_size,
- node_index);
+ &actual_plab_size);
assert(buf == nullptr || ((actual_plab_size >= required_in_plab) && (actual_plab_size <= plab_word_size)),
"Requested at minimum %zu, desired %zu words, but got %zu at " PTR_FORMAT,
@@ -418,7 +418,7 @@ HeapWord* G1PLABAllocator::allocate_direct_or_new_plab(G1HeapRegionAttr dest,
*plab_refill_failed = true;
}
// Try direct allocation.
- HeapWord* result = _allocator->par_allocate_during_gc(dest, word_sz, node_index);
+ HeapWord* result = _allocator->par_allocate_during_gc(dest, node_index, word_sz);
if (result != nullptr) {
plab_data->_direct_allocated += word_sz;
plab_data->_num_direct_allocations++;
diff --git a/src/hotspot/share/gc/g1/g1Allocator.hpp b/src/hotspot/share/gc/g1/g1Allocator.hpp
index 32f37778a18..b2828d64d87 100644
--- a/src/hotspot/share/gc/g1/g1Allocator.hpp
+++ b/src/hotspot/share/gc/g1/g1Allocator.hpp
@@ -78,19 +78,16 @@ class G1Allocator : public CHeapObj<mtGC> {
inline OldGCAllocRegion* old_gc_alloc_region();
// Allocation attempt during GC for a survivor object / PLAB.
- HeapWord* survivor_attempt_allocation(size_t min_word_size,
+ HeapWord* survivor_attempt_allocation(uint node_index,
+ size_t min_word_size,
size_t desired_word_size,
- size_t* actual_word_size,
- uint node_index);
+ size_t* actual_word_size);
// Allocation attempt during GC for an old object / PLAB.
HeapWord* old_attempt_allocation(size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size);
- // Node index of current thread.
- inline uint current_node_index() const;
-
public:
G1Allocator(G1CollectedHeap* heap);
~G1Allocator();
@@ -110,18 +107,22 @@ class G1Allocator : public CHeapObj<mtGC> {
void abandon_gc_alloc_regions();
bool is_retained_old_region(HeapRegion* hr);
+ // Node index of current thread.
+ inline uint current_node_index() const;
+
// Allocate blocks of memory during mutator time.
// Attempt allocation in the current alloc region.
- inline HeapWord* attempt_allocation(size_t min_word_size,
+ inline HeapWord* attempt_allocation(uint node_index,
+ size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size);
// This is to be called when holding an appropriate lock. It first tries in the
// current allocation region, and then attempts an allocation using a new region.
- inline HeapWord* attempt_allocation_locked(size_t word_size);
+ inline HeapWord* attempt_allocation_locked(uint node_index, size_t word_size);
- inline HeapWord* attempt_allocation_force(size_t word_size);
+ inline HeapWord* attempt_allocation_force(uint node_index, size_t word_size);
size_t unsafe_max_tlab_alloc();
size_t used_in_alloc_regions();
@@ -131,14 +132,15 @@ class G1Allocator : public CHeapObj<mtGC> {
// heap, and then allocate a block of the given size. The block
// may not be a humongous - it must fit into a single heap region.
HeapWord* par_allocate_during_gc(G1HeapRegionAttr dest,
- size_t word_size,
- uint node_index);
+ uint node_index,
+ size_t word_size
+ );
HeapWord* par_allocate_during_gc(G1HeapRegionAttr dest,
+ uint node_index,
size_t min_word_size,
size_t desired_word_size,
- size_t* actual_word_size,
- uint node_index);
+ size_t* actual_word_size);
};
// Manages the PLABs used during garbage collection. Interface for allocation from PLABs.
diff --git a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp
index 13ae9b9bbbd..7e516b48faa 100644
--- a/src/hotspot/share/gc/g1/g1Allocator.inline.hpp
+++ b/src/hotspot/share/gc/g1/g1Allocator.inline.hpp
@@ -49,11 +49,10 @@ inline OldGCAllocRegion* G1Allocator::old_gc_alloc_region() {
return &_old_gc_alloc_region;
}
-inline HeapWord* G1Allocator::attempt_allocation(size_t min_word_size,
+inline HeapWord* G1Allocator::attempt_allocation(uint node_index,
+ size_t min_word_size,
size_t desired_word_size,
size_t* actual_word_size) {
- uint node_index = current_node_index();
-
HeapWord* result = mutator_alloc_region(node_index)->attempt_retained_allocation(min_word_size, desired_word_size, actual_word_size);
if (result != nullptr) {
return result;
@@ -62,8 +61,7 @@ inline HeapWord* G1Allocator::attempt_allocation(size_t min_word_size,
return mutator_alloc_region(node_index)->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
}
-inline HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size) {
- uint node_index = current_node_index();
+inline HeapWord* G1Allocator::attempt_allocation_locked(uint node_index, size_t word_size) {
HeapWord* result = mutator_alloc_region(node_index)->attempt_allocation_locked(word_size);
assert(result != nullptr || mutator_alloc_region(node_index)->get() == nullptr,
@@ -71,8 +69,7 @@ inline HeapWord* G1Allocator::attempt_allocation_locked(size_t word_size) {
return result;
}
-inline HeapWord* G1Allocator::attempt_allocation_force(size_t word_size) {
- uint node_index = current_node_index();
+inline HeapWord* G1Allocator::attempt_allocation_force(uint node_index, size_t word_size) {
return mutator_alloc_region(node_index)->attempt_allocation_force(word_size);
}
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 9b6b22a29e7..a43e45c4afa 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -401,7 +401,7 @@ G1CollectedHeap::mem_allocate(size_t word_size,
return attempt_allocation(word_size, word_size, &dummy);
}
-HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
+HeapWord* G1CollectedHeap::attempt_allocation_slow(uint node_index, size_t word_size) {
ResourceMark rm; // For retrieving the thread names in log messages.
// Make sure you read the note in attempt_allocation_humongous().
@@ -427,7 +427,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
// Now that we have the lock, we first retry the allocation in case another
// thread changed the region while we were waiting to acquire the lock.
- result = _allocator->attempt_allocation_locked(word_size);
+ result = _allocator->attempt_allocation_locked(node_index, word_size);
if (result != nullptr) {
return result;
}
@@ -438,7 +438,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
if (GCLocker::is_active_and_needs_gc() && policy()->can_expand_young_list()) {
// No need for an ergo message here, can_expand_young_list() does this when
// it returns true.
- result = _allocator->attempt_allocation_force(word_size);
+ result = _allocator->attempt_allocation_force(node_index, word_size);
if (result != nullptr) {
return result;
}
@@ -495,7 +495,7 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
// follow-on attempt will be at the start of the next loop
// iteration (after taking the Heap_lock).
size_t dummy = 0;
- result = _allocator->attempt_allocation(word_size, word_size, &dummy);
+ result = _allocator->attempt_allocation(node_index, word_size, word_size, &dummy);
if (result != nullptr) {
return result;
}
@@ -636,11 +636,14 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
"be called for humongous allocation requests");
- HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
+ // Fix NUMA node association for the duration of this allocation
+ const uint node_index = _allocator->current_node_index();
+
+ HeapWord* result = _allocator->attempt_allocation(node_index, min_word_size, desired_word_size, actual_word_size);
if (result == nullptr) {
*actual_word_size = desired_word_size;
- result = attempt_allocation_slow(desired_word_size);
+ result = attempt_allocation_slow(node_index, desired_word_size);
}
assert_heap_not_locked();
@@ -778,8 +781,11 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
assert(!_allocator->has_mutator_alloc_region() || !expect_null_mutator_alloc_region,
"the current alloc region was unexpectedly found to be non-null");
+ // Fix NUMA node association for the duration of this allocation
+ const uint node_index = _allocator->current_node_index();
+
if (!is_humongous(word_size)) {
- return _allocator->attempt_allocation_locked(word_size);
+ return _allocator->attempt_allocation_locked(node_index, word_size);
} else {
HeapWord* result = humongous_obj_allocate(word_size);
if (result != nullptr && policy()->need_to_start_conc_mark("STW humongous allocation")) {
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index 88f4f680800..6afaeb6e6d4 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -453,7 +453,7 @@ class G1CollectedHeap : public CollectedHeap {
// Second-level mutator allocation attempt: take the Heap_lock and
// retry the allocation attempt, potentially scheduling a GC
// pause. This should only be used for non-humongous allocations.
- HeapWord* attempt_allocation_slow(size_t word_size);
+ HeapWord* attempt_allocation_slow(uint node_index, size_t word_size);
// Takes the Heap_lock and attempts a humongous allocation. It can
// potentially schedule a GC pause.