diff --git a/glibc-RHEL-24168-1.patch b/glibc-RHEL-24168-1.patch
new file mode 100644
index 0000000..b645f32
--- /dev/null
+++ b/glibc-RHEL-24168-1.patch
@@ -0,0 +1,226 @@
+Downstream-only patch to add arc4random to support/ for use in qsort
+testing.
+
+The arc4random implementation is up-to-date with upstream commit
+2642002380aafb71a1d3b569b6d7ebeab3284816, with minor changes to keep
+everything self-contained within support infrastructure. Unlike the
+upstream version, this implementation is a cancellation point.
+
+diff --git a/support/Makefile b/support/Makefile
+index d6d03c2ed3af3e6d..bffcb06d7185d674 100644
+--- a/support/Makefile
++++ b/support/Makefile
+@@ -41,6 +41,8 @@ libsupport-routines = \
+ resolv_response_context_free \
+ resolv_test \
+ set_fortify_handler \
++ support-arc4random \
++ support-arc4random_uniform \
+ support-open-dev-null-range \
+ support_become_root \
+ support_can_chroot \
+diff --git a/support/support-arc4random.c b/support/support-arc4random.c
+new file mode 100644
+index 0000000000000000..c4462b098c68cef5
+--- /dev/null
++++ b/support/support-arc4random.c
+@@ -0,0 +1,99 @@
++/* Pseudo Random Number Generator
++ Copyright (C) 2022-2025 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <https://www.gnu.org/licenses/>. */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++static void
++arc4random_getrandom_failure (void)
++{
++ __libc_fatal ("Fatal glibc error: cannot get entropy for arc4random\n");
++}
++
++void
++arc4random_buf (void *p, size_t n)
++{
++ static int seen_initialized;
++ ssize_t l;
++ int fd;
++
++ if (n == 0)
++ return;
++
++ for (;;)
++ {
++ l = TEMP_FAILURE_RETRY (getrandom (p, n, 0));
++ if (l > 0)
++ {
++ if ((size_t) l == n)
++ return; /* Done reading, success. */
++ p = (uint8_t *) p + l;
++ n -= l;
++ continue; /* Interrupted by a signal; keep going. */
++ }
++ else if (l < 0 && errno == ENOSYS)
++ break; /* No syscall, so fallback to /dev/urandom. */
++ arc4random_getrandom_failure ();
++ }
++
++ if (atomic_load_relaxed (&seen_initialized) == 0)
++ {
++ /* Poll /dev/random as an approximation of RNG initialization. */
++ struct pollfd pfd = { .events = POLLIN };
++ pfd.fd = TEMP_FAILURE_RETRY (
++ __open64_nocancel ("/dev/random", O_RDONLY | O_CLOEXEC | O_NOCTTY));
++ if (pfd.fd < 0)
++ arc4random_getrandom_failure ();
++ if (TEMP_FAILURE_RETRY (poll (&pfd, 1, -1)) < 0)
++ arc4random_getrandom_failure ();
++ if (__close_nocancel (pfd.fd) < 0)
++ arc4random_getrandom_failure ();
++ atomic_store_relaxed (&seen_initialized, 1);
++ }
++
++ fd = TEMP_FAILURE_RETRY (
++ __open64_nocancel ("/dev/urandom", O_RDONLY | O_CLOEXEC | O_NOCTTY));
++ if (fd < 0)
++ arc4random_getrandom_failure ();
++ for (;;)
++ {
++ l = TEMP_FAILURE_RETRY (__read_nocancel (fd, p, n));
++ if (l <= 0)
++ arc4random_getrandom_failure ();
++ if ((size_t) l == n)
++ break; /* Done reading, success. */
++ p = (uint8_t *) p + l;
++ n -= l;
++ }
++ if (__close_nocancel (fd) < 0)
++ arc4random_getrandom_failure ();
++}
++
++uint32_t
++arc4random (void)
++{
++ uint32_t r;
++ arc4random_buf (&r, sizeof (r));
++ return r;
++}
+diff --git a/support/support-arc4random_uniform.c b/support/support-arc4random_uniform.c
+new file mode 100644
+index 0000000000000000..20108e7409cca81b
+--- /dev/null
++++ b/support/support-arc4random_uniform.c
+@@ -0,0 +1,70 @@
++/* Random pseudo generator number which returns a single 32 bit value
++ uniformly distributed but with an upper_bound.
++ Copyright (C) 2022-2025 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <https://www.gnu.org/licenses/>. */
++
++#include
++#include
++#include
++
++/* Return a uniformly distributed random number less than N. The algorithm
++ calculates a mask being the lowest power of two bounding the upper bound
++ N, successively queries new random values, and rejects values outside of
++ the requested range.
++
++ For rejected values, it also checks whether the remaining entropy could fit in
++ the requested range after range adjustment.
++
++ The algorithm avoids modulo and divide operations, which might be costly
++ depending on the architecture. */
++uint32_t
++arc4random_uniform (uint32_t n)
++{
++ if (n <= 1)
++ /* There is no valid return value for a zero limit, and 0 is the
++ only possible result for limit 1. */
++ return 0;
++
++ /* Powers of two are easy. */
++ if (powerof2 (n))
++ return arc4random () & (n - 1);
++
++ /* mask is the smallest power of 2 minus 1 number larger than n. */
++ int z = __builtin_clz (n);
++ uint32_t mask = ~UINT32_C(0) >> z;
++ int bits = CHAR_BIT * sizeof (uint32_t) - z;
++
++ while (1)
++ {
++ uint32_t value = arc4random ();
++
++ /* Return if the lower power of 2 minus 1 satisfies the condition. */
++ uint32_t r = value & mask;
++ if (r < n)
++ return r;
++
++ /* Otherwise check if the remaining bits of entropy fit in the
++ bound. */
++ for (int bits_left = z; bits_left >= bits; bits_left -= bits)
++ {
++ value >>= bits;
++ r = value & mask;
++ if (r < n)
++ return r;
++ }
++ }
++}
+diff --git a/support/support.h b/support/support.h
+index b69f588e2edce6be..ed7862daf9e4120a 100644
+--- a/support/support.h
++++ b/support/support.h
+@@ -220,6 +220,19 @@ void support_stack_free (struct support_stack *stack);
+ The returned value is the lowest file descriptor number. */
+ int support_open_dev_null_range (int num, int flags, mode_t mode);
+
++/* Return a random integer between zero and 2**32-1 (inclusive). */
++extern uint32_t arc4random (void)
++ __THROW __wur;
++
++/* Fill the buffer with random data. */
++extern void arc4random_buf (void *__buf, size_t __size)
++ __THROW __nonnull ((1));
++
++/* Return a random number between zero (inclusive) and the specified
++ limit (exclusive). */
++extern uint32_t arc4random_uniform (__uint32_t __upper_bound)
++ __THROW __wur;
++
+ __END_DECLS
+
+ #endif /* SUPPORT_H */
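The patch above only wires arc4random, arc4random_buf and arc4random_uniform into libsupport and support.h. As a rough illustration of how a qsort test can use a uniform random index, the following self-contained sketch shuffles its input with a Fisher-Yates pass before sorting; uniform_below is a hypothetical rand()-based stand-in for arc4random_uniform so the sketch builds outside the support/ tree, and its modulo reduction is not actually uniform.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for arc4random_uniform: illustrative only,
   slightly biased, and not the support/ implementation.  */
static unsigned int
uniform_below (unsigned int n)
{
  return (unsigned int) rand () % n;
}

/* Fisher-Yates shuffle, the usual way a qsort test randomizes input.  */
static void
shuffle (int *a, size_t n)
{
  for (size_t i = n - 1; i > 0; --i)
    {
      size_t j = uniform_below ((unsigned int) i + 1);
      int tmp = a[i];
      a[i] = a[j];
      a[j] = tmp;
    }
}

static int
cmp_int (const void *a, const void *b)
{
  int x = *(const int *) a;
  int y = *(const int *) b;
  return (x > y) - (x < y);
}

int
main (void)
{
  int a[16];
  for (int i = 0; i < 16; ++i)
    a[i] = i;
  shuffle (a, 16);
  qsort (a, 16, sizeof a[0], cmp_int);
  for (int i = 0; i < 16; ++i)
    printf ("%d ", a[i]);
  putchar ('\n');
  return 0;
}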
diff --git a/glibc-RHEL-24168-10.patch b/glibc-RHEL-24168-10.patch
new file mode 100644
index 0000000..ae3fc8a
--- /dev/null
+++ b/glibc-RHEL-24168-10.patch
@@ -0,0 +1,43 @@
+commit f8cfb6836e8d91bb789b2e7fd65338d6f5bd459c
+Author: Florian Weimer
+Date: Wed Nov 8 15:18:02 2023 +0100
+
+ stdlib: Avoid element self-comparisons in qsort
+
+ This improves compatibility with applications which assume that qsort
+ does not invoke the comparison function with equal pointer arguments.
+
+ The newly introduced branches should be predictable, as leading to a
+ call to the comparison function. If the prediction fails, we avoid
+ calling the function.
+
+ Reviewed-by: Adhemerval Zanella
+
+diff --git a/stdlib/qsort.c b/stdlib/qsort.c
+index cb1619aa0ae7de72..2ee39e2c492f792e 100644
+--- a/stdlib/qsort.c
++++ b/stdlib/qsort.c
+@@ -137,7 +137,7 @@ siftdown (void *base, size_t size, size_t k, size_t n,
+ if (j < n && cmp (base + (j * size), base + ((j + 1) * size), arg) < 0)
+ j++;
+
+- if (cmp (base + (k * size), base + (j * size), arg) >= 0)
++ if (j == k || cmp (base + (k * size), base + (j * size), arg) >= 0)
+ break;
+
+ do_swap (base + (size * j), base + (k * size), size, swap_type);
+@@ -333,10 +333,12 @@ __qsort_r (void *const pbase, size_t total_elems, size_t size,
+ that this algorithm runs much faster than others. */
+ do
+ {
+- while ((*cmp) ((void *) left_ptr, (void *) mid, arg) < 0)
++ while (left_ptr != mid
++ && (*cmp) ((void *) left_ptr, (void *) mid, arg) < 0)
+ left_ptr += size;
+
+- while ((*cmp) ((void *) mid, (void *) right_ptr, arg) < 0)
++ while (right_ptr != mid
++ && (*cmp) ((void *) mid, (void *) right_ptr, arg) < 0)
+ right_ptr -= size;
+
+ if (left_ptr < right_ptr)
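The compatibility concern in the commit message above is easy to reproduce: comparators in real applications sometimes assume qsort never passes the same element as both arguments. The sketch below shows a hypothetical comparator of that kind (not taken from any particular application); with the guards added above, the assert can no longer fire because of a self-comparison.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical application comparator that assumes qsort never
   compares an element against itself.  */
static int
cmp_name (const void *a, const void *b)
{
  assert (a != b);   /* would abort on a self-comparison */
  return strcmp (*(const char *const *) a, *(const char *const *) b);
}

int
main (void)
{
  const char *names[] = { "siftdown", "heapify", "qsort" };
  qsort (names, sizeof names / sizeof names[0], sizeof names[0], cmp_name);
  return 0;
}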
diff --git a/glibc-RHEL-24168-11.patch b/glibc-RHEL-24168-11.patch
new file mode 100644
index 0000000..ac24cde
--- /dev/null
+++ b/glibc-RHEL-24168-11.patch
@@ -0,0 +1,25 @@
+commit e4d8117b82065dc72e8df80097360e7c05a349b9
+Author: Florian Weimer
+Date: Tue Nov 21 16:45:35 2023 +0100
+
+ stdlib: Avoid another self-comparison in qsort
+
+ In the insertion phase, we could run off the start of the array if the
+ comparison function never runs zero. In that case, it never finds the
+ initial element that terminates the iteration.
+
+ Reviewed-by: Adhemerval Zanella
+
+diff --git a/stdlib/qsort.c b/stdlib/qsort.c
+index 2ee39e2c492f792e..0d5f8b92e8072965 100644
+--- a/stdlib/qsort.c
++++ b/stdlib/qsort.c
+@@ -218,7 +218,7 @@ insertion_sort_qsort_partitions (void *const pbase, size_t total_elems,
+ while ((run_ptr += size) <= end_ptr)
+ {
+ tmp_ptr = run_ptr - size;
+- while (cmp (run_ptr, tmp_ptr, arg) < 0)
++ while (run_ptr != tmp_ptr && cmp (run_ptr, tmp_ptr, arg) < 0)
+ tmp_ptr -= size;
+
+ tmp_ptr += size;
diff --git a/glibc-RHEL-24168-12.patch b/glibc-RHEL-24168-12.patch
new file mode 100644
index 0000000..e43ecde
--- /dev/null
+++ b/glibc-RHEL-24168-12.patch
@@ -0,0 +1,273 @@
+commit 55364e1f7dfab372f0710513c4d1c967c4965f71
+Author: Florian Weimer
+Date: Tue Nov 21 16:45:35 2023 +0100
+
+ stdlib: Handle various corner cases in the fallback heapsort for qsort
+
+ The previous implementation did not consistently apply the rule that
+ the child nodes of node K are at 2 * K + 1 and 2 * K + 2, or
+ that the parent node is at (K - 1) / 2.
+
+ Add an internal test that targets the heapsort implementation
+ directly.
+
+ Reported-by: Stepan Golosunov
+ Reviewed-by: Adhemerval Zanella
+
+diff --git a/stdlib/Makefile b/stdlib/Makefile
+index 4039e5395eeea2b0..ee005ce8caa48abe 100644
+--- a/stdlib/Makefile
++++ b/stdlib/Makefile
+@@ -254,6 +254,7 @@ tests := \
+ # tests
+
+ tests-internal := \
++ tst-qsort4 \
+ tst-strtod1i \
+ tst-strtod3 \
+ tst-strtod4 \
+diff --git a/stdlib/qsort.c b/stdlib/qsort.c
+index 0d5f8b92e8072965..b207c12d2f0a38cc 100644
+--- a/stdlib/qsort.c
++++ b/stdlib/qsort.c
+@@ -126,29 +126,44 @@ pop (stack_node *top, char **lo, char **hi, size_t *depth)
+ return top;
+ }
+
+-/* NB: N is inclusive bound for BASE. */
++/* Establish the heap condition at index K, that is, the key at K will
++ not be less than either of its children, at 2 * K + 1 and 2 * K + 2
++ (if they exist). N is the last valid index. */
+ static inline void
+ siftdown (void *base, size_t size, size_t k, size_t n,
+ enum swap_type_t swap_type, __compar_d_fn_t cmp, void *arg)
+ {
+- while (k <= n / 2)
++ /* There can only be a heap condition violation if there are
++ children. */
++ while (2 * k + 1 <= n)
+ {
+- size_t j = 2 * k;
++ /* Left child. */
++ size_t j = 2 * k + 1;
++ /* If the right child is larger, use it. */
+ if (j < n && cmp (base + (j * size), base + ((j + 1) * size), arg) < 0)
+ j++;
+
++ /* If k is already >= to its children, we are done. */
+ if (j == k || cmp (base + (k * size), base + (j * size), arg) >= 0)
+ break;
+
++ /* Heal the violation. */
+ do_swap (base + (size * j), base + (k * size), size, swap_type);
++
++ /* Swapping with j may have introduced a violation at j. Fix
++ it in the next loop iteration. */
+ k = j;
+ }
+ }
+
++/* Establish the heap condition for the indices 0 to N (inclusive). */
+ static inline void
+ heapify (void *base, size_t size, size_t n, enum swap_type_t swap_type,
+ __compar_d_fn_t cmp, void *arg)
+ {
++ /* If n is odd, k = n / 2 has a left child at n, so this is the
++ largest index that can have a heap condition violation regarding
++ its children. */
+ size_t k = n / 2;
+ while (1)
+ {
+@@ -158,32 +173,38 @@ heapify (void *base, size_t size, size_t n, enum swap_type_t swap_type,
+ }
+ }
+
+-/* A non-recursive heapsort, used on introsort implementation as a fallback
+- routine with worst-case performance of O(nlog n) and worst-case space
+- complexity of O(1). It sorts the array starting at BASE and ending at
+- END, with each element of SIZE bytes. The SWAP_TYPE is the callback
+- function used to swap elements, and CMP is the function used to compare
+- elements. */
++/* A non-recursive heapsort, used on introsort implementation as a
++ fallback routine with worst-case performance of O(nlog n) and
++ worst-case space complexity of O(1). It sorts the array starting
++ at BASE and ending at END (inclusive), with each element of SIZE
++ bytes. The SWAP_TYPE is the callback function used to swap
++ elements, and CMP is the function used to compare elements. */
+ static void
+ heapsort_r (void *base, void *end, size_t size, enum swap_type_t swap_type,
+ __compar_d_fn_t cmp, void *arg)
+ {
+- const size_t count = ((uintptr_t) end - (uintptr_t) base) / size;
+-
+- if (count < 2)
++ size_t n = ((uintptr_t) end - (uintptr_t) base) / size;
++ if (n <= 1)
++ /* Handled by insertion sort. */
+ return;
+
+- size_t n = count - 1;
+-
+ /* Build the binary heap, largest value at the base[0]. */
+ heapify (base, size, n, swap_type, cmp, arg);
+
+- /* On each iteration base[0:n] is the binary heap, while base[n:count]
+- is sorted. */
+- while (n > 0)
++ while (true)
+ {
++ /* Indices 0 .. n contain the binary heap. Extract the largest
++ element put it into the final position in the array. */
+ do_swap (base, base + (n * size), size, swap_type);
++
++ /* The heap is now one element shorter. */
+ n--;
++ if (n == 0)
++ break;
++
++ /* By swapping in elements 0 and the previous value of n (now at
++ n + 1), we likely introduced a heap condition violation. Fix
++ it for the reduced heap. */
+ siftdown (base, size, 0, n, swap_type, cmp, arg);
+ }
+ }
+diff --git a/stdlib/tst-qsort4.c b/stdlib/tst-qsort4.c
+new file mode 100644
+index 0000000000000000..a7abaa1a37461666
+--- /dev/null
++++ b/stdlib/tst-qsort4.c
+@@ -0,0 +1,134 @@
++/* Test the heapsort implementation behind qsort.
++ Copyright (C) 2023 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <https://www.gnu.org/licenses/>. */
++
++#include "qsort.c"
++
++#include <stdio.h>
++#include <support/check.h>
++#include <support/support.h>
++
++static int
++cmp (const void *a1, const void *b1, void *closure)
++{
++ const signed char *a = a1;
++ const signed char *b = b1;
++ return *a - *b;
++}
++
++/* Wrapper around heapsort_r that sets up the required variables. */
++static void
++heapsort_wrapper (void *const pbase, size_t total_elems, size_t size,
++ __compar_d_fn_t cmp, void *arg)
++{
++ char *base_ptr = (char *) pbase;
++ char *lo = base_ptr;
++ char *hi = &lo[size * (total_elems - 1)];
++
++ if (total_elems <= 1)
++ /* Avoid lossage with unsigned arithmetic below. */
++ return;
++
++ enum swap_type_t swap_type;
++ if (is_aligned (pbase, size, 8))
++ swap_type = SWAP_WORDS_64;
++ else if (is_aligned (pbase, size, 4))
++ swap_type = SWAP_WORDS_32;
++ else
++ swap_type = SWAP_BYTES;
++ heapsort_r (lo, hi, size, swap_type, cmp, arg);
++}
++
++static void
++check_one_sort (signed char *array, int length)
++{
++ signed char *copy = xmalloc (length);
++ memcpy (copy, array, length);
++ heapsort_wrapper (copy, length, 1, cmp, NULL);
++
++ /* Verify that the result is sorted. */
++ for (int i = 1; i < length; ++i)
++ if (copy[i] < copy[i - 1])
++ {
++ support_record_failure ();
++ printf ("error: sorting failure for length %d at offset %d\n",
++ length, i - 1);
++ printf ("input:");
++ for (int i = 0; i < length; ++i)
++ printf (" %d", array[i]);
++ printf ("\noutput:");
++ for (int i = 0; i < length; ++i)
++ printf (" %d", copy[i]);
++ putchar ('\n');
++ break;
++ }
++
++ /* Verify that no elements went away or were added. */
++ {
++ int expected_counts[256] = { 0 };
++ for (int i = 0; i < length; ++i)
++ ++expected_counts[array[i] & 0xff];
++ int actual_counts[256] = { 0 };
++ for (int i = 0; i < length; ++i)
++ ++actual_counts[copy[i] & 0xff];
++ for (int i = 0; i < 256; ++i)
++ TEST_COMPARE (expected_counts[i], actual_counts[i]);
++ }
++
++ free (copy);
++}
++
++/* Enumerate all possible combinations of LENGTH elements. */
++static void
++check_combinations (int length, signed char *start, int offset)
++{
++ if (offset == length)
++ check_one_sort (start, length);
++ else
++ for (int i = 0; i < length; ++i)
++ {
++ start[offset] = i;
++ check_combinations (length, start, offset + 1);
++ }
++}
++
++static int
++do_test (void)
++{
++ /* A random permutation of 20 values. */
++ check_one_sort ((signed char[20]) {5, 12, 16, 10, 14, 11, 9, 13, 8, 15,
++ 0, 17, 3, 7, 1, 18, 2, 19, 4, 6}, 20);
++
++
++ /* A permutation that appeared during adversarial testing for the
++ quicksort pass. */
++ check_one_sort ((signed char[16]) {15, 3, 4, 2, 1, 0, 8, 7, 6, 5, 14,
++ 13, 12, 11, 10, 9}, 16);
++
++ /* Array lengths 2 and less are not handled by heapsort_r and
++ deferred to insertion sort. */
++ for (int i = 3; i <= 8; ++i)
++ {
++ signed char *buf = xmalloc (i);
++ check_combinations (i, buf, 0);
++ free (buf);
++ }
++
++ return 0;
++}
++
++#include <support/test-driver.c>
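For reference, the 0-based indexing rule the commit message above spells out (children of K at 2 * K + 1 and 2 * K + 2, parent at (K - 1) / 2) is easiest to see on plain integers. The following sketch only illustrates that convention; the glibc siftdown above works on raw bytes through do_swap and a caller-supplied comparator.

#include <stddef.h>

/* Illustrative sift-down for a max-heap of ints stored in a[0..n],
   where n is the last valid index, not the element count.  */
static void
siftdown_int (int *a, size_t k, size_t n)
{
  while (2 * k + 1 <= n)          /* stop once K has no children */
    {
      size_t j = 2 * k + 1;       /* left child */
      if (j < n && a[j] < a[j + 1])
        j++;                      /* right child exists and is larger */
      if (a[k] >= a[j])
        break;                    /* heap condition already holds */
      int tmp = a[k];             /* heal the violation ... */
      a[k] = a[j];
      a[j] = tmp;
      k = j;                      /* ... and continue below */
    }
}

int
main (void)
{
  int a[] = { 2, 9, 4, 7, 5, 1, 8 };
  size_t n = sizeof a / sizeof a[0] - 1;   /* last valid index */

  /* Heapify: the largest index that can have children is (n - 1) / 2.  */
  for (size_t k = (n - 1) / 2 + 1; k-- > 0; )
    siftdown_int (a, k, n);

  return a[0] == 9 ? 0 : 1;                /* the root now holds the maximum */
}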
diff --git a/glibc-RHEL-24168-13.patch b/glibc-RHEL-24168-13.patch
new file mode 100644
index 0000000..9574444
--- /dev/null
+++ b/glibc-RHEL-24168-13.patch
@@ -0,0 +1,248 @@
+commit 64e4acf24da15c11cb83f933947df3b2e8a700cd
+Author: Florian Weimer
+Date: Tue Nov 21 16:45:35 2023 +0100
+
+ stdlib: The qsort implementation needs to use heapsort in more cases
+
+ The existing logic avoided internal stack overflow. To avoid
+ a denial-of-service condition with adversarial input, it is necessary
+ to fall over to heapsort if tail-recursing deeply, too, which does
+ not result in a deep stack of pending partitions.
+
+ The new test stdlib/tst-qsort5 is based on Douglas McIlroy's paper
+ on this subject.
+
+ Reviewed-by: Adhemerval Zanella
+
+Conflicts:
+ stdlib/Makefile: Adjust for getenv tests in glibc-RHEL-67692-4.patch.
+
+diff --git a/stdlib/Makefile b/stdlib/Makefile
+index ee005ce8caa48abe..a1a511da37f0c18e 100644
+--- a/stdlib/Makefile
++++ b/stdlib/Makefile
+@@ -212,6 +212,7 @@ tests := \
+ tst-qsort \
+ tst-qsort2 \
+ tst-qsort3 \
++ tst-qsort5 \
+ tst-quick_exit \
+ tst-rand48 \
+ tst-rand48-2 \
+@@ -483,6 +484,7 @@ $(objpfx)tst-setcontext3.out: tst-setcontext3.sh $(objpfx)tst-setcontext3
+ $(common-objpfx)stdlib/; \
+ $(evaluate-test)
+
++$(objpfx)tst-qsort5: $(libm)
+ $(objpfx)tst-getenv-signal: $(shared-thread-library)
+ $(objpfx)tst-getenv-thread: $(shared-thread-library)
+ $(objpfx)tst-getenv-unsetenv: $(shared-thread-library)
+diff --git a/stdlib/qsort.c b/stdlib/qsort.c
+index b207c12d2f0a38cc..df8d0012c759e509 100644
+--- a/stdlib/qsort.c
++++ b/stdlib/qsort.c
+@@ -390,14 +390,23 @@ __qsort_r (void *const pbase, size_t total_elems, size_t size,
+ {
+ if ((size_t) (hi - left_ptr) <= max_thresh)
+ /* Ignore both small partitions. */
+- top = pop (top, &lo, &hi, &depth);
++ {
++ top = pop (top, &lo, &hi, &depth);
++ --depth;
++ }
+ else
+- /* Ignore small left partition. */
+- lo = left_ptr;
++ {
++ /* Ignore small left partition. */
++ lo = left_ptr;
++ --depth;
++ }
+ }
+ else if ((size_t) (hi - left_ptr) <= max_thresh)
+ /* Ignore small right partition. */
+- hi = right_ptr;
++ {
++ hi = right_ptr;
++ --depth;
++ }
+ else if ((right_ptr - lo) > (hi - left_ptr))
+ {
+ /* Push larger left partition indices. */
+diff --git a/stdlib/tst-qsort5.c b/stdlib/tst-qsort5.c
+new file mode 100644
+index 0000000000000000..d3a88c30f8ffb135
+--- /dev/null
++++ b/stdlib/tst-qsort5.c
+@@ -0,0 +1,171 @@
++/* Adversarial test for qsort_r.
++ Copyright (C) 2023 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <https://www.gnu.org/licenses/>. */
++
++/* The approach follows Douglas McIlroy, A Killer Adversary for
++ Quicksort. Software—Practice and Experience 29 (1999) 341-344.
++ Downloaded
++ (2023-11-17). */
++
++#include <math.h>
++#include <stdio.h>
++#include <stdlib.h>
++#include <support/check.h>
++#include <support/support.h>
++
++struct context
++{
++ /* Called the gas value in the paper. This value is larger than all
++ other values (length minus one will do), so comparison with any
++ decided value has a known result. */
++ int undecided_value;
++
++ /* If comparing undecided values, one of them has to be assigned a
++ value to ensure consistency with future comparisons. This is the
++ value that will be used. Starts out at zero. */
++ int next_decided;
++
++ /* Used to trick pivot selection. Deciding the value for the last
++ seen undecided value in a decided/undecided comparison happens
++ to trick the many qsort implementations. */
++ int last_undecided_index;
++
++ /* This array contains the actually assigned values. The call to
++ qsort_r sorts a different array that contains indices into this
++ array. */
++ int *decided_values;
++};
++
++static int
++compare_opponent (const void *l1, const void *r1, void *ctx1)
++{
++ const int *l = l1;
++ const int *r = r1;
++ struct context *ctx = ctx1;
++ int rvalue = ctx->decided_values[*r];
++ int lvalue = ctx->decided_values[*l];
++
++ if (lvalue == ctx->undecided_value)
++ {
++ if (rvalue == ctx->undecided_value)
++ {
++ /* Both values are undecided. In this case, make a decision
++ for the last-used undecided value. This tweak is very
++ specific to quicksort. */
++ if (*l == ctx->last_undecided_index)
++ {
++ ctx->decided_values[*l] = ctx->next_decided;
++ ++ctx->next_decided;
++ /* The undecided value for *r is greater. */
++ return -1;
++ }
++ else
++ {
++ ctx->decided_values[*r] = ctx->next_decided;
++ ++ctx->next_decided;
++ /* The undecided value for *l is greater. */
++ return 1;
++ }
++ }
++ else
++ {
++ ctx->last_undecided_index = *l;
++ return 1;
++ }
++ }
++ else
++ {
++ /* *l is a decided value. */
++ if (rvalue == ctx->undecided_value)
++ {
++ ctx->last_undecided_index = *r;
++ /* The undecided value for *r is greater. */
++ return -1;
++ }
++ else
++ return lvalue - rvalue;
++ }
++}
++
++/* Return a pointer to the adversarial permutation of length N. */
++static int *
++create_permutation (size_t n)
++{
++ struct context ctx =
++ {
++ .undecided_value = n - 1, /* Larger than all other values. */
++ .decided_values = xcalloc (n, sizeof (int)),
++ };
++ for (size_t i = 0; i < n; ++i)
++ ctx.decided_values[i] = ctx.undecided_value;
++ int *scratch = xcalloc (n, sizeof (int));
++ for (size_t i = 0; i < n; ++i)
++ scratch[i] = i;
++ qsort_r (scratch, n, sizeof (*scratch), compare_opponent, &ctx);
++ free (scratch);
++ return ctx.decided_values;
++}
++
++/* Callback function for qsort which counts the number of invocations
++ in *CLOSURE. */
++static int
++compare_counter (const void *l1, const void *r1, void *closure)
++{
++ const int *l = l1;
++ const int *r = r1;
++ unsigned long long int *counter = closure;
++ ++*counter;
++ return *l - *r;
++}
++
++/* Count the comparisons required for an adversarial permutation of
++ length N. */
++static unsigned long long int
++count_comparisons (size_t n)
++{
++ int *array = create_permutation (n);
++ unsigned long long int counter = 0;
++ qsort_r (array, n, sizeof (*array), compare_counter, &counter);
++ free (array);
++ return counter;
++}
++
++/* Check the scaling factor for one adversarial permutation of length
++ N, and report some statistics. */
++static void
++check_one_n (size_t n)
++{
++ unsigned long long int count = count_comparisons (n);
++ double factor = count / (n * log (count));
++ printf ("info: length %zu: %llu comparisons ~ %f * n * log (n)\n",
++ n, count, factor);
++ /* This is an arbitrary factor which is true for the current
++ implementation across a wide range of sizes. */
++ TEST_VERIFY (factor <= 4.5);
++}
++
++static int
++do_test (void)
++{
++ check_one_n (100);
++ check_one_n (1000);
++ for (int i = 1; i <= 15; ++i)
++ check_one_n (i * 10 * 1000);
++ return 0;
++}
++
++#include <support/test-driver.c>
diff --git a/glibc-RHEL-24168-14.patch b/glibc-RHEL-24168-14.patch
new file mode 100644
index 0000000..db8cbed
--- /dev/null
+++ b/glibc-RHEL-24168-14.patch
@@ -0,0 +1,105 @@
+commit b9390ba93676c4b1e87e218af5e7e4bb596312ac
+Author: Florian Weimer
+Date: Mon Dec 4 06:35:56 2023 +0100
+
+ stdlib: Fix array bounds protection in insertion sort phase of qsort
+
+ The previous check did not do anything because tmp_ptr already
+ points before run_ptr due to the way it is initialized.
+
+ Fixes commit e4d8117b82065dc72e8df80097360e7c05a349b9
+ ("stdlib: Avoid another self-comparison in qsort").
+
+ Reviewed-by: Adhemerval Zanella
+
+diff --git a/stdlib/Makefile b/stdlib/Makefile
+index a1a511da37f0c18e..82d9d909890853b7 100644
+--- a/stdlib/Makefile
++++ b/stdlib/Makefile
+@@ -213,6 +213,7 @@ tests := \
+ tst-qsort2 \
+ tst-qsort3 \
+ tst-qsort5 \
++ tst-qsort6 \
+ tst-quick_exit \
+ tst-rand48 \
+ tst-rand48-2 \
+diff --git a/stdlib/qsort.c b/stdlib/qsort.c
+index df8d0012c759e509..3d5405705862ddf0 100644
+--- a/stdlib/qsort.c
++++ b/stdlib/qsort.c
+@@ -239,7 +239,7 @@ insertion_sort_qsort_partitions (void *const pbase, size_t total_elems,
+ while ((run_ptr += size) <= end_ptr)
+ {
+ tmp_ptr = run_ptr - size;
+- while (run_ptr != tmp_ptr && cmp (run_ptr, tmp_ptr, arg) < 0)
++ while (tmp_ptr != base_ptr && cmp (run_ptr, tmp_ptr, arg) < 0)
+ tmp_ptr -= size;
+
+ tmp_ptr += size;
+diff --git a/stdlib/tst-qsort6.c b/stdlib/tst-qsort6.c
+new file mode 100644
+index 0000000000000000..8ec0a6b633bc8398
+--- /dev/null
++++ b/stdlib/tst-qsort6.c
+@@ -0,0 +1,60 @@
++/* Test qsort with invalid comparison functions.
++ Copyright (C) 2023 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <https://www.gnu.org/licenses/>. */
++
++#include <array_length.h>
++#include <stdlib.h>
++#include <support/check.h>
++
++/* Invalid comparison function that always returns -1. */
++static int
++invalid_compare_1 (const void *a1, const void *b1)
++{
++ const int *a = a1;
++ const int *b = b1;
++ /* Check that the marker value matches, which means that we are
++ likely within the array. */
++ TEST_COMPARE (*a, 842523635);
++ TEST_COMPARE (*b, 842523635);
++ TEST_VERIFY_EXIT (*a == 842523635);
++ TEST_VERIFY_EXIT (*b == 842523635);
++ return -1;
++}
++
++/* Invalid comparison function that always returns 1. */
++static int
++invalid_compare_2 (const void *a1, const void *b1)
++{
++ const int *a = a1;
++ const int *b = b1;
++ TEST_COMPARE (*a, 842523635);
++ TEST_COMPARE (*b, 842523635);
++ TEST_VERIFY_EXIT (*a == 842523635);
++ TEST_VERIFY_EXIT (*b == 842523635);
++ return 1;
++}
++
++static int
++do_test (void)
++{
++ int array[] = {842523635, 842523635, 842523635, 842523635, 842523635};
++ qsort (array, array_length (array), sizeof (array[0]), invalid_compare_1);
++ qsort (array, array_length (array), sizeof (array[0]), invalid_compare_2);
++ return 0;
++}
++
++#include <support/test-driver.c>
diff --git a/glibc-RHEL-24168-15.patch b/glibc-RHEL-24168-15.patch
new file mode 100644
index 0000000..7327901
--- /dev/null
+++ b/glibc-RHEL-24168-15.patch
@@ -0,0 +1,876 @@
+commit 709fbd3ec3595f2d1076b4fec09a739327459288
+Author: Adhemerval Zanella
+Date: Mon Jan 15 11:07:21 2024 -0300
+
+ stdlib: Reinstate stable mergesort implementation on qsort
+
+ The mergesort removal from qsort implementation (commit 03bf8357e8)
+ had the side-effect of making sorting nonstable. Although neither
+ POSIX nor C standard specify that qsort should be stable, it seems
+ that it has become an instance of Hyrum's law where multiple programs
+ expect it.
+
+ Also, the resulting introsort implementation is not faster than
+ the previous mergesort (which makes the change even less appealing).
+
+ This patch restores the previous mergesort implementation, with the
+ exception of machinery that checks the resulting allocation against
+ the _SC_PHYS_PAGES (it only adds complexity and the heuristic does not
+ always make sense depending on the system configuration and load).
+ The alloca usage was replaced with a fixed-size buffer.
+
+ For the fallback mechanism, the implementation uses heapsort. It is
+ simpler than quicksort, and it does not suffer from adversarial
+ inputs. With memory overcommit, it should be rarely triggered.
+
+ The drawback is mergesort requires O(n) extra space, and since it is
+ allocated with malloc the function is AS-signal-unsafe. It should be
+ feasible to change it to use mmap, although I am not sure how urgent
+ it is. The heapsort is also nonstable, so programs that require a
+ stable sort would still be subject to this latent issue.
+
+ The tst-qsort5 is removed since it will not create quicksort adversarial
+ inputs with the current qsort_r implementation.
+
+ Checked on x86_64-linux-gnu and aarch64-linux-gnu.
+ Reviewed-by: Florian Weimer
+
+Conflicts:
+ stdlib/tst-qsort5.c: Deletion had conflicts due to copyright update.
+
+diff --git a/manual/argp.texi b/manual/argp.texi
+index b77ad68285ecb732..0023441812d4e584 100644
+--- a/manual/argp.texi
++++ b/manual/argp.texi
+@@ -735,7 +735,7 @@ for options, bad phase of the moon, etc.
+ @c hol_set_group ok
+ @c hol_find_entry ok
+ @c hol_sort @mtslocale @acucorrupt
+-@c qsort dup
++@c qsort dup @acucorrupt
+ @c hol_entry_qcmp @mtslocale
+ @c hol_entry_cmp @mtslocale
+ @c group_cmp ok
+diff --git a/manual/locale.texi b/manual/locale.texi
+index f6afa5dc44a2a016..1b3f97839bb5d068 100644
+--- a/manual/locale.texi
++++ b/manual/locale.texi
+@@ -253,7 +253,7 @@ The symbols in this section are defined in the header file @file{locale.h}.
+ @c calculate_head_size ok
+ @c __munmap ok
+ @c compute_hashval ok
+-@c qsort dup
++@c qsort dup @acucorrupt
+ @c rangecmp ok
+ @c malloc @ascuheap @acsmem
+ @c strdup @ascuheap @acsmem
+diff --git a/manual/search.texi b/manual/search.texi
+index a550858478f7fc83..ffaadc46f51b18f9 100644
+--- a/manual/search.texi
++++ b/manual/search.texi
+@@ -159,7 +159,7 @@ To sort an array using an arbitrary comparison function, use the
+
+ @deftypefun void qsort (void *@var{array}, size_t @var{count}, size_t @var{size}, comparison_fn_t @var{compare})
+ @standards{ISO, stdlib.h}
+-@safety{@prelim{}@mtsafe{}@assafe{}@acsafe{}}
++@safety{@prelim{}@mtsafe{}@assafe{}@acunsafe{@acucorrupt{}}}
+ The @code{qsort} function sorts the array @var{array}. The array
+ contains @var{count} elements, each of which is of size @var{size}.
+
+@@ -199,8 +199,9 @@ Functions}):
+ The @code{qsort} function derives its name from the fact that it was
+ originally implemented using the ``quick sort'' algorithm.
+
+-The implementation of @code{qsort} in this library is an in-place sort
+-and uses a constant extra space (allocated on the stack).
++The implementation of @code{qsort} attempts to allocate auxiliary storage
++and use the merge sort algorithm, without violating C standard requirement
++that arguments passed to the comparison function point within the array.
+ @end deftypefun
+
+ @node Search/Sort Example
+diff --git a/stdlib/Makefile b/stdlib/Makefile
+index 82d9d909890853b7..a9d91a57c08ac506 100644
+--- a/stdlib/Makefile
++++ b/stdlib/Makefile
+@@ -212,7 +212,6 @@ tests := \
+ tst-qsort \
+ tst-qsort2 \
+ tst-qsort3 \
+- tst-qsort5 \
+ tst-qsort6 \
+ tst-quick_exit \
+ tst-rand48 \
+diff --git a/stdlib/qsort.c b/stdlib/qsort.c
+index 3d5405705862ddf0..b95889047ba31193 100644
+--- a/stdlib/qsort.c
++++ b/stdlib/qsort.c
+@@ -20,6 +20,7 @@
+ Engineering a sort function; Jon Bentley and M. Douglas McIlroy;
+ Software - Practice and Experience; Vol. 23 (11), 1249-1265, 1993. */
+
++#include
+ #include
+ #include
+ #include
+@@ -33,9 +34,13 @@ enum swap_type_t
+ {
+ SWAP_WORDS_64,
+ SWAP_WORDS_32,
++ SWAP_VOID_ARG,
+ SWAP_BYTES
+ };
+
++typedef uint32_t __attribute__ ((__may_alias__)) u32_alias_t;
++typedef uint64_t __attribute__ ((__may_alias__)) u64_alias_t;
++
+ /* If this function returns true, elements can be safely copied using word
+ loads and stores. Otherwise, it might not be safe. BASE (as an integer)
+ must be a multiple of the word alignment. SIZE must be a multiple of
+@@ -52,7 +57,6 @@ is_aligned (const void *base, size_t size, size_t wordsize)
+ static inline void
+ swap_words_64 (void * restrict a, void * restrict b, size_t n)
+ {
+- typedef uint64_t __attribute__ ((__may_alias__)) u64_alias_t;
+ do
+ {
+ n -= 8;
+@@ -65,7 +69,6 @@ swap_words_64 (void * restrict a, void * restrict b, size_t n)
+ static inline void
+ swap_words_32 (void * restrict a, void * restrict b, size_t n)
+ {
+- typedef uint32_t __attribute__ ((__may_alias__)) u32_alias_t;
+ do
+ {
+ n -= 4;
+@@ -89,43 +92,6 @@ do_swap (void * restrict a, void * restrict b, size_t size,
+ __memswap (a, b, size);
+ }
+
+-/* Discontinue quicksort algorithm when partition gets below this size.
+- This particular magic number was chosen to work best on a Sun 4/260. */
+-#define MAX_THRESH 4
+-
+-/* Stack node declarations used to store unfulfilled partition obligations. */
+-typedef struct
+- {
+- char *lo;
+- char *hi;
+- size_t depth;
+- } stack_node;
+-
+-/* The stack needs log (total_elements) entries (we could even subtract
+- log(MAX_THRESH)). Since total_elements has type size_t, we get as
+- upper bound for log (total_elements):
+- bits per byte (CHAR_BIT) * sizeof(size_t). */
+-enum { STACK_SIZE = CHAR_BIT * sizeof (size_t) };
+-
+-static inline stack_node *
+-push (stack_node *top, char *lo, char *hi, size_t depth)
+-{
+- top->lo = lo;
+- top->hi = hi;
+- top->depth = depth;
+- return ++top;
+-}
+-
+-static inline stack_node *
+-pop (stack_node *top, char **lo, char **hi, size_t *depth)
+-{
+- --top;
+- *lo = top->lo;
+- *hi = top->hi;
+- *depth = top->depth;
+- return top;
+-}
+-
+ /* Establish the heap condition at index K, that is, the key at K will
+ not be less than either of its children, at 2 * K + 1 and 2 * K + 2
+ (if they exist). N is the last valid index. */
+@@ -173,21 +139,35 @@ heapify (void *base, size_t size, size_t n, enum swap_type_t swap_type,
+ }
+ }
+
+-/* A non-recursive heapsort, used on introsort implementation as a
+- fallback routine with worst-case performance of O(nlog n) and
+- worst-case space complexity of O(1). It sorts the array starting
+- at BASE and ending at END (inclusive), with each element of SIZE
+- bytes. The SWAP_TYPE is the callback function used to swap
+- elements, and CMP is the function used to compare elements. */
++static enum swap_type_t
++get_swap_type (void *const pbase, size_t size)
++{
++ if ((size & (sizeof (uint32_t) - 1)) == 0
++ && ((uintptr_t) pbase) % __alignof__ (uint32_t) == 0)
++ {
++ if (size == sizeof (uint32_t))
++ return SWAP_WORDS_32;
++ else if (size == sizeof (uint64_t)
++ && ((uintptr_t) pbase) % __alignof__ (uint64_t) == 0)
++ return SWAP_WORDS_64;
++ }
++ return SWAP_BYTES;
++}
++
++
++/* A non-recursive heapsort with worst-case performance of O(nlog n) and
++ worst-case space complexity of O(1). It sorts the array starting at
++ BASE with n + 1 elements of SIZE bytes. The SWAP_TYPE is the callback
++ function used to swap elements, and CMP is the function used to compare
++ elements. */
+ static void
+-heapsort_r (void *base, void *end, size_t size, enum swap_type_t swap_type,
+- __compar_d_fn_t cmp, void *arg)
++heapsort_r (void *base, size_t n, size_t size, __compar_d_fn_t cmp, void *arg)
+ {
+- size_t n = ((uintptr_t) end - (uintptr_t) base) / size;
+ if (n <= 1)
+- /* Handled by insertion sort. */
+ return;
+
++ enum swap_type_t swap_type = get_swap_type (base, size);
++
+ /* Build the binary heap, largest value at the base[0]. */
+ heapify (base, size, n, swap_type, cmp, arg);
+
+@@ -209,226 +189,226 @@ heapsort_r (void *base, void *end, size_t size, enum swap_type_t swap_type,
+ }
+ }
+
+-static inline void
+-insertion_sort_qsort_partitions (void *const pbase, size_t total_elems,
+- size_t size, enum swap_type_t swap_type,
+- __compar_d_fn_t cmp, void *arg)
++/* The maximum size in bytes required by mergesort that will be provided
++ through a buffer allocated in the stack. */
++#define QSORT_STACK_SIZE 1024
++
++/* Elements larger than this value will be sorted through indirect sorting
++ to minimize the need to memory swap calls. */
++#define INDIRECT_SORT_SIZE_THRES 32
++
++struct msort_param
+ {
+- char *base_ptr = (char *) pbase;
+- char *const end_ptr = &base_ptr[size * (total_elems - 1)];
+- char *tmp_ptr = base_ptr;
+-#define min(x, y) ((x) < (y) ? (x) : (y))
+- const size_t max_thresh = MAX_THRESH * size;
+- char *thresh = min(end_ptr, base_ptr + max_thresh);
+- char *run_ptr;
++ size_t s;
++ enum swap_type_t var;
++ __compar_d_fn_t cmp;
++ void *arg;
++ char *t;
++};
+
+- /* Find smallest element in first threshold and place it at the
+- array's beginning. This is the smallest array element,
+- and the operation speeds up insertion sort's inner loop. */
++static void
++msort_with_tmp (const struct msort_param *p, void *b, size_t n)
++{
++ char *b1, *b2;
++ size_t n1, n2;
+
+- for (run_ptr = tmp_ptr + size; run_ptr <= thresh; run_ptr += size)
+- if (cmp (run_ptr, tmp_ptr, arg) < 0)
+- tmp_ptr = run_ptr;
++ if (n <= 1)
++ return;
+
+- if (tmp_ptr != base_ptr)
+- do_swap (tmp_ptr, base_ptr, size, swap_type);
++ n1 = n / 2;
++ n2 = n - n1;
++ b1 = b;
++ b2 = (char *) b + (n1 * p->s);
+
+- /* Insertion sort, running from left-hand-side up to right-hand-side. */
++ msort_with_tmp (p, b1, n1);
++ msort_with_tmp (p, b2, n2);
+
+- run_ptr = base_ptr + size;
+- while ((run_ptr += size) <= end_ptr)
++ char *tmp = p->t;
++ const size_t s = p->s;
++ __compar_d_fn_t cmp = p->cmp;
++ void *arg = p->arg;
++ switch (p->var)
+ {
+- tmp_ptr = run_ptr - size;
+- while (tmp_ptr != base_ptr && cmp (run_ptr, tmp_ptr, arg) < 0)
+- tmp_ptr -= size;
+-
+- tmp_ptr += size;
+- if (tmp_ptr != run_ptr)
+- {
+- char *trav;
+-
+- trav = run_ptr + size;
+- while (--trav >= run_ptr)
+- {
+- char c = *trav;
+- char *hi, *lo;
+-
+- for (hi = lo = trav; (lo -= size) >= tmp_ptr; hi = lo)
+- *hi = *lo;
+- *hi = c;
+- }
+- }
++ case SWAP_WORDS_32:
++ while (n1 > 0 && n2 > 0)
++ {
++ if (cmp (b1, b2, arg) <= 0)
++ {
++ *(u32_alias_t *) tmp = *(u32_alias_t *) b1;
++ b1 += sizeof (u32_alias_t);
++ --n1;
++ }
++ else
++ {
++ *(u32_alias_t *) tmp = *(u32_alias_t *) b2;
++ b2 += sizeof (u32_alias_t);
++ --n2;
++ }
++ tmp += sizeof (u32_alias_t);
++ }
++ break;
++ case SWAP_WORDS_64:
++ while (n1 > 0 && n2 > 0)
++ {
++ if (cmp (b1, b2, arg) <= 0)
++ {
++ *(u64_alias_t *) tmp = *(u64_alias_t *) b1;
++ b1 += sizeof (u64_alias_t);
++ --n1;
++ }
++ else
++ {
++ *(u64_alias_t *) tmp = *(u64_alias_t *) b2;
++ b2 += sizeof (u64_alias_t);
++ --n2;
++ }
++ tmp += sizeof (u64_alias_t);
++ }
++ break;
++ case SWAP_VOID_ARG:
++ while (n1 > 0 && n2 > 0)
++ {
++ if ((*cmp) (*(const void **) b1, *(const void **) b2, arg) <= 0)
++ {
++ *(void **) tmp = *(void **) b1;
++ b1 += sizeof (void *);
++ --n1;
++ }
++ else
++ {
++ *(void **) tmp = *(void **) b2;
++ b2 += sizeof (void *);
++ --n2;
++ }
++ tmp += sizeof (void *);
++ }
++ break;
++ default:
++ while (n1 > 0 && n2 > 0)
++ {
++ if (cmp (b1, b2, arg) <= 0)
++ {
++ tmp = (char *) __mempcpy (tmp, b1, s);
++ b1 += s;
++ --n1;
++ }
++ else
++ {
++ tmp = (char *) __mempcpy (tmp, b2, s);
++ b2 += s;
++ --n2;
++ }
++ }
++ break;
+ }
+-}
+-
+-/* Order size using quicksort. This implementation incorporates
+- four optimizations discussed in Sedgewick:
+
+- 1. Non-recursive, using an explicit stack of pointer that store the
+- next array partition to sort. To save time, this maximum amount
+- of space required to store an array of SIZE_MAX is allocated on the
+- stack. Assuming a 32-bit (64 bit) integer for size_t, this needs
+- only 32 * sizeof(stack_node) == 256 bytes (for 64 bit: 1024 bytes).
+- Pretty cheap, actually.
+-
+- 2. Chose the pivot element using a median-of-three decision tree.
+- This reduces the probability of selecting a bad pivot value and
+- eliminates certain extraneous comparisons.
++ if (n1 > 0)
++ memcpy (tmp, b1, n1 * s);
++ memcpy (b, p->t, (n - n2) * s);
++}
+
+- 3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving
+- insertion sort to order the MAX_THRESH items within each partition.
+- This is a big win, since insertion sort is faster for small, mostly
+- sorted array segments.
++static void
++__attribute_used__
++indirect_msort_with_tmp (const struct msort_param *p, void *b, size_t n,
++ size_t s)
++{
++ /* Indirect sorting. */
++ char *ip = (char *) b;
++ void **tp = (void **) (p->t + n * sizeof (void *));
++ void **t = tp;
++ void *tmp_storage = (void *) (tp + n);
+
+- 4. The larger of the two sub-partitions is always pushed onto the
+- stack first, with the algorithm then concentrating on the
+- smaller partition. This *guarantees* no more than log (total_elems)
+- stack size is needed (actually O(1) in this case)! */
++ while ((void *) t < tmp_storage)
++ {
++ *t++ = ip;
++ ip += s;
++ }
++ msort_with_tmp (p, p->t + n * sizeof (void *), n);
++
++ /* tp[0] .. tp[n - 1] is now sorted, copy around entries of
++ the original array. Knuth vol. 3 (2nd ed.) exercise 5.2-10. */
++ char *kp;
++ size_t i;
++ for (i = 0, ip = (char *) b; i < n; i++, ip += s)
++ if ((kp = tp[i]) != ip)
++ {
++ size_t j = i;
++ char *jp = ip;
++ memcpy (tmp_storage, ip, s);
++
++ do
++ {
++ size_t k = (kp - (char *) b) / s;
++ tp[j] = jp;
++ memcpy (jp, kp, s);
++ j = k;
++ jp = kp;
++ kp = tp[k];
++ }
++ while (kp != ip);
++
++ tp[j] = jp;
++ memcpy (jp, tmp_storage, s);
++ }
++}
+
+ void
+ __qsort_r (void *const pbase, size_t total_elems, size_t size,
+ __compar_d_fn_t cmp, void *arg)
+ {
+- char *base_ptr = (char *) pbase;
+-
+- const size_t max_thresh = MAX_THRESH * size;
+-
+ if (total_elems <= 1)
+- /* Avoid lossage with unsigned arithmetic below. */
+ return;
+
+- enum swap_type_t swap_type;
+- if (is_aligned (pbase, size, 8))
+- swap_type = SWAP_WORDS_64;
+- else if (is_aligned (pbase, size, 4))
+- swap_type = SWAP_WORDS_32;
+- else
+- swap_type = SWAP_BYTES;
++ /* Align to the maximum size used by the swap optimization. */
++ _Alignas (uint64_t) char tmp[QSORT_STACK_SIZE];
++ size_t total_size = total_elems * size;
++ char *buf;
+
+- /* Maximum depth before quicksort switches to heapsort. */
+- size_t depth = 2 * (sizeof (size_t) * CHAR_BIT - 1
+- - __builtin_clzl (total_elems));
++ if (size > INDIRECT_SORT_SIZE_THRES)
++ total_size = 2 * total_elems * sizeof (void *) + size;
+
+- if (total_elems > MAX_THRESH)
++ if (total_size < sizeof buf)
++ buf = tmp;
++ else
+ {
+- char *lo = base_ptr;
+- char *hi = &lo[size * (total_elems - 1)];
+- stack_node stack[STACK_SIZE];
+- stack_node *top = push (stack, NULL, NULL, depth);
+-
+- while (stack < top)
+- {
+- if (depth == 0)
+- {
+- heapsort_r (lo, hi, size, swap_type, cmp, arg);
+- top = pop (top, &lo, &hi, &depth);
+- continue;
+- }
+-
+- char *left_ptr;
+- char *right_ptr;
+-
+- /* Select median value from among LO, MID, and HI. Rearrange
+- LO and HI so the three values are sorted. This lowers the
+- probability of picking a pathological pivot value and
+- skips a comparison for both the LEFT_PTR and RIGHT_PTR in
+- the while loops. */
+-
+- char *mid = lo + size * ((hi - lo) / size >> 1);
+-
+- if ((*cmp) ((void *) mid, (void *) lo, arg) < 0)
+- do_swap (mid, lo, size, swap_type);
+- if ((*cmp) ((void *) hi, (void *) mid, arg) < 0)
+- do_swap (mid, hi, size, swap_type);
+- else
+- goto jump_over;
+- if ((*cmp) ((void *) mid, (void *) lo, arg) < 0)
+- do_swap (mid, lo, size, swap_type);
+- jump_over:;
+-
+- left_ptr = lo + size;
+- right_ptr = hi - size;
+-
+- /* Here's the famous ``collapse the walls'' section of quicksort.
+- Gotta like those tight inner loops! They are the main reason
+- that this algorithm runs much faster than others. */
+- do
+- {
+- while (left_ptr != mid
+- && (*cmp) ((void *) left_ptr, (void *) mid, arg) < 0)
+- left_ptr += size;
+-
+- while (right_ptr != mid
+- && (*cmp) ((void *) mid, (void *) right_ptr, arg) < 0)
+- right_ptr -= size;
+-
+- if (left_ptr < right_ptr)
+- {
+- do_swap (left_ptr, right_ptr, size, swap_type);
+- if (mid == left_ptr)
+- mid = right_ptr;
+- else if (mid == right_ptr)
+- mid = left_ptr;
+- left_ptr += size;
+- right_ptr -= size;
+- }
+- else if (left_ptr == right_ptr)
+- {
+- left_ptr += size;
+- right_ptr -= size;
+- break;
+- }
+- }
+- while (left_ptr <= right_ptr);
+-
+- /* Set up pointers for next iteration. First determine whether
+- left and right partitions are below the threshold size. If so,
+- ignore one or both. Otherwise, push the larger partition's
+- bounds on the stack and continue sorting the smaller one. */
+-
+- if ((size_t) (right_ptr - lo) <= max_thresh)
+- {
+- if ((size_t) (hi - left_ptr) <= max_thresh)
+- /* Ignore both small partitions. */
+- {
+- top = pop (top, &lo, &hi, &depth);
+- --depth;
+- }
+- else
+- {
+- /* Ignore small left partition. */
+- lo = left_ptr;
+- --depth;
+- }
+- }
+- else if ((size_t) (hi - left_ptr) <= max_thresh)
+- /* Ignore small right partition. */
+- {
+- hi = right_ptr;
+- --depth;
+- }
+- else if ((right_ptr - lo) > (hi - left_ptr))
+- {
+- /* Push larger left partition indices. */
+- top = push (top, lo, right_ptr, depth - 1);
+- lo = left_ptr;
+- }
+- else
+- {
+- /* Push larger right partition indices. */
+- top = push (top, left_ptr, hi, depth - 1);
+- hi = right_ptr;
+- }
+- }
++ int save = errno;
++ buf = malloc (total_size);
++ __set_errno (save);
++ if (buf == NULL)
++ {
++ /* Fallback to heapsort in case of memory failure. */
++ heapsort_r (pbase, total_elems - 1, size, cmp, arg);
++ return;
++ }
++ }
++
++ if (size > INDIRECT_SORT_SIZE_THRES)
++ {
++ const struct msort_param msort_param =
++ {
++ .s = sizeof (void *),
++ .cmp = cmp,
++ .arg = arg,
++ .var = SWAP_VOID_ARG,
++ .t = buf,
++ };
++ indirect_msort_with_tmp (&msort_param, pbase, total_elems, size);
++ }
++ else
++ {
++ const struct msort_param msort_param =
++ {
++ .s = size,
++ .cmp = cmp,
++ .arg = arg,
++ .var = get_swap_type (pbase, size),
++ .t = buf,
++ };
++ msort_with_tmp (&msort_param, pbase, total_elems);
+ }
+
+- /* Once the BASE_PTR array is partially sorted by quicksort the rest
+- is completely sorted using insertion sort, since this is efficient
+- for partitions below MAX_THRESH size. BASE_PTR points to the beginning
+- of the array to sort, and END_PTR points at the very last element in
+- the array (*not* one beyond it!). */
+- insertion_sort_qsort_partitions (pbase, total_elems, size, swap_type, cmp,
+- arg);
++ if (buf != tmp)
++ free (buf);
+ }
+ libc_hidden_def (__qsort_r)
+ weak_alias (__qsort_r, qsort_r)
+diff --git a/stdlib/tst-qsort4.c b/stdlib/tst-qsort4.c
+index a7abaa1a37461666..4cf373f22e28fade 100644
+--- a/stdlib/tst-qsort4.c
++++ b/stdlib/tst-qsort4.c
+@@ -30,35 +30,12 @@ cmp (const void *a1, const void *b1, void *closure)
+ return *a - *b;
+ }
+
+-/* Wrapper around heapsort_r that sets up the required variables. */
+-static void
+-heapsort_wrapper (void *const pbase, size_t total_elems, size_t size,
+- __compar_d_fn_t cmp, void *arg)
+-{
+- char *base_ptr = (char *) pbase;
+- char *lo = base_ptr;
+- char *hi = &lo[size * (total_elems - 1)];
+-
+- if (total_elems <= 1)
+- /* Avoid lossage with unsigned arithmetic below. */
+- return;
+-
+- enum swap_type_t swap_type;
+- if (is_aligned (pbase, size, 8))
+- swap_type = SWAP_WORDS_64;
+- else if (is_aligned (pbase, size, 4))
+- swap_type = SWAP_WORDS_32;
+- else
+- swap_type = SWAP_BYTES;
+- heapsort_r (lo, hi, size, swap_type, cmp, arg);
+-}
+-
+ static void
+ check_one_sort (signed char *array, int length)
+ {
+ signed char *copy = xmalloc (length);
+ memcpy (copy, array, length);
+- heapsort_wrapper (copy, length, 1, cmp, NULL);
++ heapsort_r (copy, length - 1, 1, cmp, NULL);
+
+ /* Verify that the result is sorted. */
+ for (int i = 1; i < length; ++i)
+diff --git a/stdlib/tst-qsort5.c b/stdlib/tst-qsort5.c
+deleted file mode 100644
+index d3a88c30f8ffb135..0000000000000000
+--- a/stdlib/tst-qsort5.c
++++ /dev/null
+@@ -1,171 +0,0 @@
+-/* Adversarial test for qsort_r.
+- Copyright (C) 2023 Free Software Foundation, Inc.
+- This file is part of the GNU C Library.
+-
+- The GNU C Library is free software; you can redistribute it and/or
+- modify it under the terms of the GNU Lesser General Public
+- License as published by the Free Software Foundation; either
+- version 2.1 of the License, or (at your option) any later version.
+-
+- The GNU C Library is distributed in the hope that it will be useful,
+- but WITHOUT ANY WARRANTY; without even the implied warranty of
+- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- Lesser General Public License for more details.
+-
+- You should have received a copy of the GNU Lesser General Public
+- License along with the GNU C Library; if not, see
+- <https://www.gnu.org/licenses/>. */
+-
+-/* The approach follows Douglas McIlroy, A Killer Adversary for
+- Quicksort. Software—Practice and Experience 29 (1999) 341-344.
+- Downloaded
+- (2023-11-17). */
+-
+-#include <math.h>
+-#include <stdio.h>
+-#include <stdlib.h>
+-#include <support/check.h>
+-#include <support/support.h>
+-
+-struct context
+-{
+- /* Called the gas value in the paper. This value is larger than all
+- other values (length minus one will do), so comparison with any
+- decided value has a known result. */
+- int undecided_value;
+-
+- /* If comparing undecided values, one of them has to be assigned a
+- value to ensure consistency with future comparisons. This is the
+- value that will be used. Starts out at zero. */
+- int next_decided;
+-
+- /* Used to trick pivot selection. Deciding the value for the last
+- seen undecided value in a decided/undecided comparison happens
+- to trick the many qsort implementations. */
+- int last_undecided_index;
+-
+- /* This array contains the actually assigned values. The call to
+- qsort_r sorts a different array that contains indices into this
+- array. */
+- int *decided_values;
+-};
+-
+-static int
+-compare_opponent (const void *l1, const void *r1, void *ctx1)
+-{
+- const int *l = l1;
+- const int *r = r1;
+- struct context *ctx = ctx1;
+- int rvalue = ctx->decided_values[*r];
+- int lvalue = ctx->decided_values[*l];
+-
+- if (lvalue == ctx->undecided_value)
+- {
+- if (rvalue == ctx->undecided_value)
+- {
+- /* Both values are undecided. In this case, make a decision
+- for the last-used undecided value. This tweak is very
+- specific to quicksort. */
+- if (*l == ctx->last_undecided_index)
+- {
+- ctx->decided_values[*l] = ctx->next_decided;
+- ++ctx->next_decided;
+- /* The undecided value for *r is greater. */
+- return -1;
+- }
+- else
+- {
+- ctx->decided_values[*r] = ctx->next_decided;
+- ++ctx->next_decided;
+- /* The undecided value for *l is greater. */
+- return 1;
+- }
+- }
+- else
+- {
+- ctx->last_undecided_index = *l;
+- return 1;
+- }
+- }
+- else
+- {
+- /* *l is a decided value. */
+- if (rvalue == ctx->undecided_value)
+- {
+- ctx->last_undecided_index = *r;
+- /* The undecided value for *r is greater. */
+- return -1;
+- }
+- else
+- return lvalue - rvalue;
+- }
+-}
+-
+-/* Return a pointer to the adversarial permutation of length N. */
+-static int *
+-create_permutation (size_t n)
+-{
+- struct context ctx =
+- {
+- .undecided_value = n - 1, /* Larger than all other values. */
+- .decided_values = xcalloc (n, sizeof (int)),
+- };
+- for (size_t i = 0; i < n; ++i)
+- ctx.decided_values[i] = ctx.undecided_value;
+- int *scratch = xcalloc (n, sizeof (int));
+- for (size_t i = 0; i < n; ++i)
+- scratch[i] = i;
+- qsort_r (scratch, n, sizeof (*scratch), compare_opponent, &ctx);
+- free (scratch);
+- return ctx.decided_values;
+-}
+-
+-/* Callback function for qsort which counts the number of invocations
+- in *CLOSURE. */
+-static int
+-compare_counter (const void *l1, const void *r1, void *closure)
+-{
+- const int *l = l1;
+- const int *r = r1;
+- unsigned long long int *counter = closure;
+- ++*counter;
+- return *l - *r;
+-}
+-
+-/* Count the comparisons required for an adversarial permutation of
+- length N. */
+-static unsigned long long int
+-count_comparisons (size_t n)
+-{
+- int *array = create_permutation (n);
+- unsigned long long int counter = 0;
+- qsort_r (array, n, sizeof (*array), compare_counter, &counter);
+- free (array);
+- return counter;
+-}
+-
+-/* Check the scaling factor for one adversarial permutation of length
+- N, and report some statistics. */
+-static void
+-check_one_n (size_t n)
+-{
+- unsigned long long int count = count_comparisons (n);
+- double factor = count / (n * log (count));
+- printf ("info: length %zu: %llu comparisons ~ %f * n * log (n)\n",
+- n, count, factor);
+- /* This is an arbitrary factor which is true for the current
+- implementation across a wide range of sizes. */
+- TEST_VERIFY (factor <= 4.5);
+-}
+-
+-static int
+-do_test (void)
+-{
+- check_one_n (100);
+- check_one_n (1000);
+- for (int i = 1; i <= 15; ++i)
+- check_one_n (i * 10 * 1000);
+- return 0;
+-}
+-
+-#include <support/test-driver.c>
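To make the reinstated merge step above easier to follow, here is a stripped-down sketch written against a plain two-argument comparator instead of the internal msort_param machinery. Two adjacent sorted runs are merged through a caller-provided temporary buffer and copied back; as in msort_with_tmp above, the tail of the second run is left in place because it already sits in its final position. This is an illustration, not the glibc code.

#include <stdio.h>
#include <string.h>

/* Merge two adjacent sorted runs of N1 and N2 elements of SIZE bytes
   starting at B, using TMP (at least (N1 + N2) * SIZE bytes) as
   scratch space.  glibc additionally specializes this loop for
   32-bit, 64-bit and pointer-sized elements.  */
static void
merge_runs (char *b, size_t n1, size_t n2, size_t size,
            int (*cmp) (const void *, const void *), char *tmp)
{
  char *b1 = b;
  char *b2 = b + n1 * size;
  char *out = tmp;
  size_t total = n1 + n2;

  while (n1 > 0 && n2 > 0)
    {
      if (cmp (b1, b2) <= 0)
        {
          memcpy (out, b1, size);
          b1 += size;
          --n1;
        }
      else
        {
          memcpy (out, b2, size);
          b2 += size;
          --n2;
        }
      out += size;
    }

  if (n1 > 0)
    memcpy (out, b1, n1 * size);          /* rest of the first run */
  memcpy (b, tmp, (total - n2) * size);   /* second run's tail is already in place */
}

static int
cmp_int (const void *a, const void *b)
{
  int x = *(const int *) a;
  int y = *(const int *) b;
  return (x > y) - (x < y);
}

int
main (void)
{
  int a[] = { 1, 4, 9, 2, 3, 8 };          /* two sorted runs of length 3 */
  int tmp[6];
  merge_runs ((char *) a, 3, 3, sizeof (int), cmp_int, (char *) tmp);
  for (int i = 0; i < 6; ++i)
    printf ("%d ", a[i]);                   /* prints: 1 2 3 4 8 9 */
  putchar ('\n');
  return 0;
}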
diff --git a/glibc-RHEL-24168-16.patch b/glibc-RHEL-24168-16.patch
new file mode 100644
index 0000000..90d52b3
--- /dev/null
+++ b/glibc-RHEL-24168-16.patch
@@ -0,0 +1,27 @@
+commit 74d2731a5fb2676b64092bc25e7f193db1b17b2b
+Author: Kuan-Wei Chiu
+Date: Tue Jan 16 10:16:56 2024 +0800
+
+ stdlib: Fix heapsort for cases with exactly two elements
+
+ When malloc fails to allocate a buffer and falls back to heapsort, the
+ current heapsort implementation does not perform sorting when there are
+ exactly two elements. Heapsort is now skipped only when there is
+ exactly one element.
+
+ Signed-off-by: Kuan-Wei Chiu
+ Reviewed-by: Adhemerval Zanella
+
+diff --git a/stdlib/qsort.c b/stdlib/qsort.c
+index b95889047ba31193..7b6c7e1f79974157 100644
+--- a/stdlib/qsort.c
++++ b/stdlib/qsort.c
+@@ -163,7 +163,7 @@ get_swap_type (void *const pbase, size_t size)
+ static void
+ heapsort_r (void *base, size_t n, size_t size, __compar_d_fn_t cmp, void *arg)
+ {
+- if (n <= 1)
++ if (n == 0)
+ return;
+
+ enum swap_type_t swap_type = get_swap_type (base, size);
diff --git a/glibc-RHEL-24168-17.patch b/glibc-RHEL-24168-17.patch
new file mode 100644
index 0000000..5f8df13
--- /dev/null
+++ b/glibc-RHEL-24168-17.patch
@@ -0,0 +1,27 @@
+commit 1bb28b7b4f01709b841c86850e1bb83b554feafe
+Author: Kuan-Wei Chiu
+Date: Tue Jan 16 10:16:57 2024 +0800
+
+ stdlib: Verify heapsort for two-element cases
+
+ Adjust the testing approach to start from scenarios with only 2
+ elements, as insertion sort no longer handles such cases.
+
+ Signed-off-by: Kuan-Wei Chiu
+ Reviewed-by: Adhemerval Zanella
+
+diff --git a/stdlib/tst-qsort4.c b/stdlib/tst-qsort4.c
+index 4cf373f22e28fade..7909793d9eb3edc7 100644
+--- a/stdlib/tst-qsort4.c
++++ b/stdlib/tst-qsort4.c
+@@ -96,9 +96,7 @@ do_test (void)
+ check_one_sort ((signed char[16]) {15, 3, 4, 2, 1, 0, 8, 7, 6, 5, 14,
+ 13, 12, 11, 10, 9}, 16);
+
+- /* Array lengths 2 and less are not handled by heapsort_r and
+- deferred to insertion sort. */
+- for (int i = 3; i <= 8; ++i)
++ for (int i = 2; i <= 8; ++i)
+ {
+ signed char *buf = xmalloc (i);
+ check_combinations (i, buf, 0);
diff --git a/glibc-RHEL-24168-18.patch b/glibc-RHEL-24168-18.patch
new file mode 100644
index 0000000..8ae9f04
--- /dev/null
+++ b/glibc-RHEL-24168-18.patch
@@ -0,0 +1,32 @@
+commit 31bd548650673e8b5ae1a31f1c596ff8305a5d4c
+Author: Adhemerval Zanella
+Date: Wed Jan 17 08:08:01 2024 -0300
+
+ stdlib: Remove unused is_aligned function from qsort.c
+
+ Checked on x86_64-linux-gnu.
+
+diff --git a/stdlib/qsort.c b/stdlib/qsort.c
+index 7b6c7e1f79974157..8db8a81d182dd1fc 100644
+--- a/stdlib/qsort.c
++++ b/stdlib/qsort.c
+@@ -41,19 +41,6 @@ enum swap_type_t
+ typedef uint32_t __attribute__ ((__may_alias__)) u32_alias_t;
+ typedef uint64_t __attribute__ ((__may_alias__)) u64_alias_t;
+
+-/* If this function returns true, elements can be safely copied using word
+- loads and stores. Otherwise, it might not be safe. BASE (as an integer)
+- must be a multiple of the word alignment. SIZE must be a multiple of
+- WORDSIZE. Since WORDSIZE must be a multiple of the word alignment, and
+- WORDSIZE is a power of two on all supported platforms, this function for
+- speed merely checks that BASE and SIZE are both multiples of the word
+- size. */
+-static inline bool
+-is_aligned (const void *base, size_t size, size_t wordsize)
+-{
+- return (((uintptr_t) base | size) & (wordsize - 1)) == 0;
+-}
+-
+ static inline void
+ swap_words_64 (void * restrict a, void * restrict b, size_t n)
+ {
diff --git a/glibc-RHEL-24168-19.patch b/glibc-RHEL-24168-19.patch
new file mode 100644
index 0000000..23510fe
--- /dev/null
+++ b/glibc-RHEL-24168-19.patch
@@ -0,0 +1,51 @@
+commit dfa3394a605c8f6f25e4f827789bc89eca1d206c
+Author: Xi Ruoyao
+Date: Tue Jan 23 04:29:18 2024 +0800
+
+ qsort: Fix a typo causing unnecessary malloc/free (BZ 31276)
+
+ In qsort_r we allocate a buffer sized QSORT_STACK_SIZE (1024) on stack
+ and we intend to use it if all elements can fit into it. But there is a
+ typo:
+
+ if (total_size < sizeof buf)
+ buf = tmp;
+ else
+ /* allocate a buffer on heap and use it ... */
+
+ Here "buf" is a pointer, thus sizeof buf is just 4 or 8, instead of
+ 1024. There is also a minor issue that we should use "<=" instead of
+ "<".
+
+ This bug is detected debugging some strange heap corruption running the
+ Ruby-3.3.0 test suite (on an experimental Linux From Scratch build using
+ Binutils-2.41.90 and Glibc trunk, and also Fedora Rawhide [1]). It
+ seems Ruby is doing some wild "optimization" by jumping into somewhere
+ in qsort_r instead of calling it normally, resulting in a double free of
+ buf if we allocate it on heap. The issue can be reproduced
+ deterministically with:
+
+ LD_PRELOAD=/usr/lib/libc_malloc_debug.so MALLOC_CHECK_=3 \
+ LD_LIBRARY_PATH=. ./ruby test/runner.rb test/ruby/test_enum.rb
+
+ in Ruby-3.3.0 tree after building it. This change would hide the issue
+ for Ruby, but Ruby is likely still buggy (if using this "optimization"
+ sorting larger arrays).
+
+ [1]:https://kojipkgs.fedoraproject.org/work/tasks/9729/111889729/build.log
+
+ Signed-off-by: Xi Ruoyao
+
+diff --git a/stdlib/qsort.c b/stdlib/qsort.c
+index 8db8a81d182dd1fc..2cdd5c1fe790f55c 100644
+--- a/stdlib/qsort.c
++++ b/stdlib/qsort.c
+@@ -354,7 +354,7 @@ __qsort_r (void *const pbase, size_t total_elems, size_t size,
+ if (size > INDIRECT_SORT_SIZE_THRES)
+ total_size = 2 * total_elems * sizeof (void *) + size;
+
+- if (total_size < sizeof buf)
++ if (total_size <= sizeof tmp)
+ buf = tmp;
+ else
+ {
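
Editorial note: the pitfall fixed above is that sizeof applied to a pointer yields the pointer size, not the size of the buffer it points to. A small standalone sketch of the difference (names are illustrative, not the glibc internals):

#include <stdio.h>

int
main (void)
{
  char tmp[1024];     /* On-stack scratch buffer, as in qsort_r.  */
  char *buf = tmp;    /* Pointer that may later point to the heap.  */

  /* sizeof tmp is the array size (1024); sizeof buf is only the size
     of a pointer (4 or 8), so a "fits on the stack" check written
     against buf is almost always false and forces a needless
     malloc/free.  */
  printf ("sizeof tmp = %zu, sizeof buf = %zu\n", sizeof tmp, sizeof buf);
  return 0;
}
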
diff --git a/glibc-RHEL-24168-2.patch b/glibc-RHEL-24168-2.patch
new file mode 100644
index 0000000..aebb3f7
--- /dev/null
+++ b/glibc-RHEL-24168-2.patch
@@ -0,0 +1,344 @@
+commit d275970ab56f8ba6a3ca598aba75db4daabe5924
+Author: Adhemerval Zanella
+Date: Fri Apr 8 09:57:57 2022 -0300
+
+ stdlib: Reflow and sort most variable assignments
+
+diff --git a/stdlib/Makefile b/stdlib/Makefile
+index fe43bec0f9d581d5..03f8478c64408ed3 100644
+--- a/stdlib/Makefile
++++ b/stdlib/Makefile
+@@ -22,49 +22,145 @@ subdir := stdlib
+
+ include ../Makeconfig
+
+-headers := stdlib.h bits/stdlib.h bits/stdlib-ldbl.h bits/stdlib-float.h \
+- monetary.h bits/monetary-ldbl.h \
+- inttypes.h stdint.h bits/wordsize.h bits/timesize.h \
+- errno.h sys/errno.h bits/errno.h bits/types/error_t.h \
+- ucontext.h sys/ucontext.h bits/indirect-return.h \
+- alloca.h fmtmsg.h \
+- bits/stdlib-bsearch.h sys/random.h bits/stdint-intn.h \
+- bits/stdint-uintn.h bits/time64.h \
+-
+-routines := \
+- atof atoi atol atoll \
+- abort \
+- bsearch qsort msort \
+- getenv putenv setenv secure-getenv \
+- exit on_exit atexit cxa_atexit cxa_finalize old_atexit \
+- quick_exit at_quick_exit cxa_at_quick_exit cxa_thread_atexit_impl \
+- abs labs llabs \
+- div ldiv lldiv \
+- mblen mbstowcs mbtowc wcstombs wctomb \
+- random random_r rand rand_r \
+- drand48 erand48 lrand48 nrand48 mrand48 jrand48 \
+- srand48 seed48 lcong48 \
+- drand48_r erand48_r lrand48_r nrand48_r mrand48_r jrand48_r \
+- srand48_r seed48_r lcong48_r \
+- drand48-iter getrandom getentropy \
+- strfromf strfromd strfroml \
+- strtol strtoul strtoll strtoull \
+- strtol_l strtoul_l strtoll_l strtoull_l \
+- strtof strtod strtold \
+- strtof_l strtod_l strtold_l \
+- strtof_nan strtod_nan strtold_nan \
+- system canonicalize \
+- a64l l64a \
+- rpmatch strfmon strfmon_l getsubopt xpg_basename fmtmsg \
+- getcontext setcontext makecontext swapcontext
+-aux = grouping groupingwc tens_in_limb
++headers := \
++ alloca.h \
++ bits/errno.h \
++ bits/indirect-return.h \
++ bits/monetary-ldbl.h \
++ bits/stdint-intn.h \
++ bits/stdint-uintn.h \
++ bits/stdlib-bsearch.h \
++ bits/stdlib-float.h \
++ bits/stdlib.h \
++ bits/stdlib-ldbl.h \
++ bits/time64.h \
++ bits/timesize.h \
++ bits/types/error_t.h \
++ bits/wordsize.h \
++ errno.h \
++ fmtmsg.h \
++ inttypes.h \
++ monetary.h \
++ stdint.h \
++ stdlib.h \
++ sys/errno.h \
++ sys/random.h \
++ sys/ucontext.h \
++ ucontext.h \
++ # headers
++
++routines := \
++ a64l \
++ abort \
++ abs \
++ at_quick_exit \
++ atof \
++ atoi \
++ atol\
++ atoll \
++ bsearch \
++ canonicalize \
++ cxa_at_quick_exit \
++ cxa_atexit \
++ cxa_finalize \
++ cxa_thread_atexit_impl \
++ div \
++ drand48 \
++ drand48-iter \
++ drand48_r \
++ erand48 \
++ erand48_r \
++ exit \
++ fmtmsg \
++ getcontext \
++ getentropy \
++ getenv \
++ getrandom \
++ getsubopt \
++ jrand48 \
++ jrand48_r \
++ l64a \
++ labs \
++ lcong48 \
++ lcong48_r \
++ ldiv \
++ llabs \
++ lldiv \
++ lrand48 \
++ lrand48_r \
++ makecontext \
++ mblen \
++ mbstowcs \
++ mbtowc \
++ mrand48 \
++ mrand48_r \
++ msort \
++ nrand48 \
++ nrand48_r \
++ old_atexit \
++ on_exit atexit \
++ putenv \
++ qsort \
++ quick_exit \
++ rand \
++ rand_r \
++ random \
++ random_r \
++ rpmatch \
++ secure-getenv \
++ seed48 \
++ seed48_r \
++ setcontext \
++ setenv \
++ srand48 \
++ srand48_r \
++ strfmon \
++ strfmon_l \
++ strfromd \
++ strfromf \
++ strfroml \
++ strtod \
++ strtod_l \
++ strtod_nan \
++ strtof \
++ strtof_l \
++ strtof_nan \
++ strtol \
++ strtol_l \
++ strtold \
++ strtold_l \
++ strtold_nan \
++ strtoll \
++ strtoll_l \
++ strtoul \
++ strtoul_l \
++ strtoull \
++ strtoull_l \
++ swapcontext \
++ system \
++ wcstombs \
++ wctomb \
++ xpg_basename \
++ # routines
++
++aux = \
++ grouping \
++ groupingwc \
++ tens_in_limb \
++ # aux
+
+ # These routines will be omitted from the libc shared object.
+ # Instead the static object files will be included in a special archive
+ # linked against when the shared library will be used.
+-static-only-routines = atexit at_quick_exit
++static-only-routines = \
++ atexit \
++ at_quick_exit \
++ # static-only-routines
++
++test-srcs := \
++ tst-fmtmsg \
++ #test-srcs
+
+-test-srcs := tst-fmtmsg
+ tests := \
+ bug-fmtmsg1 \
+ bug-getcontext \
+@@ -155,15 +251,29 @@ tests := \
+ tst-width \
+ tst-width-stdint \
+ tst-xpg-basename \
+-# tests
++ # tests
++
++tests-internal := \
++ tst-strtod1i \
++ tst-strtod3 \
++ tst-strtod4 \
++ tst-strtod5i \
++ tst-tls-atexit \
++ tst-tls-atexit-nodelete \
++ # tests-internal
++
++tests-static := \
++ tst-secure-getenv \
++ # tests-static
+
+-tests-internal := tst-strtod1i tst-strtod3 tst-strtod4 tst-strtod5i \
+- tst-tls-atexit tst-tls-atexit-nodelete
+-tests-static := tst-secure-getenv
+-tests-container := tst-system
++tests-container := \
++ tst-system \
++ #tests-container
+
+ ifeq ($(build-hardcoded-path-in-tests),yes)
+-tests += tst-empty-env
++tests += \
++ tst-empty-env \
++ # tests
+ endif
+
+ LDLIBS-test-atexit-race = $(shared-thread-library)
+@@ -188,30 +298,76 @@ CFLAGS-tst-thread-quick_exit.o = -std=c++11
+ LDLIBS-tst-thread-quick_exit = -lstdc++
+ $(objpfx)tst-thread-quick_exit: $(shared-thread-library)
+ else
+-tests-unsupported += tst-quick_exit tst-thread-quick_exit
++tests-unsupported += \
++ tst-quick_exit \
++ tst-thread-quick_exit \
++ # tests-unsupported
+ endif
+
+-modules-names = tst-tls-atexit-lib test-dlclose-exit-race-helper
++modules-names = \
++ test-dlclose-exit-race-helper \
++ tst-tls-atexit-lib \
++ # modules-names
+ extra-test-objs += $(addsuffix .os, $(modules-names))
+
+ ifeq ($(build-shared),yes)
+-tests += tst-putenv
++tests += \
++ tst-putenv \
++ # tests
+ endif
+
+ # Several mpn functions from GNU MP are used by the strtod function.
+-mpn-routines := inlines add_n addmul_1 cmp divmod_1 divrem udiv_qrnnd \
+- lshift rshift mod_1 mul mul_1 mul_n sub_n submul_1
+-mpn-headers = longlong.h gmp.h gmp-impl.h gmp-mparam.h asm-syntax.h
+-
+-routines := $(strip $(routines) $(mpn-routines)) \
+- dbl2mpn ldbl2mpn \
+- mpn2flt mpn2dbl mpn2ldbl
+-aux += fpioconst mp_clz_tab
+-
+-tests-extras += tst-putenvmod
+-extra-test-objs += tst-putenvmod.os
+-
+-generated += isomac isomac.out tst-putenvmod.so
++mpn-routines := \
++ add_n \
++ addmul_1 \
++ cmp \
++ divmod_1 \
++ divrem \
++ inlines \
++ lshift \
++ mod_1 \
++ mul \
++ mul_1 \
++ mul_n \
++ rshift \
++ sub_n \
++ submul_1 \
++ udiv_qrnnd \
++ # mpn-routines
++mpn-headers = \
++ asm-syntax.h \
++ gmp-impl.h \
++ gmp-mparam.h \
++ gmp.h \
++ longlong.h \
++ # mpn-headers
++
++routines := \
++ $(strip $(routines) $(mpn-routines)) \
++ dbl2mpn \
++ ldbl2mpn \
++ mpn2dbl \
++ mpn2flt \
++ mpn2ldbl \
++ # routines
++aux += \
++ fpioconst \
++ mp_clz_tab \
++ # aux
++
++tests-extras += \
++ tst-putenvmod \
++ # tests-extras
++
++extra-test-objs += \
++ tst-putenvmod.os \
++ # extra-test-objs
++
++generated += \
++ isomac \
++ isomac.out \
++ tst-putenvmod.so \
++ # generated
+
+ CFLAGS-bsearch.c += $(uses-callbacks)
+ CFLAGS-msort.c += $(uses-callbacks)
+@@ -247,9 +403,17 @@ endif
+ include ../Rules
+
+ ifeq ($(run-built-tests),yes)
+-LOCALES := cs_CZ.UTF-8 de_DE.UTF-8 en_US.ISO-8859-1 tr_TR.UTF-8 \
+- tr_TR.ISO-8859-9 tg_TJ.UTF-8 hr_HR.UTF-8 hi_IN.UTF-8 \
+- el_GR.UTF-8
++LOCALES := \
++ cs_CZ.UTF-8 \
++ de_DE.UTF-8 \
++ el_GR.UTF-8 \
++ en_US.ISO-8859-1 \
++ hi_IN.UTF-8 \
++ hr_HR.UTF-8 \
++ tg_TJ.UTF-8 \
++ tr_TR.ISO-8859-9 \
++ tr_TR.UTF-8 \
++ # LOCALES
+ include ../gen-locales.mk
+
+ $(objpfx)bug-strtod2.out: $(gen-locales)
diff --git a/glibc-RHEL-24168-20.patch b/glibc-RHEL-24168-20.patch
new file mode 100644
index 0000000..f829d9a
--- /dev/null
+++ b/glibc-RHEL-24168-20.patch
@@ -0,0 +1,65 @@
+commit e7b90e6e605cf236d4bd79e4930cd6a46f9932c7
+Author: Paul Eggert
+Date: Thu Feb 1 11:52:46 2024 -0800
+
+ stdlib: fix qsort example in manual
+
+ * manual/search.texi (Comparison Functions, Array Sort Function):
+ Sort an array of long ints, not doubles, to avoid hassles
+ with NaNs.
+
+ Reviewed-by: Siddhesh Poyarekar
+
+diff --git a/manual/search.texi b/manual/search.texi
+index ffaadc46f51b18f9..db577a5332651c36 100644
+--- a/manual/search.texi
++++ b/manual/search.texi
+@@ -35,19 +35,22 @@ second, zero if they are ``equal'', and positive if the first argument
+ is ``greater''.
+
+ Here is an example of a comparison function which works with an array of
+-numbers of type @code{double}:
++numbers of type @code{long int}:
+
+ @smallexample
+ int
+-compare_doubles (const void *a, const void *b)
++compare_long_ints (const void *a, const void *b)
+ @{
+- const double *da = (const double *) a;
+- const double *db = (const double *) b;
++ const long int *la = a;
++ const long int *lb = b;
+
+- return (*da > *db) - (*da < *db);
++ return (*la > *lb) - (*la < *lb);
+ @}
+ @end smallexample
+
++(The code would have to be more complicated for an array of @code{double},
++to handle NaNs correctly.)
++
+ The header file @file{stdlib.h} defines a name for the data type of
+ comparison functions. This type is a GNU extension.
+
+@@ -183,16 +186,16 @@ in the array before making some comparisons. The only way to perform
+ a stable sort with @code{qsort} is to first augment the objects with a
+ monotonic counter of some kind.
+
+-Here is a simple example of sorting an array of doubles in numerical
++Here is a simple example of sorting an array of @code{long int} in numerical
+ order, using the comparison function defined above (@pxref{Comparison
+ Functions}):
+
+ @smallexample
+ @{
+- double *array;
+- int size;
++ long int *array;
++ size_t nmemb;
+ @dots{}
+- qsort (array, size, sizeof (double), compare_doubles);
++ qsort (array, nmemb, sizeof *array, compare_long_ints);
+ @}
+ @end smallexample
+
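
Editorial note: for reference, the corrected manual snippet assembles into a complete program along these lines (a sketch mirroring the texinfo example, not text taken from the manual):

#include <stdio.h>
#include <stdlib.h>

/* Comparison function from the manual example: valid for any two
   long ints, with no risk of overflow in the subtraction idiom.  */
static int
compare_long_ints (const void *a, const void *b)
{
  const long int *la = a;
  const long int *lb = b;
  return (*la > *lb) - (*la < *lb);
}

int
main (void)
{
  long int array[] = { 42, -7, 0, 1000000L, 3 };
  size_t nmemb = sizeof array / sizeof array[0];

  qsort (array, nmemb, sizeof *array, compare_long_ints);

  for (size_t i = 0; i < nmemb; ++i)
    printf ("%ld\n", array[i]);
  return 0;
}
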
diff --git a/glibc-RHEL-24168-21.patch b/glibc-RHEL-24168-21.patch
new file mode 100644
index 0000000..c14d359
--- /dev/null
+++ b/glibc-RHEL-24168-21.patch
@@ -0,0 +1,142 @@
+commit 57581acd9559217e859fdac693145ce6399f4d70
+Author: Paul Eggert
+Date: Sat Apr 6 08:44:01 2024 -0700
+
+ Fix bsearch, qsort doc to match POSIX better
+
+ * manual/search.texi (Array Search Function):
+ Correct the statement about lfind’s mean runtime:
+ it is proportional to a number (not that number),
+ and this is true only if random elements are searched for.
+ Relax the constraint on bsearch’s array argument:
+ POSIX says it need not be sorted, only partially sorted.
+ Say that the first arg passed to bsearch’s comparison function
+ is the key, and the second arg is an array element, as
+ POSIX requires. For bsearch and qsort, say that the
+ comparison function should not alter the array, as POSIX
+ requires. For qsort, say that the comparison function
+ must define a total order, as POSIX requires, that
+ it should not depend on element addresses, that
+ the original array index can be used for stable sorts,
+ and that if qsort still works if memory allocation fails.
+ Be more consistent in calling the array elements
+ “elements” rather than “objects”.
+
+ Co-authored-by: Zack Weinberg
+
+diff --git a/manual/search.texi b/manual/search.texi
+index db577a5332651c36..cb08c494092ef77f 100644
+--- a/manual/search.texi
++++ b/manual/search.texi
+@@ -84,8 +84,9 @@ The return value is a pointer to the matching element in the array
+ starting at @var{base} if it is found. If no matching element is
+ available @code{NULL} is returned.
+
+-The mean runtime of this function is @code{*@var{nmemb}}/2. This
+-function should only be used if elements often get added to or deleted from
++The mean runtime of this function is proportional to @code{*@var{nmemb}/2},
++assuming random elements of the array are searched for. This
++function should be used only if elements often get added to or deleted from
+ the array in which case it might not be useful to sort the array before
+ searching.
+ @end deftypefun
+@@ -122,26 +123,34 @@ bytes. If one is sure the element is in the array it is better to use
+ calling @code{lsearch}.
+ @end deftypefun
+
+-To search a sorted array for an element matching the key, use the
+-@code{bsearch} function. The prototype for this function is in
++To search a sorted or partially sorted array for an element matching the key,
++use the @code{bsearch} function. The prototype for this function is in
+ the header file @file{stdlib.h}.
+ @pindex stdlib.h
+
+ @deftypefun {void *} bsearch (const void *@var{key}, const void *@var{array}, size_t @var{count}, size_t @var{size}, comparison_fn_t @var{compare})
+ @standards{ISO, stdlib.h}
+ @safety{@prelim{}@mtsafe{}@assafe{}@acsafe{}}
+-The @code{bsearch} function searches the sorted array @var{array} for an object
++The @code{bsearch} function searches @var{array} for an element
+ that is equivalent to @var{key}. The array contains @var{count} elements,
+ each of which is of size @var{size} bytes.
+
+ The @var{compare} function is used to perform the comparison. This
+-function is called with two pointer arguments and should return an
++function is called with arguments that point to the key and to an
++array element, in that order, and should return an
+ integer less than, equal to, or greater than zero corresponding to
+-whether its first argument is considered less than, equal to, or greater
+-than its second argument. The elements of the @var{array} must already
+-be sorted in ascending order according to this comparison function.
+-
+-The return value is a pointer to the matching array element, or a null
++whether the key is considered less than, equal to, or greater than
++the array element. The function should not alter the array's contents,
++and the same array element should always compare the same way with the key.
++
++Although the array need not be completely sorted, it should be
++partially sorted with respect to @var{key}. That is, the array should
++begin with elements that compare less than @var{key}, followed by
++elements that compare equal to @var{key}, and ending with elements
++that compare greater than @var{key}. Any or all of these element
++sequences can be empty.
++
++The return value is a pointer to a matching array element, or a null
+ pointer if no match is found. If the array contains more than one element
+ that matches, the one that is returned is unspecified.
+
+@@ -171,20 +180,22 @@ array elements. This function is called with two pointer arguments and
+ should return an integer less than, equal to, or greater than zero
+ corresponding to whether its first argument is considered less than,
+ equal to, or greater than its second argument.
++The function must not alter the array's contents, and must define a
++total ordering on the array elements, including any unusual values
++such as floating-point NaN (@pxref{Infinity and NaN}).
++Because the sorting process can move elements,
++the function's return value must not depend on the element addresses
++or the relative positions of elements within the array,
++as these are meaningless while @code{qsort} is running.
+
+ @cindex stable sorting
+-@strong{Warning:} If two objects compare as equal, their order after
++@strong{Warning:} If two elements compare equal, their order after
+ sorting is unpredictable. That is to say, the sorting is not stable.
+ This can make a difference when the comparison considers only part of
+-the elements. Two elements with the same sort key may differ in other
+-respects.
+-
+-Although the object addresses passed to the comparison function lie
+-within the array, they need not correspond with the original locations
+-of those objects because the sorting algorithm may swap around objects
+-in the array before making some comparisons. The only way to perform
+-a stable sort with @code{qsort} is to first augment the objects with a
+-monotonic counter of some kind.
++the elements and two elements that compare equal may differ in other
++respects. To ensure a stable sort in this situation, you can augment
++each element with an appropriate tie-breaking value, such as its
++original array index.
+
+ Here is a simple example of sorting an array of @code{long int} in numerical
+ order, using the comparison function defined above (@pxref{Comparison
+@@ -202,18 +213,19 @@ Functions}):
+ The @code{qsort} function derives its name from the fact that it was
+ originally implemented using the ``quick sort'' algorithm.
+
+-The implementation of @code{qsort} attempts to allocate auxiliary storage
++The implementation of @code{qsort} attempts to allocate auxiliary memory
+ and use the merge sort algorithm, without violating C standard requirement
+ that arguments passed to the comparison function point within the array.
++If the memory allocation fails, @code{qsort} resorts to a slower algorithm.
+ @end deftypefun
+
+ @node Search/Sort Example
+ @section Searching and Sorting Example
+
+ Here is an example showing the use of @code{qsort} and @code{bsearch}
+-with an array of structures. The objects in the array are sorted
++with an array of structures. The elements of the array are sorted
+ by comparing their @code{name} fields with the @code{strcmp} function.
+-Then, we can look up individual objects based on their names.
++Then, we can look up individual elements based on their names.
+
+ @comment This example is dedicated to the memory of Jim Henson. RIP.
+ @smallexample
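
Editorial note: the reworded manual text above suggests making qsort stable by augmenting each element with its original array index as a tie breaker. A minimal sketch of that technique (hypothetical struct and names):

#include <stdio.h>
#include <stdlib.h>

struct keyed
{
  int key;              /* Sort key; duplicates possible.  */
  size_t index;         /* Original position, used as tie breaker.  */
  const char *payload;
};

static int
compare_stable (const void *a, const void *b)
{
  const struct keyed *ka = a;
  const struct keyed *kb = b;
  if (ka->key != kb->key)
    return (ka->key > kb->key) - (ka->key < kb->key);
  /* Equal keys: fall back to the original index, which makes the
     overall ordering stable.  */
  return (ka->index > kb->index) - (ka->index < kb->index);
}

int
main (void)
{
  struct keyed items[] = {
    { 2, 0, "first 2" }, { 1, 0, "only 1" }, { 2, 0, "second 2" },
  };
  size_t n = sizeof items / sizeof items[0];
  for (size_t i = 0; i < n; ++i)
    items[i].index = i;

  qsort (items, n, sizeof items[0], compare_stable);

  for (size_t i = 0; i < n; ++i)
    printf ("%d %s\n", items[i].key, items[i].payload);
  return 0;
}
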
diff --git a/glibc-RHEL-24168-22.patch b/glibc-RHEL-24168-22.patch
new file mode 100644
index 0000000..d9aa1e1
--- /dev/null
+++ b/glibc-RHEL-24168-22.patch
@@ -0,0 +1,26 @@
+commit 7eed691cc2b6c5dbb6066ee1251606a744c7f05c
+Author: Arjun Shankar
+Date: Wed Jul 2 17:11:32 2025 +0200
+
+ stdlib/Makefile: Remove deleted test's libm dependency
+
+ tst-qsort5 was deleted in 709fbd3ec3595f2d1076b4fec09a739327459288.
+ Therefore remove its redundant libm dependency.
+
+ Reviewed-by: Florian Weimer
+
+Conflicts:
+ stdlib/Makefile: Context line mismatch due to missing tests.
+
+diff --git a/stdlib/Makefile b/stdlib/Makefile
+index a9d91a57c08ac506..e517e306b868c432 100644
+--- a/stdlib/Makefile
++++ b/stdlib/Makefile
+@@ -484,7 +484,6 @@ $(objpfx)tst-setcontext3.out: tst-setcontext3.sh $(objpfx)tst-setcontext3
+ $(common-objpfx)stdlib/; \
+ $(evaluate-test)
+
+-$(objpfx)tst-qsort5: $(libm)
+ $(objpfx)tst-getenv-signal: $(shared-thread-library)
+ $(objpfx)tst-getenv-thread: $(shared-thread-library)
+ $(objpfx)tst-getenv-unsetenv: $(shared-thread-library)
diff --git a/glibc-RHEL-24168-3.patch b/glibc-RHEL-24168-3.patch
new file mode 100644
index 0000000..cf80613
--- /dev/null
+++ b/glibc-RHEL-24168-3.patch
@@ -0,0 +1,286 @@
+commit fccf38c51746e0817c2409bb361398f9465e0760
+Author: Adhemerval Zanella
+Date: Tue Oct 3 09:22:45 2023 -0300
+
+ string: Add internal memswap implementation
+
+ The prototype is:
+
+ void __memswap (void *restrict p1, void *restrict p2, size_t n)
+
+ The function swaps the content of two memory blocks P1 and P2 of
+ len N. Memory overlap is NOT handled.
+
+ It will be used on qsort optimization.
+
+ Checked on x86_64-linux-gnu and aarch64-linux-gnu.
+ Reviewed-by: Noah Goldstein
+
+diff --git a/string/Makefile b/string/Makefile
+index 3e4331113f08424c..3ebf7597aad75bfe 100644
+--- a/string/Makefile
++++ b/string/Makefile
+@@ -66,6 +66,18 @@ tests := tester inl-tester noinl-tester testcopy test-ffs \
+ test-sig_np tst-strerror-fail \
+ test-strdup test-strndup
+
++tests-static-internal := \
++ test-memswap \
++# tests-static-internal
++
++tests-internal := \
++ $(tests-static-internal) \
++ # tests-internal
++
++tests-static := \
++ $(tests-static-internal) \
++ # tests-static
++
+ # Both tests require the .mo translation files generated by msgfmt.
+ tests-translation := tst-strsignal \
+ tst-strerror
+diff --git a/string/test-memswap.c b/string/test-memswap.c
+new file mode 100644
+index 0000000000000000..162beb91e3e96c23
+--- /dev/null
++++ b/string/test-memswap.c
+@@ -0,0 +1,192 @@
++/* Test and measure memcpy functions.
++ Copyright (C) 2023 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ . */
++
++#include
++#include
++#include
++
++#define TEST_MAIN
++#define BUF1PAGES 3
++#include "test-string.h"
++
++static unsigned char *ref1;
++static unsigned char *ref2;
++
++static void
++do_one_test (unsigned char *p1, unsigned char *ref1, unsigned char *p2,
++ unsigned char *ref2, size_t len)
++{
++ __memswap (p1, p2, len);
++
++ TEST_COMPARE_BLOB (p1, len, ref2, len);
++ TEST_COMPARE_BLOB (p2, len, ref1, len);
++}
++
++static inline void
++do_test (size_t align1, size_t align2, size_t len)
++{
++ align1 &= page_size;
++ if (align1 + len >= page_size)
++ return;
++
++ align2 &= page_size;
++ if (align2 + len >= page_size)
++ return;
++
++ unsigned char *p1 = buf1 + align1;
++ unsigned char *p2 = buf2 + align2;
++ for (size_t repeats = 0; repeats < 2; ++repeats)
++ {
++ size_t i, j;
++ for (i = 0, j = 1; i < len; i++, j += 23)
++ {
++ ref1[i] = p1[i] = j;
++ ref2[i] = p2[i] = UCHAR_MAX - j;
++ }
++
++ do_one_test (p1, ref1, p2, ref2, len);
++ }
++}
++
++static void
++do_random_tests (void)
++{
++ for (size_t n = 0; n < ITERATIONS; n++)
++ {
++ size_t len, size, size1, size2, align1, align2;
++
++ if (n == 0)
++ {
++ len = getpagesize ();
++ size = len + 512;
++ size1 = size;
++ size2 = size;
++ align1 = 512;
++ align2 = 512;
++ }
++ else
++ {
++ if ((random () & 255) == 0)
++ size = 65536;
++ else
++ size = 768;
++ if (size > page_size)
++ size = page_size;
++ size1 = size;
++ size2 = size;
++ size_t i = random ();
++ if (i & 3)
++ size -= 256;
++ if (i & 1)
++ size1 -= 256;
++ if (i & 2)
++ size2 -= 256;
++ if (i & 4)
++ {
++ len = random () % size;
++ align1 = size1 - len - (random () & 31);
++ align2 = size2 - len - (random () & 31);
++ if (align1 > size1)
++ align1 = 0;
++ if (align2 > size2)
++ align2 = 0;
++ }
++ else
++ {
++ align1 = random () & 63;
++ align2 = random () & 63;
++ len = random () % size;
++ if (align1 + len > size1)
++ align1 = size1 - len;
++ if (align2 + len > size2)
++ align2 = size2 - len;
++ }
++ }
++ unsigned char *p1 = buf1 + page_size - size1;
++ unsigned char *p2 = buf2 + page_size - size2;
++ size_t j = align1 + len + 256;
++ if (j > size1)
++ j = size1;
++ for (size_t i = 0; i < j; ++i)
++ ref1[i] = p1[i] = random () & 255;
++
++ j = align2 + len + 256;
++ if (j > size2)
++ j = size2;
++
++ for (size_t i = 0; i < j; ++i)
++ ref2[i] = p2[i] = random () & 255;
++
++ do_one_test (p1 + align1, ref1 + align1, p2 + align2, ref2 + align2, len);
++ }
++}
++
++static int
++test_main (void)
++{
++ test_init ();
++ /* Use the start of buf1 for reference buffers. */
++ ref1 = buf1;
++ ref2 = buf1 + page_size;
++ buf1 = ref2 + page_size;
++
++ printf ("%23s", "");
++ printf ("\t__memswap\n");
++
++ for (size_t i = 0; i < 18; ++i)
++ {
++ do_test (0, 0, 1 << i);
++ do_test (i, 0, 1 << i);
++ do_test (0, i, 1 << i);
++ do_test (i, i, 1 << i);
++ }
++
++ for (size_t i = 0; i < 32; ++i)
++ {
++ do_test (0, 0, i);
++ do_test (i, 0, i);
++ do_test (0, i, i);
++ do_test (i, i, i);
++ }
++
++ for (size_t i = 3; i < 32; ++i)
++ {
++ if ((i & (i - 1)) == 0)
++ continue;
++ do_test (0, 0, 16 * i);
++ do_test (i, 0, 16 * i);
++ do_test (0, i, 16 * i);
++ do_test (i, i, 16 * i);
++ }
++
++ for (size_t i = 19; i <= 25; ++i)
++ {
++ do_test (255, 0, 1 << i);
++ do_test (0, 4000, 1 << i);
++ do_test (0, 255, i);
++ do_test (0, 4000, i);
++ }
++
++ do_test (0, 0, getpagesize ());
++
++ do_random_tests ();
++
++ return 0;
++}
++
++#include
+diff --git a/sysdeps/generic/memswap.h b/sysdeps/generic/memswap.h
+new file mode 100644
+index 0000000000000000..f09dae1ebbc2ec0f
+--- /dev/null
++++ b/sysdeps/generic/memswap.h
+@@ -0,0 +1,41 @@
++/* Swap the content of two memory blocks, overlap is NOT handled.
++ Copyright (C) 2023 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ . */
++
++#include
++
++static inline void
++__memswap (void *__restrict p1, void *__restrict p2, size_t n)
++{
++ /* Use multiple small memcpys with constant size to enable inlining on most
++ targets. */
++ enum { SWAP_GENERIC_SIZE = 32 };
++ unsigned char tmp[SWAP_GENERIC_SIZE];
++ while (n > SWAP_GENERIC_SIZE)
++ {
++ memcpy (tmp, p1, SWAP_GENERIC_SIZE);
++ p1 = __mempcpy (p1, p2, SWAP_GENERIC_SIZE);
++ p2 = __mempcpy (p2, tmp, SWAP_GENERIC_SIZE);
++ n -= SWAP_GENERIC_SIZE;
++ }
++ while (n > 0)
++ {
++ unsigned char t = ((unsigned char *)p1)[--n];
++ ((unsigned char *)p1)[n] = ((unsigned char *)p2)[n];
++ ((unsigned char *)p2)[n] = t;
++ }
++}
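
Editorial note: the contract of the helper added above is "swap N bytes between two equally sized, non-overlapping blocks". Outside glibc the same bounce-buffer idea can be written against plain memcpy; this is a hypothetical sketch of a caller-side equivalent, not the internal __memswap.

#include <assert.h>
#include <string.h>

/* Swap N bytes between two non-overlapping blocks using a small
   fixed-size bounce buffer, as the generic implementation does.  */
static void
memswap_sketch (void *restrict p1, void *restrict p2, size_t n)
{
  unsigned char tmp[32];
  while (n >= sizeof tmp)
    {
      memcpy (tmp, p1, sizeof tmp);
      memcpy (p1, p2, sizeof tmp);
      memcpy (p2, tmp, sizeof tmp);
      p1 = (unsigned char *) p1 + sizeof tmp;
      p2 = (unsigned char *) p2 + sizeof tmp;
      n -= sizeof tmp;
    }
  if (n > 0)
    {
      memcpy (tmp, p1, n);
      memcpy (p1, p2, n);
      memcpy (p2, tmp, n);
    }
}

int
main (void)
{
  char a[40], b[40];
  memset (a, 'a', sizeof a);
  memset (b, 'b', sizeof b);
  memswap_sketch (a, b, sizeof a);
  assert (a[0] == 'b' && a[39] == 'b' && b[0] == 'a' && b[39] == 'a');
  return 0;
}
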
diff --git a/glibc-RHEL-24168-4.patch b/glibc-RHEL-24168-4.patch
new file mode 100644
index 0000000..59ea6e5
--- /dev/null
+++ b/glibc-RHEL-24168-4.patch
@@ -0,0 +1,157 @@
+commit 21d30c774c7f9f5878f0bf9438736c702b0a58a3
+Author: Adhemerval Zanella
+Date: Tue Oct 3 09:22:46 2023 -0300
+
+ stdlib: Optimization qsort{_r} swap implementation
+
+ The optimization takes in consideration both the most common elements
+ are either 32 or 64 bit in size and inputs are aligned to the word
+ boundary. This is similar to what msort does.
+
+ For large buffer the swap operation uses memcpy/mempcpy with a
+ small fixed size buffer (so compiler might inline the operations).
+
+ Checked on x86_64-linux-gnu.
+ Reviewed-by: Noah Goldstein
+
+diff --git a/stdlib/qsort.c b/stdlib/qsort.c
+index 23f2d283147073ac..59b220ba1c375ca3 100644
+--- a/stdlib/qsort.c
++++ b/stdlib/qsort.c
+@@ -22,22 +22,73 @@
+
+ #include
+ #include
++#include
+ #include
+ #include
++#include
+
+-/* Byte-wise swap two items of size SIZE. */
+-#define SWAP(a, b, size) \
+- do \
+- { \
+- size_t __size = (size); \
+- char *__a = (a), *__b = (b); \
+- do \
+- { \
+- char __tmp = *__a; \
+- *__a++ = *__b; \
+- *__b++ = __tmp; \
+- } while (--__size > 0); \
+- } while (0)
++/* Swap SIZE bytes between addresses A and B. These helpers are provided
++ along the generic one as an optimization. */
++
++enum swap_type_t
++ {
++ SWAP_WORDS_64,
++ SWAP_WORDS_32,
++ SWAP_BYTES
++ };
++
++/* If this function returns true, elements can be safely copied using word
++ loads and stores. Otherwise, it might not be safe. BASE (as an integer)
++ must be a multiple of the word alignment. SIZE must be a multiple of
++ WORDSIZE. Since WORDSIZE must be a multiple of the word alignment, and
++ WORDSIZE is a power of two on all supported platforms, this function for
++ speed merely checks that BASE and SIZE are both multiples of the word
++ size. */
++static inline bool
++is_aligned (const void *base, size_t size, size_t wordsize)
++{
++ return (((uintptr_t) base | size) & (wordsize - 1)) == 0;
++}
++
++static inline void
++swap_words_64 (void * restrict a, void * restrict b, size_t n)
++{
++ typedef uint64_t __attribute__ ((__may_alias__)) u64_alias_t;
++ do
++ {
++ n -= 8;
++ u64_alias_t t = *(u64_alias_t *)(a + n);
++ *(u64_alias_t *)(a + n) = *(u64_alias_t *)(b + n);
++ *(u64_alias_t *)(b + n) = t;
++ } while (n);
++}
++
++static inline void
++swap_words_32 (void * restrict a, void * restrict b, size_t n)
++{
++ typedef uint32_t __attribute__ ((__may_alias__)) u32_alias_t;
++ do
++ {
++ n -= 4;
++ u32_alias_t t = *(u32_alias_t *)(a + n);
++ *(u32_alias_t *)(a + n) = *(u32_alias_t *)(b + n);
++ *(u32_alias_t *)(b + n) = t;
++ } while (n);
++}
++
++/* Replace the indirect call with a serie of if statements. It should help
++ the branch predictor. */
++static void
++do_swap (void * restrict a, void * restrict b, size_t size,
++ enum swap_type_t swap_type)
++{
++ if (swap_type == SWAP_WORDS_64)
++ swap_words_64 (a, b, size);
++ else if (swap_type == SWAP_WORDS_32)
++ swap_words_32 (a, b, size);
++ else
++ __memswap (a, b, size);
++}
+
+ /* Discontinue quicksort algorithm when partition gets below this size.
+ This particular magic number was chosen to work best on a Sun 4/260. */
+@@ -97,6 +148,14 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
+ /* Avoid lossage with unsigned arithmetic below. */
+ return;
+
++ enum swap_type_t swap_type;
++ if (is_aligned (pbase, size, 8))
++ swap_type = SWAP_WORDS_64;
++ else if (is_aligned (pbase, size, 4))
++ swap_type = SWAP_WORDS_32;
++ else
++ swap_type = SWAP_BYTES;
++
+ if (total_elems > MAX_THRESH)
+ {
+ char *lo = base_ptr;
+@@ -120,13 +179,13 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
+ char *mid = lo + size * ((hi - lo) / size >> 1);
+
+ if ((*cmp) ((void *) mid, (void *) lo, arg) < 0)
+- SWAP (mid, lo, size);
++ do_swap (mid, lo, size, swap_type);
+ if ((*cmp) ((void *) hi, (void *) mid, arg) < 0)
+- SWAP (mid, hi, size);
++ do_swap (mid, hi, size, swap_type);
+ else
+ goto jump_over;
+ if ((*cmp) ((void *) mid, (void *) lo, arg) < 0)
+- SWAP (mid, lo, size);
++ do_swap (mid, lo, size, swap_type);
+ jump_over:;
+
+ left_ptr = lo + size;
+@@ -145,7 +204,7 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
+
+ if (left_ptr < right_ptr)
+ {
+- SWAP (left_ptr, right_ptr, size);
++ do_swap (left_ptr, right_ptr, size, swap_type);
+ if (mid == left_ptr)
+ mid = right_ptr;
+ else if (mid == right_ptr)
+@@ -217,7 +276,7 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
+ tmp_ptr = run_ptr;
+
+ if (tmp_ptr != base_ptr)
+- SWAP (tmp_ptr, base_ptr, size);
++ do_swap (tmp_ptr, base_ptr, size, swap_type);
+
+ /* Insertion sort, running from left-hand-side up to right-hand-side. */
+
diff --git a/glibc-RHEL-24168-5.patch b/glibc-RHEL-24168-5.patch
new file mode 100644
index 0000000..414b261
--- /dev/null
+++ b/glibc-RHEL-24168-5.patch
@@ -0,0 +1,125 @@
+commit a035a9857e11faf16ed021b5e80faf215262afd1
+Author: Adhemerval Zanella
+Date: Tue Oct 3 09:22:47 2023 -0300
+
+ stdlib: Move insertion sort out qsort
+
+ Reviewed-by: Noah Goldstein
+
+diff --git a/stdlib/qsort.c b/stdlib/qsort.c
+index 59b220ba1c375ca3..35020e4c00e5fce3 100644
+--- a/stdlib/qsort.c
++++ b/stdlib/qsort.c
+@@ -112,6 +112,58 @@ typedef struct
+ #define STACK_NOT_EMPTY (stack < top)
+
+
++static inline void
++insertion_sort_qsort_partitions (void *const pbase, size_t total_elems,
++ size_t size, enum swap_type_t swap_type,
++ __compar_d_fn_t cmp, void *arg)
++{
++ char *base_ptr = (char *) pbase;
++ char *const end_ptr = &base_ptr[size * (total_elems - 1)];
++ char *tmp_ptr = base_ptr;
++#define min(x, y) ((x) < (y) ? (x) : (y))
++ const size_t max_thresh = MAX_THRESH * size;
++ char *thresh = min(end_ptr, base_ptr + max_thresh);
++ char *run_ptr;
++
++ /* Find smallest element in first threshold and place it at the
++ array's beginning. This is the smallest array element,
++ and the operation speeds up insertion sort's inner loop. */
++
++ for (run_ptr = tmp_ptr + size; run_ptr <= thresh; run_ptr += size)
++ if (cmp (run_ptr, tmp_ptr, arg) < 0)
++ tmp_ptr = run_ptr;
++
++ if (tmp_ptr != base_ptr)
++ do_swap (tmp_ptr, base_ptr, size, swap_type);
++
++ /* Insertion sort, running from left-hand-side up to right-hand-side. */
++
++ run_ptr = base_ptr + size;
++ while ((run_ptr += size) <= end_ptr)
++ {
++ tmp_ptr = run_ptr - size;
++ while (cmp (run_ptr, tmp_ptr, arg) < 0)
++ tmp_ptr -= size;
++
++ tmp_ptr += size;
++ if (tmp_ptr != run_ptr)
++ {
++ char *trav;
++
++ trav = run_ptr + size;
++ while (--trav >= run_ptr)
++ {
++ char c = *trav;
++ char *hi, *lo;
++
++ for (hi = lo = trav; (lo -= size) >= tmp_ptr; hi = lo)
++ *hi = *lo;
++ *hi = c;
++ }
++ }
++ }
++}
++
+ /* Order size using quicksort. This implementation incorporates
+ four optimizations discussed in Sedgewick:
+
+@@ -258,51 +310,6 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
+ for partitions below MAX_THRESH size. BASE_PTR points to the beginning
+ of the array to sort, and END_PTR points at the very last element in
+ the array (*not* one beyond it!). */
+-
+-#define min(x, y) ((x) < (y) ? (x) : (y))
+-
+- {
+- char *const end_ptr = &base_ptr[size * (total_elems - 1)];
+- char *tmp_ptr = base_ptr;
+- char *thresh = min(end_ptr, base_ptr + max_thresh);
+- char *run_ptr;
+-
+- /* Find smallest element in first threshold and place it at the
+- array's beginning. This is the smallest array element,
+- and the operation speeds up insertion sort's inner loop. */
+-
+- for (run_ptr = tmp_ptr + size; run_ptr <= thresh; run_ptr += size)
+- if ((*cmp) ((void *) run_ptr, (void *) tmp_ptr, arg) < 0)
+- tmp_ptr = run_ptr;
+-
+- if (tmp_ptr != base_ptr)
+- do_swap (tmp_ptr, base_ptr, size, swap_type);
+-
+- /* Insertion sort, running from left-hand-side up to right-hand-side. */
+-
+- run_ptr = base_ptr + size;
+- while ((run_ptr += size) <= end_ptr)
+- {
+- tmp_ptr = run_ptr - size;
+- while ((*cmp) ((void *) run_ptr, (void *) tmp_ptr, arg) < 0)
+- tmp_ptr -= size;
+-
+- tmp_ptr += size;
+- if (tmp_ptr != run_ptr)
+- {
+- char *trav;
+-
+- trav = run_ptr + size;
+- while (--trav >= run_ptr)
+- {
+- char c = *trav;
+- char *hi, *lo;
+-
+- for (hi = lo = trav; (lo -= size) >= tmp_ptr; hi = lo)
+- *hi = *lo;
+- *hi = c;
+- }
+- }
+- }
+- }
++ insertion_sort_qsort_partitions (pbase, total_elems, size, swap_type, cmp,
++ arg);
+ }
diff --git a/glibc-RHEL-24168-6.patch b/glibc-RHEL-24168-6.patch
new file mode 100644
index 0000000..ac10f44
--- /dev/null
+++ b/glibc-RHEL-24168-6.patch
@@ -0,0 +1,85 @@
+commit d097f3c79be55d646d86efb7ce876bf84d5ebe4e
+Author: Adhemerval Zanella
+Date: Tue Oct 3 09:22:48 2023 -0300
+
+ stdlib: qsort: Move some macros to inline function
+
+ Reviewed-by: Noah Goldstein
+
+diff --git a/stdlib/qsort.c b/stdlib/qsort.c
+index 35020e4c00e5fce3..821a87420638c5a5 100644
+--- a/stdlib/qsort.c
++++ b/stdlib/qsort.c
+@@ -101,15 +101,28 @@ typedef struct
+ char *hi;
+ } stack_node;
+
+-/* The next 4 #defines implement a very fast in-line stack abstraction. */
+ /* The stack needs log (total_elements) entries (we could even subtract
+ log(MAX_THRESH)). Since total_elements has type size_t, we get as
+ upper bound for log (total_elements):
+ bits per byte (CHAR_BIT) * sizeof(size_t). */
+-#define STACK_SIZE (CHAR_BIT * sizeof (size_t))
+-#define PUSH(low, high) ((void) ((top->lo = (low)), (top->hi = (high)), ++top))
+-#define POP(low, high) ((void) (--top, (low = top->lo), (high = top->hi)))
+-#define STACK_NOT_EMPTY (stack < top)
++enum { STACK_SIZE = CHAR_BIT * sizeof (size_t) };
++
++static inline stack_node *
++push (stack_node *top, char *lo, char *hi)
++{
++ top->lo = lo;
++ top->hi = hi;
++ return ++top;
++}
++
++static inline stack_node *
++pop (stack_node *top, char **lo, char **hi)
++{
++ --top;
++ *lo = top->lo;
++ *hi = top->hi;
++ return top;
++}
+
+
+ static inline void
+@@ -213,11 +226,9 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
+ char *lo = base_ptr;
+ char *hi = &lo[size * (total_elems - 1)];
+ stack_node stack[STACK_SIZE];
+- stack_node *top = stack;
+-
+- PUSH (NULL, NULL);
++ stack_node *top = stack + 1;
+
+- while (STACK_NOT_EMPTY)
++ while (stack < top)
+ {
+ char *left_ptr;
+ char *right_ptr;
+@@ -282,7 +293,7 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
+ {
+ if ((size_t) (hi - left_ptr) <= max_thresh)
+ /* Ignore both small partitions. */
+- POP (lo, hi);
++ top = pop (top, &lo, &hi);
+ else
+ /* Ignore small left partition. */
+ lo = left_ptr;
+@@ -293,13 +304,13 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
+ else if ((right_ptr - lo) > (hi - left_ptr))
+ {
+ /* Push larger left partition indices. */
+- PUSH (lo, right_ptr);
++ top = push (top, lo, right_ptr);
+ lo = left_ptr;
+ }
+ else
+ {
+ /* Push larger right partition indices. */
+- PUSH (left_ptr, hi);
++ top = push (top, left_ptr, hi);
+ hi = right_ptr;
+ }
+ }
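
Editorial note: the refactoring above trades expression macros for typed push/pop helpers while keeping the "stack < top means non-empty" invariant. A tiny hypothetical sketch of the same helper pattern, operating on index ranges rather than byte pointers:

#include <assert.h>
#include <stddef.h>

struct range { size_t lo; size_t hi; };

static struct range *
push (struct range *top, size_t lo, size_t hi)
{
  top->lo = lo;
  top->hi = hi;
  return ++top;
}

static struct range *
pop (struct range *top, size_t *lo, size_t *hi)
{
  --top;
  *lo = top->lo;
  *hi = top->hi;
  return top;
}

int
main (void)
{
  /* Same invariant as the old PUSH/POP/STACK_NOT_EMPTY macros, but with
     type checking and without hiding ++/-- inside expression macros.  */
  struct range stack[8];
  struct range *top = stack;

  top = push (top, 0, 9);
  top = push (top, 10, 19);

  size_t lo, hi;
  top = pop (top, &lo, &hi);
  assert (lo == 10 && hi == 19);
  top = pop (top, &lo, &hi);
  assert (lo == 0 && hi == 9 && top == stack);
  return 0;
}
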
diff --git a/glibc-RHEL-24168-7.patch b/glibc-RHEL-24168-7.patch
new file mode 100644
index 0000000..70136e6
--- /dev/null
+++ b/glibc-RHEL-24168-7.patch
@@ -0,0 +1,176 @@
+commit 274a46c9b25ab733a1fb9fb1497f1beecae30193
+Author: Adhemerval Zanella
+Date: Tue Oct 3 09:22:49 2023 -0300
+
+ stdlib: Implement introsort for qsort (BZ 19305)
+
+ This patch makes the quicksort implementation to acts as introsort, to
+ avoid worse-case performance (and thus making it O(nlog n)). It switch
+ to heapsort when the depth level reaches 2*log2(total elements). The
+ heapsort is a textbook implementation.
+
+ Checked on x86_64-linux-gnu and aarch64-linux-gnu.
+ Reviewed-by: Noah Goldstein
+
+diff --git a/stdlib/qsort.c b/stdlib/qsort.c
+index 821a87420638c5a5..db299eb333cf0302 100644
+--- a/stdlib/qsort.c
++++ b/stdlib/qsort.c
+@@ -99,6 +99,7 @@ typedef struct
+ {
+ char *lo;
+ char *hi;
++ size_t depth;
+ } stack_node;
+
+ /* The stack needs log (total_elements) entries (we could even subtract
+@@ -108,22 +109,85 @@ typedef struct
+ enum { STACK_SIZE = CHAR_BIT * sizeof (size_t) };
+
+ static inline stack_node *
+-push (stack_node *top, char *lo, char *hi)
++push (stack_node *top, char *lo, char *hi, size_t depth)
+ {
+ top->lo = lo;
+ top->hi = hi;
++ top->depth = depth;
+ return ++top;
+ }
+
+ static inline stack_node *
+-pop (stack_node *top, char **lo, char **hi)
++pop (stack_node *top, char **lo, char **hi, size_t *depth)
+ {
+ --top;
+ *lo = top->lo;
+ *hi = top->hi;
++ *depth = top->depth;
+ return top;
+ }
+
++/* NB: N is inclusive bound for BASE. */
++static inline void
++siftdown (void *base, size_t size, size_t k, size_t n,
++ enum swap_type_t swap_type, __compar_d_fn_t cmp, void *arg)
++{
++ while (k <= n / 2)
++ {
++ size_t j = 2 * k;
++ if (j < n && cmp (base + (j * size), base + ((j + 1) * size), arg) < 0)
++ j++;
++
++ if (cmp (base + (k * size), base + (j * size), arg) >= 0)
++ break;
++
++ do_swap (base + (size * j), base + (k * size), size, swap_type);
++ k = j;
++ }
++}
++
++static inline void
++heapify (void *base, size_t size, size_t n, enum swap_type_t swap_type,
++ __compar_d_fn_t cmp, void *arg)
++{
++ size_t k = n / 2;
++ while (1)
++ {
++ siftdown (base, size, k, n, swap_type, cmp, arg);
++ if (k-- == 0)
++ break;
++ }
++}
++
++/* A non-recursive heapsort, used on introsort implementation as a fallback
++ routine with worst-case performance of O(nlog n) and worst-case space
++ complexity of O(1). It sorts the array starting at BASE and ending at
++ END, with each element of SIZE bytes. The SWAP_TYPE is the callback
++ function used to swap elements, and CMP is the function used to compare
++ elements. */
++static void
++heapsort_r (void *base, void *end, size_t size, enum swap_type_t swap_type,
++ __compar_d_fn_t cmp, void *arg)
++{
++ const size_t count = ((uintptr_t) end - (uintptr_t) base) / size;
++
++ if (count < 2)
++ return;
++
++ size_t n = count - 1;
++
++ /* Build the binary heap, largest value at the base[0]. */
++ heapify (base, size, n, swap_type, cmp, arg);
++
++ /* On each iteration base[0:n] is the binary heap, while base[n:count]
++ is sorted. */
++ while (n > 0)
++ {
++ do_swap (base, base + (n * size), size, swap_type);
++ n--;
++ siftdown (base, size, 0, n, swap_type, cmp, arg);
++ }
++}
+
+ static inline void
+ insertion_sort_qsort_partitions (void *const pbase, size_t total_elems,
+@@ -209,7 +273,7 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
+
+ const size_t max_thresh = MAX_THRESH * size;
+
+- if (total_elems == 0)
++ if (total_elems <= 1)
+ /* Avoid lossage with unsigned arithmetic below. */
+ return;
+
+@@ -221,15 +285,26 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
+ else
+ swap_type = SWAP_BYTES;
+
++ /* Maximum depth before quicksort switches to heapsort. */
++ size_t depth = 2 * (sizeof (size_t) * CHAR_BIT - 1
++ - __builtin_clzl (total_elems));
++
+ if (total_elems > MAX_THRESH)
+ {
+ char *lo = base_ptr;
+ char *hi = &lo[size * (total_elems - 1)];
+ stack_node stack[STACK_SIZE];
+- stack_node *top = stack + 1;
++ stack_node *top = push (stack, NULL, NULL, depth);
+
+ while (stack < top)
+ {
++ if (depth == 0)
++ {
++ heapsort_r (lo, hi, size, swap_type, cmp, arg);
++ top = pop (top, &lo, &hi, &depth);
++ continue;
++ }
++
+ char *left_ptr;
+ char *right_ptr;
+
+@@ -293,7 +368,7 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
+ {
+ if ((size_t) (hi - left_ptr) <= max_thresh)
+ /* Ignore both small partitions. */
+- top = pop (top, &lo, &hi);
++ top = pop (top, &lo, &hi, &depth);
+ else
+ /* Ignore small left partition. */
+ lo = left_ptr;
+@@ -304,13 +379,13 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
+ else if ((right_ptr - lo) > (hi - left_ptr))
+ {
+ /* Push larger left partition indices. */
+- top = push (top, lo, right_ptr);
++ top = push (top, lo, right_ptr, depth - 1);
+ lo = left_ptr;
+ }
+ else
+ {
+ /* Push larger right partition indices. */
+- top = push (top, left_ptr, hi);
++ top = push (top, left_ptr, hi, depth - 1);
+ hi = right_ptr;
+ }
+ }
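
Editorial note: the depth limit that triggers the heapsort fallback above is 2 * floor(log2(total_elems)). A quick sketch of how that expression behaves, assuming a GCC-compatible compiler where size_t is unsigned long (as in the glibc build itself):

#include <limits.h>
#include <stdio.h>

int
main (void)
{
  /* The index of the highest set bit of n is
     (bits in size_t - 1) - clz(n), i.e. floor(log2(n)).  */
  size_t inputs[] = { 2, 16, 100, 1000, 1000000 };
  for (size_t i = 0; i < sizeof inputs / sizeof inputs[0]; ++i)
    {
      size_t n = inputs[i];
      size_t depth = 2 * (sizeof (size_t) * CHAR_BIT - 1
                          - __builtin_clzl (n));
      printf ("n = %7zu -> depth limit %zu\n", n, depth);
    }
  return 0;
}
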
diff --git a/glibc-RHEL-24168-8.patch b/glibc-RHEL-24168-8.patch
new file mode 100644
index 0000000..8158906
--- /dev/null
+++ b/glibc-RHEL-24168-8.patch
@@ -0,0 +1,491 @@
+commit 03bf8357e8291857a435afcc3048e0b697b6cc04
+Author: Adhemerval Zanella
+Date: Tue Oct 3 09:22:50 2023 -0300
+
+ stdlib: Remove use of mergesort on qsort (BZ 21719)
+
+ This patch removes the mergesort optimization on qsort implementation
+ and uses the introsort instead. The mergesort implementation has some
+ issues:
+
+ - It is as-safe only for certain types sizes (if total size is less
+ than 1 KB with large element sizes also forcing memory allocation)
+ which contradicts the function documentation. Although not required
+ by the C standard, it is preferable and doable to have an O(1) space
+ implementation.
+
+ - The malloc for certain element size and element number adds
+ arbitrary latency (might even be worse if malloc is interposed).
+
+ - To avoid trigger swap from memory allocation the implementation
+ relies on system information that might be virtualized (for instance
+ VMs with overcommit memory) which might lead to potentially use of
+ swap even if system advertise more memory than actually has. The
+ check also have the downside of issuing syscalls where none is
+ expected (although only once per execution).
+
+ - The mergesort is suboptimal on an already sorted array (BZ#21719).
+
+ The introsort implementation is already optimized to use constant extra
+ space (due to the limit of total number of elements from maximum VM
+ size) and thus can be used to avoid the malloc usage issues.
+
+ Resulting performance is slower due the usage of qsort, specially in the
+ worst-case scenario (partialy or sorted arrays) and due the fact
+ mergesort uses a slight improved swap operations.
+
+ This change also renders the BZ#21719 fix unrequired (since it is meant
+ to fix the sorted input performance degradation for mergesort). The
+ manual is also updated to indicate the function is now async-cancel
+ safe.
+
+ Checked on x86_64-linux-gnu.
+ Reviewed-by: Noah Goldstein
+
+Conflicts:
+ stdlib/msort.c: Deletion had conflicts due to skipped backports.
+
+diff --git a/include/stdlib.h b/include/stdlib.h
+index 22c9fb65c3074765..fd108df58ddf8b89 100644
+--- a/include/stdlib.h
++++ b/include/stdlib.h
+@@ -107,8 +107,6 @@ extern int __posix_openpt (int __oflag) attribute_hidden;
+ extern int __add_to_environ (const char *name, const char *value,
+ const char *combines, int replace)
+ attribute_hidden;
+-extern void _quicksort (void *const pbase, size_t total_elems,
+- size_t size, __compar_d_fn_t cmp, void *arg);
+
+ extern int __on_exit (void (*__func) (int __status, void *__arg), void *__arg);
+
+diff --git a/manual/argp.texi b/manual/argp.texi
+index 0023441812d4e584..b77ad68285ecb732 100644
+--- a/manual/argp.texi
++++ b/manual/argp.texi
+@@ -735,7 +735,7 @@ for options, bad phase of the moon, etc.
+ @c hol_set_group ok
+ @c hol_find_entry ok
+ @c hol_sort @mtslocale @acucorrupt
+-@c qsort dup @acucorrupt
++@c qsort dup
+ @c hol_entry_qcmp @mtslocale
+ @c hol_entry_cmp @mtslocale
+ @c group_cmp ok
+diff --git a/manual/locale.texi b/manual/locale.texi
+index 720e0ca952a665bd..f6afa5dc44a2a016 100644
+--- a/manual/locale.texi
++++ b/manual/locale.texi
+@@ -253,7 +253,7 @@ The symbols in this section are defined in the header file @file{locale.h}.
+ @c calculate_head_size ok
+ @c __munmap ok
+ @c compute_hashval ok
+-@c qsort dup @acucorrupt
++@c qsort dup
+ @c rangecmp ok
+ @c malloc @ascuheap @acsmem
+ @c strdup @ascuheap @acsmem
+@@ -275,7 +275,6 @@ The symbols in this section are defined in the header file @file{locale.h}.
+ @c realloc @ascuheap @acsmem
+ @c realloc @ascuheap @acsmem
+ @c fclose @ascuheap @asulock @acsmem @acsfd @aculock
+-@c qsort @ascuheap @acsmem
+ @c alias_compare dup
+ @c libc_lock_unlock @aculock
+ @c _nl_explode_name @ascuheap @acsmem
+diff --git a/manual/search.texi b/manual/search.texi
+index 5691bf2f2b2bb861..a550858478f7fc83 100644
+--- a/manual/search.texi
++++ b/manual/search.texi
+@@ -159,7 +159,7 @@ To sort an array using an arbitrary comparison function, use the
+
+ @deftypefun void qsort (void *@var{array}, size_t @var{count}, size_t @var{size}, comparison_fn_t @var{compare})
+ @standards{ISO, stdlib.h}
+-@safety{@prelim{}@mtsafe{}@assafe{}@acunsafe{@acucorrupt{}}}
++@safety{@prelim{}@mtsafe{}@assafe{}@acsafe{}}
+ The @code{qsort} function sorts the array @var{array}. The array
+ contains @var{count} elements, each of which is of size @var{size}.
+
+@@ -199,9 +199,8 @@ Functions}):
+ The @code{qsort} function derives its name from the fact that it was
+ originally implemented using the ``quick sort'' algorithm.
+
+-The implementation of @code{qsort} in this library might not be an
+-in-place sort and might thereby use an extra amount of memory to store
+-the array.
++The implementation of @code{qsort} in this library is an in-place sort
++and uses a constant extra space (allocated on the stack).
+ @end deftypefun
+
+ @node Search/Sort Example
+diff --git a/stdlib/Makefile b/stdlib/Makefile
+index 03f8478c64408ed3..3b89bc2aa0307321 100644
+--- a/stdlib/Makefile
++++ b/stdlib/Makefile
+@@ -94,7 +94,6 @@ routines := \
+ mbtowc \
+ mrand48 \
+ mrand48_r \
+- msort \
+ nrand48 \
+ nrand48_r \
+ old_atexit \
+@@ -370,7 +369,6 @@ generated += \
+ # generated
+
+ CFLAGS-bsearch.c += $(uses-callbacks)
+-CFLAGS-msort.c += $(uses-callbacks)
+ CFLAGS-qsort.c += $(uses-callbacks)
+ CFLAGS-system.c += -fexceptions
+ CFLAGS-system.os = -fomit-frame-pointer
+diff --git a/stdlib/msort.c b/stdlib/msort.c
+deleted file mode 100644
+index 8750cc59db2337cf..0000000000000000
+--- a/stdlib/msort.c
++++ /dev/null
+@@ -1,310 +0,0 @@
+-/* An alternative to qsort, with an identical interface.
+- This file is part of the GNU C Library.
+- Copyright (C) 1992-2021 Free Software Foundation, Inc.
+- Written by Mike Haertel, September 1988.
+-
+- The GNU C Library is free software; you can redistribute it and/or
+- modify it under the terms of the GNU Lesser General Public
+- License as published by the Free Software Foundation; either
+- version 2.1 of the License, or (at your option) any later version.
+-
+- The GNU C Library is distributed in the hope that it will be useful,
+- but WITHOUT ANY WARRANTY; without even the implied warranty of
+- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+- Lesser General Public License for more details.
+-
+- You should have received a copy of the GNU Lesser General Public
+- License along with the GNU C Library; if not, see
+- . */
+-
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-#include
+-
+-struct msort_param
+-{
+- size_t s;
+- size_t var;
+- __compar_d_fn_t cmp;
+- void *arg;
+- char *t;
+-};
+-static void msort_with_tmp (const struct msort_param *p, void *b, size_t n);
+-
+-static void
+-msort_with_tmp (const struct msort_param *p, void *b, size_t n)
+-{
+- char *b1, *b2;
+- size_t n1, n2;
+-
+- if (n <= 1)
+- return;
+-
+- n1 = n / 2;
+- n2 = n - n1;
+- b1 = b;
+- b2 = (char *) b + (n1 * p->s);
+-
+- msort_with_tmp (p, b1, n1);
+- msort_with_tmp (p, b2, n2);
+-
+- char *tmp = p->t;
+- const size_t s = p->s;
+- __compar_d_fn_t cmp = p->cmp;
+- void *arg = p->arg;
+- switch (p->var)
+- {
+- case 0:
+- while (n1 > 0 && n2 > 0)
+- {
+- if ((*cmp) (b1, b2, arg) <= 0)
+- {
+- *(uint32_t *) tmp = *(uint32_t *) b1;
+- b1 += sizeof (uint32_t);
+- --n1;
+- }
+- else
+- {
+- *(uint32_t *) tmp = *(uint32_t *) b2;
+- b2 += sizeof (uint32_t);
+- --n2;
+- }
+- tmp += sizeof (uint32_t);
+- }
+- break;
+- case 1:
+- while (n1 > 0 && n2 > 0)
+- {
+- if ((*cmp) (b1, b2, arg) <= 0)
+- {
+- *(uint64_t *) tmp = *(uint64_t *) b1;
+- b1 += sizeof (uint64_t);
+- --n1;
+- }
+- else
+- {
+- *(uint64_t *) tmp = *(uint64_t *) b2;
+- b2 += sizeof (uint64_t);
+- --n2;
+- }
+- tmp += sizeof (uint64_t);
+- }
+- break;
+- case 2:
+- while (n1 > 0 && n2 > 0)
+- {
+- unsigned long *tmpl = (unsigned long *) tmp;
+- unsigned long *bl;
+-
+- tmp += s;
+- if ((*cmp) (b1, b2, arg) <= 0)
+- {
+- bl = (unsigned long *) b1;
+- b1 += s;
+- --n1;
+- }
+- else
+- {
+- bl = (unsigned long *) b2;
+- b2 += s;
+- --n2;
+- }
+- while (tmpl < (unsigned long *) tmp)
+- *tmpl++ = *bl++;
+- }
+- break;
+- case 3:
+- while (n1 > 0 && n2 > 0)
+- {
+- if ((*cmp) (*(const void **) b1, *(const void **) b2, arg) <= 0)
+- {
+- *(void **) tmp = *(void **) b1;
+- b1 += sizeof (void *);
+- --n1;
+- }
+- else
+- {
+- *(void **) tmp = *(void **) b2;
+- b2 += sizeof (void *);
+- --n2;
+- }
+- tmp += sizeof (void *);
+- }
+- break;
+- default:
+- while (n1 > 0 && n2 > 0)
+- {
+- if ((*cmp) (b1, b2, arg) <= 0)
+- {
+- tmp = (char *) __mempcpy (tmp, b1, s);
+- b1 += s;
+- --n1;
+- }
+- else
+- {
+- tmp = (char *) __mempcpy (tmp, b2, s);
+- b2 += s;
+- --n2;
+- }
+- }
+- break;
+- }
+-
+- if (n1 > 0)
+- memcpy (tmp, b1, n1 * s);
+- memcpy (b, p->t, (n - n2) * s);
+-}
+-
+-
+-void
+-__qsort_r (void *b, size_t n, size_t s, __compar_d_fn_t cmp, void *arg)
+-{
+- size_t size = n * s;
+- char *tmp = NULL;
+- struct msort_param p;
+-
+- /* For large object sizes use indirect sorting. */
+- if (s > 32)
+- size = 2 * n * sizeof (void *) + s;
+-
+- if (size < 1024)
+- /* The temporary array is small, so put it on the stack. */
+- p.t = __alloca (size);
+- else
+- {
+- /* We should avoid allocating too much memory since this might
+- have to be backed up by swap space. */
+- static long int phys_pages;
+- static int pagesize;
+-
+- if (pagesize == 0)
+- {
+- phys_pages = __sysconf (_SC_PHYS_PAGES);
+-
+- if (phys_pages == -1)
+- /* Error while determining the memory size. So let's
+- assume there is enough memory. Otherwise the
+- implementer should provide a complete implementation of
+- the `sysconf' function. */
+- phys_pages = (long int) (~0ul >> 1);
+-
+- /* The following determines that we will never use more than
+- a quarter of the physical memory. */
+- phys_pages /= 4;
+-
+- /* Make sure phys_pages is written to memory. */
+- atomic_write_barrier ();
+-
+- pagesize = __sysconf (_SC_PAGESIZE);
+- }
+-
+- /* Just a comment here. We cannot compute
+- phys_pages * pagesize
+- and compare the needed amount of memory against this value.
+- The problem is that some systems might have more physical
+- memory then can be represented with a `size_t' value (when
+- measured in bytes. */
+-
+- /* If the memory requirements are too high don't allocate memory. */
+- if (size / pagesize > (size_t) phys_pages)
+- {
+- _quicksort (b, n, s, cmp, arg);
+- return;
+- }
+-
+- /* It's somewhat large, so malloc it. */
+- int save = errno;
+- tmp = malloc (size);
+- __set_errno (save);
+- if (tmp == NULL)
+- {
+- /* Couldn't get space, so use the slower algorithm
+- that doesn't need a temporary array. */
+- _quicksort (b, n, s, cmp, arg);
+- return;
+- }
+- p.t = tmp;
+- }
+-
+- p.s = s;
+- p.var = 4;
+- p.cmp = cmp;
+- p.arg = arg;
+-
+- if (s > 32)
+- {
+- /* Indirect sorting. */
+- char *ip = (char *) b;
+- void **tp = (void **) (p.t + n * sizeof (void *));
+- void **t = tp;
+- void *tmp_storage = (void *) (tp + n);
+-
+- while ((void *) t < tmp_storage)
+- {
+- *t++ = ip;
+- ip += s;
+- }
+- p.s = sizeof (void *);
+- p.var = 3;
+- msort_with_tmp (&p, p.t + n * sizeof (void *), n);
+-
+- /* tp[0] .. tp[n - 1] is now sorted, copy around entries of
+- the original array. Knuth vol. 3 (2nd ed.) exercise 5.2-10. */
+- char *kp;
+- size_t i;
+- for (i = 0, ip = (char *) b; i < n; i++, ip += s)
+- if ((kp = tp[i]) != ip)
+- {
+- size_t j = i;
+- char *jp = ip;
+- memcpy (tmp_storage, ip, s);
+-
+- do
+- {
+- size_t k = (kp - (char *) b) / s;
+- tp[j] = jp;
+- memcpy (jp, kp, s);
+- j = k;
+- jp = kp;
+- kp = tp[k];
+- }
+- while (kp != ip);
+-
+- tp[j] = jp;
+- memcpy (jp, tmp_storage, s);
+- }
+- }
+- else
+- {
+- if ((s & (sizeof (uint32_t) - 1)) == 0
+- && ((char *) b - (char *) 0) % __alignof__ (uint32_t) == 0)
+- {
+- if (s == sizeof (uint32_t))
+- p.var = 0;
+- else if (s == sizeof (uint64_t)
+- && ((char *) b - (char *) 0) % __alignof__ (uint64_t) == 0)
+- p.var = 1;
+- else if ((s & (sizeof (unsigned long) - 1)) == 0
+- && ((char *) b - (char *) 0)
+- % __alignof__ (unsigned long) == 0)
+- p.var = 2;
+- }
+- msort_with_tmp (&p, b, n);
+- }
+- free (tmp);
+-}
+-libc_hidden_def (__qsort_r)
+-weak_alias (__qsort_r, qsort_r)
+-
+-
+-void
+-qsort (void *b, size_t n, size_t s, __compar_fn_t cmp)
+-{
+- return __qsort_r (b, n, s, (__compar_d_fn_t) cmp, NULL);
+-}
+-libc_hidden_def (qsort)
+diff --git a/stdlib/qsort.c b/stdlib/qsort.c
+index db299eb333cf0302..cb1619aa0ae7de72 100644
+--- a/stdlib/qsort.c
++++ b/stdlib/qsort.c
+@@ -20,7 +20,6 @@
+ Engineering a sort function; Jon Bentley and M. Douglas McIlroy;
+ Software - Practice and Experience; Vol. 23 (11), 1249-1265, 1993. */
+
+-#include
+ #include
+ #include
+ #include
+@@ -266,8 +265,8 @@ insertion_sort_qsort_partitions (void *const pbase, size_t total_elems,
+ stack size is needed (actually O(1) in this case)! */
+
+ void
+-_quicksort (void *const pbase, size_t total_elems, size_t size,
+- __compar_d_fn_t cmp, void *arg)
++__qsort_r (void *const pbase, size_t total_elems, size_t size,
++ __compar_d_fn_t cmp, void *arg)
+ {
+ char *base_ptr = (char *) pbase;
+
+@@ -399,3 +398,12 @@ _quicksort (void *const pbase, size_t total_elems, size_t size,
+ insertion_sort_qsort_partitions (pbase, total_elems, size, swap_type, cmp,
+ arg);
+ }
++libc_hidden_def (__qsort_r)
++weak_alias (__qsort_r, qsort_r)
++
++void
++qsort (void *b, size_t n, size_t s, __compar_fn_t cmp)
++{
++ return __qsort_r (b, n, s, (__compar_d_fn_t) cmp, NULL);
++}
++libc_hidden_def (qsort)
diff --git a/glibc-RHEL-24168-9.patch b/glibc-RHEL-24168-9.patch
new file mode 100644
index 0000000..7c06a98
--- /dev/null
+++ b/glibc-RHEL-24168-9.patch
@@ -0,0 +1,399 @@
+commit bc888a3976700a3607f6ec4a36dbf3030161cb3e
+Author: Adhemerval Zanella
+Date: Tue Oct 3 09:22:51 2023 -0300
+
+ stdlib: Add more qsort{_r} coverage
+
+ This patch adds qsort and qsort_r tests that trigger the worst-case
+ scenario for quicksort, for which glibc currently lacks coverage.
+ The tests are run with random input, different internal types (uint8_t,
+ uint16_t, uint32_t, uint64_t, and a large element size), and with
+ different numbers of elements.
+
+ Checked on x86_64-linux-gnu and i686-linux-gnu.
+ Reviewed-by: Noah Goldstein
+
+diff --git a/stdlib/Makefile b/stdlib/Makefile
+index 3b89bc2aa0307321..4039e5395eeea2b0 100644
+--- a/stdlib/Makefile
++++ b/stdlib/Makefile
+@@ -211,6 +211,7 @@ tests := \
+ tst-on_exit \
+ tst-qsort \
+ tst-qsort2 \
++ tst-qsort3 \
+ tst-quick_exit \
+ tst-rand48 \
+ tst-rand48-2 \
+diff --git a/stdlib/tst-qsort3.c b/stdlib/tst-qsort3.c
+new file mode 100644
+index 0000000000000000..421560d74434a116
+--- /dev/null
++++ b/stdlib/tst-qsort3.c
+@@ -0,0 +1,366 @@
++/* qsort(_r) tests to trigger worst case for quicksort.
++ Copyright (C) 2023 Free Software Foundation, Inc.
++ This file is part of the GNU C Library.
++
++ The GNU C Library is free software; you can redistribute it and/or
++ modify it under the terms of the GNU Lesser General Public
++ License as published by the Free Software Foundation; either
++ version 2.1 of the License, or (at your option) any later version.
++
++ The GNU C Library is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ Lesser General Public License for more details.
++
++ You should have received a copy of the GNU Lesser General Public
++ License along with the GNU C Library; if not, see
++ <https://www.gnu.org/licenses/>. */
++
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++#include
++
++typedef enum
++{
++ Sorted,
++ Random,
++ Repeated,
++ Bitonic,
++ Duplicated,
++} arraytype_t;
++
++/* Ratio of the total number of elements that will be repeated. */
++static const double RepeatedRatio = 0.2;
++
++/* Ratio of duplicated elements. */
++static const double DuplicatedRatio = 0.4;
++
++struct array_t
++{
++ arraytype_t type;
++ const char *name;
++} static const arraytypes[] =
++{
++ { Sorted, "Sorted" },
++ { Random, "Random" },
++ { Repeated, "Repeated" },
++ { Bitonic, "Bitonic" },
++ { Duplicated, "Duplicated" },
++};
++
++/* Return the address of element IDX of BASE, interpreted as an array
++ of elements of size SIZE. */
++static inline void *
++arr (void *base, size_t idx, size_t size)
++{
++ return (void*)((uintptr_t)base + (idx * size));
++}
++
++/* Functions used to check qsort. */
++static int
++uint8_t_cmp (const void *a, const void *b)
++{
++ uint8_t ia = *(uint8_t*)a;
++ uint8_t ib = *(uint8_t*)b;
++ return (ia > ib) - (ia < ib);
++}
++
++static int
++uint16_t_cmp (const void *a, const void *b)
++{
++ uint16_t ia = *(uint16_t*)a;
++ uint16_t ib = *(uint16_t*)b;
++ return (ia > ib) - (ia < ib);
++}
++
++static int
++uint32_t_cmp (const void *a, const void *b)
++{
++ uint32_t ia = *(uint32_t*)a;
++ uint32_t ib = *(uint32_t*)b;
++ return (ia > ib) - (ia < ib);
++}
++
++static int
++uint64_t_cmp (const void *a, const void *b)
++{
++ uint64_t ia = *(uint64_t*)a;
++ uint64_t ib = *(uint64_t*)b;
++ return (ia > ib) - (ia < ib);
++}
++
++#define LARGE_SIZE 47
++
++static int
++large_cmp (const void *a, const void *b)
++{
++ return memcmp (a, b, LARGE_SIZE);
++}
++
++/* Function used to check qsort_r. */
++typedef enum
++{
++ UINT8_CMP_T,
++ UINT16_CMP_T,
++ UINT32_CMP_T,
++ UINT64_CMP_T,
++ LARGE_CMP_T
++} type_cmp_t;
++
++static type_cmp_t
++uint_t_cmp_type (size_t sz)
++{
++ switch (sz)
++ {
++ case sizeof (uint8_t): return UINT8_CMP_T;
++ case sizeof (uint16_t): return UINT16_CMP_T;
++ case sizeof (uint64_t): return UINT64_CMP_T;
++ case sizeof (uint32_t): return UINT32_CMP_T;
++ default: return LARGE_CMP_T;
++ }
++}
++
++static int
++uint_t_cmp (const void *a, const void *b, void *arg)
++{
++ type_cmp_t type = *(type_cmp_t*) arg;
++ switch (type)
++ {
++ case UINT8_CMP_T: return uint8_t_cmp (a, b);
++ case UINT32_CMP_T: return uint32_t_cmp (a, b);
++ case UINT16_CMP_T: return uint16_t_cmp (a, b);
++ case UINT64_CMP_T: return uint64_t_cmp (a, b);
++ default: return large_cmp (a, b);
++ }
++}
++
++static void
++seq (void *elem, size_t type_size, int value)
++{
++ if (type_size == sizeof (uint8_t))
++ *(uint8_t*)elem = value;
++ else if (type_size == sizeof (uint16_t))
++ *(uint16_t*)elem = value;
++ else if (type_size == sizeof (uint32_t))
++ *(uint32_t*)elem = value;
++ else if (type_size == sizeof (uint64_t))
++ *(uint64_t*)elem = value;
++ else
++ memset (elem, value, type_size);
++}
++
++static void
++fill_array (void *array, void *refarray, size_t nmemb, size_t type_size,
++ arraytype_t type)
++{
++ size_t size = nmemb * type_size;
++
++ switch (type)
++ {
++ case Sorted:
++ for (size_t i = 0; i < nmemb; i++)
++ seq (arr (array, i, type_size), type_size, i);
++ break;
++
++ case Random:
++ arc4random_buf (array, size);
++ break;
++
++ case Repeated:
++ {
++ arc4random_buf (array, size);
++
++ void *randelem = xmalloc (type_size);
++ arc4random_buf (randelem, type_size);
++
++ /* Repeat REPEATED elements (based on the RepeatedRatio ratio) in the
++ random array. */
++ size_t repeated = (size_t)(nmemb * RepeatedRatio);
++ for (size_t i = 0; i < repeated; i++)
++ {
++ size_t pos = arc4random_uniform (nmemb - 1);
++ memcpy (arr (array, pos, type_size), randelem, type_size);
++ }
++ free (randelem);
++ }
++ break;
++
++ case Bitonic:
++ {
++ size_t i;
++ for (i = 0; i < nmemb / 2; i++)
++ seq (arr (array, i, type_size), type_size, i);
++ for ( ; i < nmemb; i++)
++ seq (arr (array, i, type_size), type_size, (nmemb - 1) - i);
++ }
++ break;
++
++ case Duplicated:
++ {
++ int randelem1 = arc4random ();
++ for (size_t i = 0; i < nmemb; i++)
++ seq (arr (array, i, type_size), type_size, randelem1);
++
++ size_t duplicates = (size_t)(nmemb * DuplicatedRatio);
++ int randelem2 = arc4random ();
++ for (size_t i = 0; i < duplicates; i++)
++ {
++ size_t pos = arc4random_uniform (nmemb - 1);
++ seq (arr (array, pos, type_size), type_size, randelem2);
++ }
++ }
++ break;
++ }
++
++ memcpy (refarray, array, size);
++}
++
++typedef int (*cmpfunc_t)(const void *, const void *);
++
++/* Simple insertion sort to use as reference sort. */
++static void
++qsort_r_ref (void *p, size_t n, size_t s, __compar_d_fn_t cmp, void *arg)
++{
++ if (n <= 1)
++ return;
++
++ int i = 1;
++ char tmp[s];
++ while (i < n)
++ {
++ memcpy (tmp, arr (p, i, s), s);
++ int j = i - 1;
++ while (j >= 0 && cmp (arr (p, j, s), tmp, arg) > 0)
++ {
++ memcpy (arr (p, j + 1, s), arr (p, j, s), s);
++ j = j - 1;
++ }
++ memcpy (arr (p, j + 1, s), tmp, s);
++ i = i + 1;
++ }
++}
++
++static void
++qsort_ref (void *b, size_t n, size_t s, __compar_fn_t cmp)
++{
++ return qsort_r_ref (b, n, s, (__compar_d_fn_t) cmp, NULL);
++}
++
++/* Check that ARRAY of NMEMB elements of size SIZE is sorted according
++ to CMPFUNC and matches the reference array REFARRAY. */
++static void
++check_array (void *array, void *refarray, size_t nmemb, size_t type_size,
++ cmpfunc_t cmpfunc)
++{
++ for (size_t i = 1; i < nmemb; i++)
++ {
++ int ret = cmpfunc (arr (array, i, type_size),
++ arr (array, i-1, type_size));
++ TEST_VERIFY_EXIT (ret >= 0);
++ }
++
++ size_t size = nmemb * type_size;
++ TEST_COMPARE_BLOB (array, size, refarray, size);
++}
++
++static void
++check_qsort (void *buf, void *refbuf, size_t nelem, size_t type_size,
++ arraytype_t type, cmpfunc_t cmpfunc)
++{
++ fill_array (buf, refbuf, nelem, type_size, type);
++
++ qsort (buf, nelem, type_size, cmpfunc);
++ qsort_ref (refbuf, nelem, type_size, cmpfunc);
++
++ check_array (buf, refbuf, nelem, type_size, cmpfunc);
++}
++
++static void
++check_qsort_r (void *buf, void *refbuf, size_t nelem, size_t type_size,
++ arraytype_t type, cmpfunc_t cmpfunc)
++{
++ fill_array (buf, refbuf, nelem, type_size, type);
++
++ type_cmp_t typecmp = uint_t_cmp_type (type_size);
++
++ qsort_r (buf, nelem, type_size, uint_t_cmp, &typecmp);
++ qsort_r_ref (refbuf, nelem, type_size, uint_t_cmp, &typecmp);
++
++ check_array (buf, refbuf, nelem, type_size, cmpfunc);
++}
++
++static int
++do_test (void)
++{
++ /* Some random sizes. */
++ static const size_t nelems[] = { 0, 1, 7, 20, 32, 100, 256, 1024, 4256 };
++ size_t max_nelems = 0;
++ for (int i = 0; i < array_length (nelems); i++)
++ if (nelems[i] > max_nelems)
++ max_nelems = nelems[i];
++
++ static const struct test_t
++ {
++ size_t type_size;
++ cmpfunc_t cmpfunc;
++ }
++ tests[] =
++ {
++ { sizeof (uint8_t), uint8_t_cmp },
++ { sizeof (uint16_t), uint16_t_cmp },
++ { sizeof (uint32_t), uint32_t_cmp },
++ { sizeof (uint64_t), uint64_t_cmp },
++ /* Test swap with large elements. */
++ { LARGE_SIZE, large_cmp },
++ };
++ size_t max_type_size = 0;
++ for (int i = 0; i < array_length (tests); i++)
++ if (tests[i].type_size > max_type_size)
++ max_type_size = tests[i].type_size;
++
++ void *buf = reallocarray (NULL, max_nelems, max_type_size);
++ TEST_VERIFY_EXIT (buf != NULL);
++ void *refbuf = reallocarray (NULL, max_nelems, max_type_size);
++ TEST_VERIFY_EXIT (refbuf != NULL);
++
++ for (const struct test_t *test = tests; test < array_end (tests); ++test)
++ {
++ if (test_verbose > 0)
++ printf ("info: testing qsort with type_size=%zu\n", test->type_size);
++ for (const struct array_t *arraytype = arraytypes;
++ arraytype < array_end (arraytypes);
++ ++arraytype)
++ {
++ if (test_verbose > 0)
++ printf (" distribution=%s\n", arraytype->name);
++ for (const size_t *nelem = nelems;
++ nelem < array_end (nelems);
++ ++nelem)
++ {
++ if (test_verbose > 0)
++ printf (" nelem=%zu, total size=%zu\n", *nelem,
++ *nelem * test->type_size);
++
++ check_qsort (buf, refbuf, *nelem, test->type_size,
++ arraytype->type, test->cmpfunc);
++ check_qsort_r (buf, refbuf, *nelem, test->type_size,
++ arraytype->type, test->cmpfunc);
++ }
++ }
++ }
++
++ free (buf);
++ free (refbuf);
++
++ return 0;
++}
++
++#include <support/test-driver.c>
diff --git a/glibc.spec b/glibc.spec
index bce2f0e..1acf253 100644
--- a/glibc.spec
+++ b/glibc.spec
@@ -157,7 +157,7 @@ end \
Summary: The GNU libc libraries
Name: glibc
Version: %{glibcversion}
-Release: 210%{?dist}
+Release: 211%{?dist}
# In general, GPLv2+ is used by programs, LGPLv2+ is used for
# libraries.
@@ -1257,6 +1257,28 @@ Patch947: glibc-RHEL-53909-1.patch
Patch948: glibc-RHEL-53909-2.patch
Patch949: glibc-RHEL-62188-1.patch
Patch950: glibc-RHEL-62188-2.patch
+Patch951: glibc-RHEL-24168-1.patch
+Patch952: glibc-RHEL-24168-2.patch
+Patch953: glibc-RHEL-24168-3.patch
+Patch954: glibc-RHEL-24168-4.patch
+Patch955: glibc-RHEL-24168-5.patch
+Patch956: glibc-RHEL-24168-6.patch
+Patch957: glibc-RHEL-24168-7.patch
+Patch958: glibc-RHEL-24168-8.patch
+Patch959: glibc-RHEL-24168-9.patch
+Patch960: glibc-RHEL-24168-10.patch
+Patch961: glibc-RHEL-24168-11.patch
+Patch962: glibc-RHEL-24168-12.patch
+Patch963: glibc-RHEL-24168-13.patch
+Patch964: glibc-RHEL-24168-14.patch
+Patch965: glibc-RHEL-24168-15.patch
+Patch966: glibc-RHEL-24168-16.patch
+Patch967: glibc-RHEL-24168-17.patch
+Patch968: glibc-RHEL-24168-18.patch
+Patch969: glibc-RHEL-24168-19.patch
+Patch970: glibc-RHEL-24168-20.patch
+Patch971: glibc-RHEL-24168-21.patch
+Patch972: glibc-RHEL-24168-22.patch
##############################################################################
# Continued list of core "glibc" package information:
@@ -3254,6 +3276,9 @@ update_gconv_modules_cache ()
%endif
%changelog
+* Tue Jul 08 2025 Arjun Shankar - 2.34-211
+- Improve qsort implementation (RHEL-24168)
+
* Tue Jul 01 2025 Arjun Shankar - 2.34-210
- Add new tests for clock_nanosleep (RHEL-62188)