import CS coreutils-8.32-39.el9

This commit is contained in:
eabdullin 2025-03-11 07:06:58 +00:00
parent 99bead6739
commit 44a5f2e547
5 changed files with 272 additions and 1 deletion

View File

@ -0,0 +1,63 @@
Fix failures in extended testsuite runs
Cherry-picked from upstream commits:
* https://github.com/coreutils/coreutils/commit/c431893b96bf4dc3b3918a605387f1b0e3843242
* https://github.com/coreutils/coreutils/commit/c0e5f8c59b951ae13ca9cb9945cd77163489e1d9
diff --git a/tests/rmdir/ignore.sh b/tests/rmdir/ignore.sh
index 65e92d012..b26ac533a 100755
--- a/tests/rmdir/ignore.sh
+++ b/tests/rmdir/ignore.sh
@@ -33,17 +33,24 @@ test -d "$cwd/a/b/c" && fail=1
# Between 6.11 and 8.31, the following rmdir would mistakenly succeed.
mkdir -p x/y || framework_failure_
chmod a-w x || framework_failure_
-returns_ 1 rmdir --ignore-fail-on-non-empty x/y || fail=1
+
+if ! uid_is_privileged_; then # root does not get EPERM.
+ returns_ 1 rmdir --ignore-fail-on-non-empty x/y || fail=1
+fi
+
test -d x/y || fail=1
# Between 6.11 and 8.31, the following rmdir would mistakenly fail,
# and also give a non descript error
touch x/y/z || framework_failure_
rmdir --ignore-fail-on-non-empty x/y || fail=1
test -d x/y || fail=1
-# assume empty dir if unreadable entries (so failure to remove diagnosed)
-rm x/y/z || framework_failure_
-chmod a-r x/y || framework_failure_
-returns_ 1 rmdir --ignore-fail-on-non-empty x/y || fail=1
-test -d x/y || fail=1
+
+if ! uid_is_privileged_; then # root does not get EPERM.
+ # assume empty dir if unreadable entries (so failure to remove diagnosed)
+ rm x/y/z || framework_failure_
+ chmod a-r x/y || framework_failure_
+ returns_ 1 rmdir --ignore-fail-on-non-empty x/y || fail=1
+ test -d x/y || fail=1
+fi
Exit $fail
diff --git a/tests/cp/fiemap-2.sh b/tests/cp/fiemap-2.sh
index e75d232e3..da7503074 100755
--- a/tests/cp/fiemap-2.sh
+++ b/tests/cp/fiemap-2.sh
@@ -32,7 +32,7 @@ dd bs=1k seek=128 of=k < /dev/null || framework_failure_
for append in no yes; do
test $append = yes && printf y >> k
for i in always never; do
- cp --sparse=$i k k2 || fail=1
+ cp --reflink=never --sparse=$i k k2 || fail=1
cmp k k2 || fail=1
done
done
@@ -48,7 +48,7 @@ dd bs=1k seek=1 of=k count=255 < /dev/zero || framework_failure_
# Currently, on my F14/ext4 desktop, this K file starts off with size 256KiB,
# (note that the K in the preceding test starts off with size 4KiB).
# cp from coreutils-8.9 with --sparse=always reduces the size to 32KiB.
-cp --sparse=always k k2 || fail=1
+cp --reflink=never --sparse=always k k2 || fail=1
if test $(stat -c %b k2) -ge $(stat -c %b k); then
# If not sparse, then double check by creating with dd
# as we're not guaranteed that seek will create a hole.

View File

@ -0,0 +1,69 @@
OpenSSL on s390x unconditionally opens the /dev/z90crypt cryptographic device,
consuming an extra file descriptor. This patch backports the upstream commits
that relax the fd-limited sort tests accordingly:
* https://github.com/coreutils/coreutils/commit/5c9998fbab1911c8ccb7a7d0fb03e42ed310afe4
* https://github.com/coreutils/coreutils/commit/9348edb6b650db58a501e4e26e5dbf8e8b13e84d
diff --git a/tests/misc/sort-continue.sh b/tests/misc/sort-continue.sh
index c273363954..5326bfece5 100755
--- a/tests/misc/sort-continue.sh
+++ b/tests/misc/sort-continue.sh
@@ -19,8 +19,14 @@
. "${srcdir=.}/tests/init.sh"; path_prepend_ ./src
print_ver_ sort
+# This script uses 'ulimit -n 7' to limit 'sort' to at most 7 open files:
+# stdin, stdout, stderr, two input and one output files when merging,
+# and an extra. The extra is for old-fashioned platforms like Solaris 10
+# where opening a temp file also requires opening /dev/urandom to
+# calculate the temp file's name.
+
# Skip the test when running under valgrind.
-( ulimit -n 6; sort 3<&- 4<&- 5<&- < /dev/null ) \
+( ulimit -n 7; sort 3<&- 4<&- 5<&- 6<&- < /dev/null ) \
|| skip_ 'fd-limited sort failed; are you running under valgrind?'
for i in $(seq 31); do
@@ -31,16 +37,16 @@ done
test_files=$(echo __test.*)
(
- ulimit -n 6
- sort -n -m $test_files 3<&- 4<&- 5<&- < /dev/null > out
+ ulimit -n 7
+ sort -n -m $test_files 3<&- 4<&- 5<&- 6<&- < /dev/null > out
) &&
compare in out ||
{ fail=1; echo 'file descriptor exhaustion not handled' 1>&2; }
echo 32 | tee -a in > in1
(
- ulimit -n 6
- sort -n -m $test_files - 3<&- 4<&- 5<&- < in1 > out
+ ulimit -n 7
+ sort -n -m $test_files - 3<&- 4<&- 5<&- 6<&- < in1 > out
) &&
compare in out || { fail=1; echo 'stdin not handled properly' 1>&2; }
diff --git a/tests/misc/sort-merge-fdlimit.sh b/tests/misc/sort-merge-fdlimit.sh
index db2d8ebf09..7dda7f8f5e 100755
--- a/tests/misc/sort-merge-fdlimit.sh
+++ b/tests/misc/sort-merge-fdlimit.sh
@@ -61,9 +61,16 @@ done
# This test finds the bug only with shells that do not close FDs on
# exec, and will miss the bug (if present) on other shells, but it's
# not easy to fix this without running afoul of the OpenBSD-like sh bugs.
+#
+# This script uses 'ulimit -n 10' with 7, 8 and 9 open
+# to limit 'sort' to at most 7 open files:
+# stdin, stdout, stderr, two input and one output files when merging,
+# and an extra. The extra is for old-fashioned platforms like Solaris 10
+# where opening a temp file also requires opening /dev/urandom to
+# calculate the temp file's name.
(seq 6 && echo 6) >exp || framework_failure_
echo 6 >out || framework_failure_
-(exec 3<&- 4<&- 5<&- 6</dev/null 7<&6 8<&6 9<&6 &&
+(exec 3<&- 4<&- 5<&- 6<&- 7</dev/null 8<&7 9<&7 &&
ulimit -n 10 &&
sort -n -m --batch-size=7 -o out out in/1 in/2 in/3 in/4 in/5 out
) &&

View File

@ -0,0 +1,55 @@
commit 45c2456a56337ebcafe0dd9faa2bd995ccbc3357
Author: Florian Weimer <fweimer@redhat.com>
Date: Mon Nov 11 14:05:53 2024 +0100
nproc: Use affinity mask even on systems with more than 1024 CPUs.
* lib/nproc.c (num_processors_via_affinity_mask): Retry
with larger affinity masks if CPU_ALLOC_SIZE is available.
diff --git a/lib/nproc.c b/lib/nproc.c
index 92a07e8289..48bc3d06fa 100644
--- a/lib/nproc.c
+++ b/lib/nproc.c
@@ -20,6 +20,7 @@
#include <config.h>
#include "nproc.h"
+#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <unistd.h>
@@ -124,6 +125,33 @@ num_processors_via_affinity_mask (void)
return count;
}
}
+#elif HAVE_SCHED_GETAFFINITY_LIKE_GLIBC \
+ && defined CPU_ALLOC_SIZE /* glibc >= 2.6 */
+ {
+ unsigned int alloc_count = 1024;
+ while (1)
+ {
+ cpu_set_t *set = CPU_ALLOC (alloc_count);
+ if (set == NULL)
+ return 0;
+ unsigned int size = CPU_ALLOC_SIZE (alloc_count);
+ if (sched_getaffinity (0, size, set) == 0)
+ {
+ unsigned int count = CPU_COUNT_S (size, set);
+ CPU_FREE (set);
+ return count;
+ }
+ if (errno != EINVAL)
+ {
+ CPU_FREE (set);
+ return 0;
+ }
+ CPU_FREE (set);
+ alloc_count *= 2;
+ if (alloc_count == 0)
+ return 0;
+ }
+ }
#elif HAVE_SCHED_GETAFFINITY_LIKE_GLIBC /* glibc >= 2.3.4 */
{
cpu_set_t set;

View File

@ -0,0 +1,64 @@
commit ee0bc695303775da5026091a65e8ec2b764f4a26
Author: Bruno Haible <bruno@clisp.org>
Date: Mon Nov 11 15:40:52 2024 +0100
nproc: Use affinity mask even in out-of-memory situations.
* lib/nproc.c (num_processors_via_affinity_mask): Use a stack-allocated
cpu_set_t as fallback. Add comments.
diff --git a/lib/nproc.c b/lib/nproc.c
index 48bc3d06fa..0b5898d88f 100644
--- a/lib/nproc.c
+++ b/lib/nproc.c
@@ -125,15 +125,25 @@ num_processors_via_affinity_mask (void)
return count;
}
}
-#elif HAVE_SCHED_GETAFFINITY_LIKE_GLIBC \
- && defined CPU_ALLOC_SIZE /* glibc >= 2.6 */
+#elif HAVE_SCHED_GETAFFINITY_LIKE_GLIBC /* glibc >= 2.3.4 */
+ /* There are two ways to use the sched_getaffinity() function:
+ - With a statically-sized cpu_set_t.
+ - With a dynamically-sized cpu_set_t.
+ Documentation:
+ <https://www.kernel.org/doc/man-pages/online/pages/man2/sched_getaffinity.2.html>
+ <https://www.kernel.org/doc/man-pages/online/pages/man3/CPU_SET.3.html>
+ The second way has the advantage that it works on systems with more than
+ 1024 CPUs. The first way has the advantage that it works also when memory
+ is tight. */
+# if defined CPU_ALLOC_SIZE /* glibc >= 2.6 */
{
unsigned int alloc_count = 1024;
- while (1)
+ for (;;)
{
cpu_set_t *set = CPU_ALLOC (alloc_count);
if (set == NULL)
- return 0;
+ /* Out of memory. */
+ break;
unsigned int size = CPU_ALLOC_SIZE (alloc_count);
if (sched_getaffinity (0, size, set) == 0)
{
@@ -143,16 +153,19 @@ num_processors_via_affinity_mask (void)
}
if (errno != EINVAL)
{
+ /* Some other error. */
CPU_FREE (set);
return 0;
}
CPU_FREE (set);
+ /* Retry with some larger cpu_set_t. */
alloc_count *= 2;
if (alloc_count == 0)
+ /* Integer overflow. Avoid an endless loop. */
return 0;
}
}
-#elif HAVE_SCHED_GETAFFINITY_LIKE_GLIBC /* glibc >= 2.3.4 */
+# endif
{
cpu_set_t set;

View File

@ -1,7 +1,7 @@
Summary: A set of basic GNU tools commonly used in shell scripts
Name: coreutils
Version: 8.32
Release: 36%{?dist}
Release: 39%{?dist}
License: GPLv3+
Url: https://www.gnu.org/software/coreutils/
Source0: https://ftp.gnu.org/gnu/%{name}/%{name}-%{version}.tar.xz
@ -73,6 +73,17 @@ Patch19: coreutils-nfsv4-acls.patch
# fix tail on kernels with 64k pagesize
Patch20: coreutils-8.32-tail-64kpages.patch
# fix extended upstream test suite failures (RHEL-60290)
Patch21: coreutils-8.32-fix-extended-testsuite.patch
# Fix affinity mask handling in nproc for large CPU counts (RHEL-54139)
# https://bugzilla.redhat.com/show_bug.cgi?id=2325167
Patch22: coreutils-nproc-affinity-1.patch
Patch23: coreutils-nproc-affinity-2.patch
# fix sort fdlimit test failures on s390x with /dev/z90crypt (RHEL-60290)
Patch24: coreutils-8.32-s390x-fdlimit.patch
# disable the test-lock gnulib test prone to deadlock
Patch100: coreutils-8.26-test-lock.patch
@ -322,6 +333,15 @@ rm -f $RPM_BUILD_ROOT%{_infodir}/dir
%license COPYING
%changelog
* Mon Dec 09 2024 Lukáš Zaoral <lzaoral@redhat.com> - 8.32-39
- fix sort fdlimit test failures on s390x with /dev/z90crypt (RHEL-60290)
* Tue Nov 26 2024 Lukáš Zaoral <lzaoral@redhat.com> - 8.32-38
- fix affinity mask handling in nproc for large CPU counts (RHEL-54139)
* Tue Oct 22 2024 Lukáš Zaoral <lzaoral@redhat.com> - 8.32-37
- fix extended upstream test suite failures (RHEL-60290)
* Fri Aug 16 2024 Lukáš Zaoral <lzaoral@redhat.com> - 8.32-36
- fix fold exit code for non-existent files (RHEL-54568)
- enable LTO on ppc64le