commit ee0bc695303775da5026091a65e8ec2b764f4a26
Author: Bruno Haible
Date:   Mon Nov 11 15:40:52 2024 +0100

    nproc: Use affinity mask even in out-of-memory situations.

    * lib/nproc.c (num_processors_via_affinity_mask): Use a stack-allocated
    cpu_set_t as fallback. Add comments.

diff --git a/lib/nproc.c b/lib/nproc.c
index 48bc3d06fa..0b5898d88f 100644
--- a/lib/nproc.c
+++ b/lib/nproc.c
@@ -125,15 +125,25 @@ num_processors_via_affinity_mask (void)
           return count;
       }
   }
-#elif HAVE_SCHED_GETAFFINITY_LIKE_GLIBC \
-      && defined CPU_ALLOC_SIZE /* glibc >= 2.6 */
+#elif HAVE_SCHED_GETAFFINITY_LIKE_GLIBC /* glibc >= 2.3.4 */
+  /* There are two ways to use the sched_getaffinity() function:
+       - With a statically-sized cpu_set_t.
+       - With a dynamically-sized cpu_set_t.
+     Documentation:
+
+
+     The second way has the advantage that it works on systems with more than
+     1024 CPUs.  The first way has the advantage that it works also when memory
+     is tight.  */
+# if defined CPU_ALLOC_SIZE /* glibc >= 2.6 */
   {
     unsigned int alloc_count = 1024;
-    while (1)
+    for (;;)
       {
         cpu_set_t *set = CPU_ALLOC (alloc_count);
         if (set == NULL)
-          return 0;
+          /* Out of memory.  */
+          break;
         unsigned int size = CPU_ALLOC_SIZE (alloc_count);
         if (sched_getaffinity (0, size, set) == 0)
           {
@@ -143,16 +153,19 @@ num_processors_via_affinity_mask (void)
           }
         if (errno != EINVAL)
           {
+            /* Some other error.  */
             CPU_FREE (set);
             return 0;
           }
         CPU_FREE (set);
+        /* Retry with some larger cpu_set_t.  */
         alloc_count *= 2;
         if (alloc_count == 0)
+          /* Integer overflow.  Avoid an endless loop.  */
           return 0;
       }
   }
-#elif HAVE_SCHED_GETAFFINITY_LIKE_GLIBC /* glibc >= 2.3.4 */
+# endif
   {
     cpu_set_t set;
 
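For illustration, here is a minimal, self-contained sketch of the two-tier strategy the patch describes: first try a dynamically-sized cpu_set_t (so systems with more than 1024 CPUs are handled), then fall back to a stack-allocated cpu_set_t when CPU_ALLOC fails, e.g. under memory pressure. This is not the lib/nproc.c code; the function name count_cpus_in_affinity_mask is made up for this example, and it assumes a glibc system with _GNU_SOURCE defined.

/* Sketch only: illustrates the two ways of calling sched_getaffinity()
   described in the comment added by the patch.  */

#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>

static unsigned long
count_cpus_in_affinity_mask (void)
{
  /* First way: dynamically-sized cpu_set_t (glibc >= 2.6), needed on
     systems with more than 1024 CPUs.  */
  for (unsigned int alloc_count = 1024;
       alloc_count != 0;              /* stop if doubling overflows to 0 */
       alloc_count *= 2)
    {
      cpu_set_t *set = CPU_ALLOC (alloc_count);
      if (set == NULL)
        /* Out of memory: fall back to the stack-allocated cpu_set_t.  */
        break;
      size_t size = CPU_ALLOC_SIZE (alloc_count);
      if (sched_getaffinity (0, size, set) == 0)
        {
          unsigned long count = CPU_COUNT_S (size, set);
          CPU_FREE (set);
          if (count > 0)
            return count;
          break;
        }
      int saved_errno = errno;
      CPU_FREE (set);
      if (saved_errno != EINVAL)
        /* Some error other than "the mask does not fit"; give up on the
           dynamic path and try the static one.  */
        break;
      /* EINVAL: the kernel's mask is larger; retry with a bigger set.  */
    }

  /* Second way: statically-sized cpu_set_t (glibc >= 2.3.4).  Sees at most
     CPU_SETSIZE (normally 1024) CPUs, but needs no heap allocation, so it
     still works when memory is tight.  */
  {
    cpu_set_t set;
    if (sched_getaffinity (0, sizeof set, &set) == 0)
      {
        unsigned long count = CPU_COUNT (&set);
        if (count > 0)
          return count;
      }
  }
  return 0;
}

int
main (void)
{
  printf ("CPUs in affinity mask: %lu\n", count_cpus_in_affinity_mask ());
  return 0;
}

The fallback mirrors the patch's motivation: a statically-sized set needs no heap allocation, so the affinity mask can still be consulted when malloc fails, at the cost of seeing only the first CPU_SETSIZE CPUs.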