diff --git a/glibc-RHEL-119437-1.patch b/glibc-RHEL-119437-1.patch new file mode 100644 index 0000000..49fa2b5 --- /dev/null +++ b/glibc-RHEL-119437-1.patch @@ -0,0 +1,462 @@ +commit 3270c50e4853d9356eb5892364c52cd1558860ec +Author: DJ Delorie +Date: Fri May 2 20:51:18 2025 -0400 + + manual: add more pthread functions + + Add stubs and partial docs for many undocumented pthreads functions. + While neither exhaustive nor complete, gives minimal usage docs + for many functions and expands the pthreads chapters, making it + easier to continue improving this section in the future. + + Reviewed-by: Collin Funk + +Conflicts: + manual/threads.texi + (Downstream is missing commit ee33752b5d8c53eb5a357f67ab7ab6288fba5fd9) + +diff --git a/manual/threads.texi b/manual/threads.texi +index 7b9c79636c9cc79c..5bf7fdb3157b5ea0 100644 +--- a/manual/threads.texi ++++ b/manual/threads.texi +@@ -552,14 +552,97 @@ get different values identified by the same key. On failure, + This section describes the @glibcadj{} POSIX Threads implementation. + + @menu ++* Creating and Destroying Threads:: + * Thread-specific Data:: Support for creating and + managing thread-specific data + * POSIX Semaphores:: Support for process and thread + synchronization using semaphores ++* POSIX Barriers:: Support for process and thread ++ synchronization using barriers ++* POSIX Spin Locks:: Support for process and thread ++ synchronization using spinlocks ++* POSIX Mutexes:: Support for mutual exclusion ++* POSIX Threads Other APIs:: Other Standard functions + * Non-POSIX Extensions:: Additional functions to extend + POSIX Thread functionality + @end menu + ++@node Creating and Destroying Threads ++@subsection Creating and Destroying Threads ++ ++@deftypefun int pthread_create (pthread_t *@var{newthread}, const pthread_attr_t *@var{attr}, void *(*@var{start_routine}) (void *), void *@var{arg}) ++This function creates a new thread with attributes @var{attr}. This ++thread will call @var{start_routine} and pass it @var{arg}. If ++@var{start_routine} returns, the thread will exit and the return value ++will become the thread's exit value. The new thread's ID is stored in ++@var{newthread}. Returns 0 on success. ++@manpagefunctionstub{pthread_create, 3} ++@end deftypefun ++ ++@deftypefun int pthread_detach (pthread_t @var{th}) ++Indicates that thread @var{th} must clean up after itself ++automatically when it exits, as the parent thread will not call ++@code{pthread_join} on it. ++@manpagefunctionstub{pthread_detach, 3} ++@end deftypefun ++ ++@deftypefun int pthread_join (pthread_t @var{th}, void **@var{thread_return}) ++Waits for thread @var{th} to exit, and stores its return value in ++@var{thread_return}. ++@manpagefunctionstub{pthread_join, 3} ++@end deftypefun ++ ++@deftypefun int pthread_kill (pthread_t @var{th}, int @var{signal}) ++Sends signal @var{signal} to thread @var{th}. ++@manpagefunctionstub{pthread_kill, 3} ++@end deftypefun ++ ++@deftypefun pthread_t pthread_self (void) ++Returns the ID of the thread which performed the call. ++@manpagefunctionstub{pthread_self, 3} ++@end deftypefun ++ ++Each thread has a set of attributes which are passed to ++@code{pthread_create} via the @code{pthread_attr_t} type, which should ++be considered an opaque type. ++ ++@deftypefun int pthread_attr_init (pthread_attr_t *@var{attr}) ++Initializes @var{attr} to its default values and allocates any ++resources required. Once initialized, @var{attr} can be modified by ++other @code{pthread_attr_*} functions, or used by ++@code{pthread_create}. 
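++
++As an illustration, here is a minimal sketch of using an attributes
++object to start a detached thread; the worker function
++@code{thread_main} is hypothetical and error handling is omitted:
++
++@example
++#include <pthread.h>
++
++/* Hypothetical worker function.  */
++static void *
++thread_main (void *arg)
++@{
++  /* ... do the work ... */
++  return NULL;
++@}
++
++void
++start_worker (void *arg)
++@{
++  pthread_attr_t attr;
++  pthread_t tid;
++
++  pthread_attr_init (&attr);
++  /* The new thread cleans up after itself; it is never joined.  */
++  pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
++  pthread_create (&tid, &attr, thread_main, arg);
++  pthread_attr_destroy (&attr);
++@}
++@end example
++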
++@manpagefunctionstub{pthread_attr_init, 3} ++@end deftypefun ++ ++@deftypefun int pthread_attr_destroy (pthread_attr_t *@var{attr}) ++When no longer needed, @var{attr} should be destroyed with this ++function, which releases any resources allocated. Note that ++@var{attr} is only needed for the @code{pthread_create} call, not for ++the running thread itself. ++@manpagefunctionstub{pthread_attr_destroy, 3} ++@end deftypefun ++ ++@deftypefun int pthread_attr_setdetachstate (pthread_attr_t *@var{attr}, int @var{detachstate}) ++Sets the detach state attribute for @var{attr}. This attribute may be one of the following: ++ ++@table @code ++@item PTHREAD_CREATE_DETACHED ++Causes the created thread to be detached, that is, as if ++@code{pthread_detach} had been called on it. ++ ++@item PTHREAD_CREATE_JOINABLE ++Causes the created thread to be joinable, that is, @code{pthread_join} ++must be called on it. ++@end table ++ ++@manpagefunctionstub{pthread_attr_setdetachstate, 3} ++@end deftypefun ++ ++@deftypefun int pthread_attr_getdetachstate (const pthread_attr_t *@var{attr}, int *@var{detachstate}) ++Gets the detach state attribute from @var{attr}. ++@manpagefunctionstub{pthread_attr_getdetachstate, 3} ++@end deftypefun ++ + @node Thread-specific Data + @subsection Thread-specific Data + +@@ -718,6 +801,272 @@ against the clock specified by @var{clockid} rather than + @end deftypefun + + ++@node POSIX Barriers ++@subsection POSIX Barriers ++ ++A POSIX barrier works as follows: a file-local or global ++@code{pthread_barrier_t} object is initialized via ++@code{pthread_barrier_init} to require @var{count} threads to wait on ++it. After that, up to @var{count}-1 threads will wait on the barrier ++via @code{pthread_barrier_wait}. None of these calls will return ++until @var{count} threads are waiting via the next call to ++@code{pthread_barrier_wait}, at which point, all of these calls will ++return. The net result is that @var{count} threads will be ++synchronized at that point. At some point after this, the barrier is ++destroyed via @code{pthread_barrier_destroy}. Note that a barrier ++must be destroyed before being re-initialized, to ensure that all ++threads are properly synchronized, but need not be destroyed and ++re-initialized before being reused. ++ ++@deftypefun int pthread_barrier_init (pthread_barrier_t *@var{barrier}, const pthread_barrierattr_t *@var{attr}, unsigned int @var{count}) ++@standards{POSIX, pthread.h} ++@safety{@prelim{}@mtsafe{}@assafe{}@acsafe{}} ++ ++This function initializes a barrier to synchronize @var{count} ++threads. The barrier must be uninitialized or destroyed before it is ++initialized; attempting to initialize an in-use barrier results in ++undefined behavior. ++ ++The @var{attr} argument to @code{pthread_barrier_init} is typically ++NULL for a process-private barrier, but may be used to share a barrier ++across processes (documentation TBD). ++ ++On success, 0 is returned. On error, one of the following is returned: ++ ++@table @code ++@item EINVAL ++Either @var{count} is zero, or is large enough to cause an internal ++overflow. ++@end table ++ ++@end deftypefun ++ ++@deftypefun int pthread_barrier_wait (pthread_barrier_t *@var{barrier}) ++@standards{POSIX, pthread.h} ++@safety{@prelim{}@mtsafe{}@assafe{}@acsafe{}} ++ ++This function synchronizes threads. The first @var{count}-1 threads ++that wait on @var{barrier} will just wait. The next thread that waits ++on @var{barrier} will cause all @var{count} threads' calls to return. 
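++
++As an illustration, here is a minimal sketch of synchronizing a fixed
++number of threads at a barrier; @code{NTHREADS} and the function
++@code{worker} are hypothetical and error handling is omitted:
++
++@example
++#include <pthread.h>
++
++#define NTHREADS 4
++static pthread_barrier_t barrier;
++
++static void *
++worker (void *arg)
++@{
++  /* ... per-thread setup ... */
++  /* No thread gets past this point until all NTHREADS threads
++     have reached it.  */
++  pthread_barrier_wait (&barrier);
++  /* ... all threads continue together ... */
++  return NULL;
++@}
++
++void
++run_workers (void)
++@{
++  pthread_t tids[NTHREADS];
++  int i;
++
++  pthread_barrier_init (&barrier, NULL, NTHREADS);
++  for (i = 0; i < NTHREADS; i++)
++    pthread_create (&tids[i], NULL, worker, NULL);
++  for (i = 0; i < NTHREADS; i++)
++    pthread_join (tids[i], NULL);
++  pthread_barrier_destroy (&barrier);
++@}
++@end example
++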
++The @var{barrier} must be initialized with @code{pthread_barrier_init} ++and not yet destroyed with @code{pthread_barrier_destroy}. ++ ++The return value of this function is ++@code{PTHREAD_BARRIER_SERIAL_THREAD} for one thread (it is unspecified ++which thread) and 0 for the remainder, for each batch of @var{count} ++threads synchronized. After such a batch is synchronized, the ++@var{barrier} will begin synchronizing the next @var{count} threads. ++ ++@end deftypefun ++ ++ ++@deftypefun int pthread_barrier_destroy (pthread_barrier_t *@var{barrier}) ++@standards{POSIX, pthread.h} ++@safety{@prelim{}@mtsafe{}@assafe{}@acsafe{}} ++ ++Destroys @var{barrier} and releases any resources it may have ++allocated. A barrier must not be destroyed if any thread is waiting ++on it, or if it was not initialized. This call always succeeds and ++returns 0. ++ ++@end deftypefun ++ ++@node POSIX Spin Locks ++@subsection POSIX Spin Locks ++ ++A spinlock is a low-overhead lock suitable for use in a realtime ++thread where it's known that the thread won't be paused by the ++scheduler. Non-realtime threads should use mutexes instead. ++ ++@deftypefun int pthread_spin_init (pthread_spinlock_t *@var{lock}, int @var{pshared}) ++Initializes a spinlock. @var{pshared} is one of: ++ ++@table @code ++@item PTHREAD_PROCESS_PRIVATE ++This spinlock is private to the process which created it. ++ ++@item PTHREAD_PROCESS_SHARED ++This spinlock is shared across any process that can access it, for ++example through shared memory. ++@end table ++ ++@manpagefunctionstub{pthread_spin_init, 3} ++@end deftypefun ++ ++@deftypefun int pthread_spin_destroy (pthread_spinlock_t *@var{lock}) ++Destroys a spinlock and releases any resources it held. ++@manpagefunctionstub{pthread_spin_destroy, 3} ++@end deftypefun ++ ++@deftypefun int pthread_spin_lock (pthread_spinlock_t *@var{lock}) ++Locks a spinlock. Only one thread at a time can lock a spinlock. If ++another thread has locked this spinlock, the calling thread waits ++until it is unlocked, then attempts to lock it. ++@manpagefunctionstub{pthread_spin_lock, 3} ++@end deftypefun ++ ++@deftypefun int pthread_spin_unlock (pthread_spinlock_t *@var{lock}) ++Unlocks a spinlock. If one or more threads are waiting for the lock ++to be unlocked, one of them (unspecified which) will succeed in ++locking it, and will return from @code{pthread_spin_lock}. ++@manpagefunctionstub{pthread_spin_unlock, 3} ++@end deftypefun ++ ++@deftypefun int pthread_spin_trylock (pthread_spinlock_t *@var{lock}) ++Like @code{pthread_spin_lock}, but instead of waiting, returns 0 if ++the lock was successfully acquired, or @code{EBUSY} if it was already ++locked. ++@manpagefunctionstub{pthread_spin_trylock, 3} ++@end deftypefun ++ ++@node POSIX Mutexes ++@subsection POSIX Mutexes ++ ++A @emph{mutex}, or ``mutual exclusion'', is a way of guaranteeing that ++only one thread at a time is able to execute a protected bit of code ++(or access any other resource). Two or more threads trying to execute ++the same code at the same time will instead take turns, according to ++the mutex. ++ ++A mutex is much like a spinlock, but implemented in a way that is more ++appropriate for use in non-realtime threads, and is more ++resource-conserving. ++ ++@deftypefun int pthread_mutex_init (pthread_mutex_t *@var{mutex}, const pthread_mutexattr_t *@var{mutexattr}) ++Initializes a mutex. ++@manpagefunctionstub{pthread_mutex_init, 3} ++@end deftypefun ++ ++@deftypefun int pthread_mutex_destroy (pthread_mutex_t *@var{mutex}) ++Destroys a no-longer-needed mutex. 
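++
++As an illustration, here is a minimal sketch of protecting a shared
++counter with a mutex; the variable @code{counter} and the functions
++shown are hypothetical and error handling is omitted:
++
++@example
++#include <pthread.h>
++
++static pthread_mutex_t lock;
++static long counter;
++
++void
++setup_lock (void)
++@{
++  pthread_mutex_init (&lock, NULL);
++@}
++
++void
++increment_counter (void)
++@{
++  /* Only one thread at a time executes the protected region.  */
++  pthread_mutex_lock (&lock);
++  counter++;
++  pthread_mutex_unlock (&lock);
++@}
++
++void
++teardown_lock (void)
++@{
++  pthread_mutex_destroy (&lock);
++@}
++@end example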
++@manpagefunctionstub{pthread_mutex_destroy, 3} ++@end deftypefun ++ ++@deftypefun int pthread_mutex_lock (pthread_mutex_t *@var{mutex}) ++Only one thread at a time may lock @var{mutex}, and must unlock it ++when appropriate. If a thread calls @code{pthread_mutex_lock} while ++@var{mutex} is locked by another thread, the calling thread will wait ++until @var{mutex} is unlocked, then attempt to lock it. Since there ++may be many threads waiting at the same time, the calling thread may ++need to repeat this wait-and-try many times before it successfully ++locks @var{mutex}, at which point the call to ++@code{pthread_mutex_lock} returns successfully. ++ ++This function may fail with the following errors: ++ ++@table @code ++@item EAGAIN ++Too many locks were attempted. ++ ++@item EDEADLK ++The calling thread already holds a lock on @var{mutex}. ++ ++@item EINVAL ++@var{mutex} has an invalid kind, or an invalid priority was requested. ++ ++@item ENOTRECOVERABLE ++The thread holding the lock died in a way that the system cannot recover from. ++ ++@item EOWNERDEAD ++The thread holding the lock died in a way that the system can recover from. ++ ++@end table ++ ++@manpagefunctionstub{pthread_mutex_lock, 3} ++@end deftypefun ++ ++@deftypefun int pthread_mutex_trylock (pthread_mutex_t *@var{mutex}) ++Like @code{pthread_mutex_lock} but if the lock cannot be immediately ++obtained, returns @code{EBUSY}. ++@manpagefunctionstub{pthread_mutex_trylock, 3} ++@end deftypefun ++ ++@deftypefun int pthread_mutex_unlock (pthread_mutex_t *@var{mutex}) ++Unlocks @var{mutex}. Returns @code{EPERM} if the calling thread doesn't hold ++the lock on @var{mutex}. ++@manpagefunctionstub{pthread_mutex_unlock, 3} ++@end deftypefun ++ ++@deftypefun int pthread_mutex_clocklock (pthread_mutex_t *@var{mutex}, clockid_t @var{clockid}, const struct timespec *@var{abstime}) ++@end deftypefun ++ ++@deftypefun int pthread_mutex_timedlock (pthread_mutex_t *@var{mutex}, const struct timespec *@var{abstime}) ++ ++These two functions act like @code{pthread_mutex_lock} with the ++exception that the call will not wait past time @var{abstime}, as ++reported by @var{clockid} or (for @code{pthread_mutex_timedlock}) ++@code{CLOCK_REALTIME}. If @var{abstime} is reached and the mutex ++still cannot be locked, an @code{ETIMEDOUT} error is returned. ++If the time has already passed when these functions ++are called, and the mutex cannot be immediately locked, the function ++times out immediately. ++@end deftypefun ++ ++@deftypefun int pthread_mutexattr_init (pthread_mutexattr_t *@var{attr}) ++Initializes @var{attr} with default values. ++@manpagefunctionstub{pthread_mutexattr_init, 3} ++@end deftypefun ++ ++@deftypefun int pthread_mutexattr_destroy (pthread_mutexattr_t *@var{attr}) ++Destroys @var{attr} and releases any resources it may have allocated. ++@manpagefunctionstub{pthread_mutexattr_destroy, 3} ++@end deftypefun ++ ++@deftypefun int pthread_mutexattr_settype (pthread_mutexattr_t *@var{attr}, int @var{kind}) ++This function allows you to choose the kind of mutex created with ++@var{attr}, by changing the attributes used to initialize it. The ++values for @var{kind} are: ++ ++@table @code ++@item PTHREAD_MUTEX_NORMAL ++No attempt to detect deadlock is performed; a thread will deadlock if ++it tries to lock this mutex while it already holds a lock on it. ++Attempting to unlock a mutex not locked by the calling thread results ++in undefined behavior. ++ ++@item PTHREAD_MUTEX_ERRORCHECK ++Attempts to relock a mutex, or unlock a mutex not held, will result in an error. 
++ ++@item PTHREAD_MUTEX_RECURSIVE ++Attempts to relock a mutex already held succeed, but require a ++matching number of unlocks to release it. Attempts to unlock a mutex ++not held will result in an error. ++ ++@item PTHREAD_MUTEX_DEFAULT ++Attempts to relock a mutex, or unlock a mutex not held, will result in ++undefined behavior. This is the default. ++ ++@end table ++@end deftypefun ++ ++@deftypefun int pthread_mutexattr_gettype (const pthread_mutexattr_t *@var{attr}, int *@var{kind}) ++This function stores in @var{kind} the mutex kind currently set in ++@var{attr}. ++@end deftypefun ++ ++@node POSIX Threads Other APIs ++@subsection POSIX Threads Other APIs ++ ++@deftypefun int pthread_equal (pthread_t @var{thread1}, pthread_t @var{thread2}) ++Compares two thread IDs. If they are the same, returns nonzero, else returns zero. ++@manpagefunctionstub{pthread_equal, 3} ++@end deftypefun ++ ++@deftypefun int pthread_getcpuclockid (pthread_t @var{th}, __clockid_t *@var{clock_id}) ++Gets the CPU-time clock of thread @var{th} and stores its identifier ++in @var{clock_id}. ++@manpagefunctionstub{pthread_getcpuclockid, 3} ++@end deftypefun ++ ++@deftypefun int pthread_once (pthread_once_t *@var{once_control}, void (*@var{init_routine}) (void)) ++Calls @var{init_routine} once for each @var{once_control}, which must ++be statically initialized to @code{PTHREAD_ONCE_INIT}. Subsequent ++calls to @code{pthread_once} with the same @var{once_control} do not ++call @var{init_routine}, even in multi-threaded environments. ++@manpagefunctionstub{pthread_once, 3} ++@end deftypefun ++ ++@deftypefun int pthread_sigmask (int @var{how}, const __sigset_t *@var{newmask}, __sigset_t *@var{oldmask}) ++@manpagefunctionstub{pthread_sigmask, 3} ++@end deftypefun ++ + @node Non-POSIX Extensions + @subsection Non-POSIX Extensions + +@@ -729,8 +1078,10 @@ the standard. + * Default Thread Attributes:: Setting default attributes for + threads in a process. + * Initial Thread Signal Mask:: Setting the initial mask of threads. ++* Thread CPU Affinity:: Limiting which CPUs can run a thread. + * Waiting with Explicit Clocks:: Functions for waiting with an + explicit clock specification. ++* Thread Names:: Changing the name of a thread. + * Single-Threaded:: Detecting single-threaded execution. + * Restartable Sequences:: Linux-specific restartable sequences + integration. +@@ -849,6 +1200,36 @@ signal mask and use @code{pthread_sigmask} to apply it to the thread. + If the signal mask was copied to a heap allocation, the copy should be + freed. + ++@node Thread CPU Affinity ++@subsubsection Thread CPU Affinity ++ ++Processes and threads normally run on any available CPU. However, ++they can be given an @emph{affinity} to one or more CPUs, which limits ++them to the CPU set specified. ++ ++@deftypefun int pthread_attr_setaffinity_np (pthread_attr_t *@var{attr}, size_t @var{cpusetsize}, const cpu_set_t *@var{cpuset}) ++Sets the CPU affinity in @var{attr}. The CPU affinity ++controls which CPUs a thread may execute on. @xref{CPU Affinity}. ++@manpagefunctionstub{pthread_attr_setaffinity_np, 3} ++@end deftypefun ++ ++@deftypefun int pthread_attr_getaffinity_np (const pthread_attr_t *@var{attr}, size_t @var{cpusetsize}, cpu_set_t *@var{cpuset}) ++Gets the CPU affinity settings from @var{attr}. ++@manpagefunctionstub{pthread_attr_getaffinity_np, 3} ++@end deftypefun ++ ++@deftypefun int pthread_setaffinity_np (pthread_t @var{th}, size_t @var{cpusetsize}, const cpu_set_t *@var{cpuset}) ++Sets the CPU affinity for thread @var{th}. The CPU affinity controls ++which CPUs a thread may execute on. 
@xref{CPU Affinity}. ++@manpagefunctionstub{pthread_setaffinity_np, 3} ++@end deftypefun ++ ++@deftypefun int pthread_getaffinity_np (pthread_t @var{th}, size_t @var{cpusetsize}, cpu_set_t *@var{cpuset}) ++Gets the CPU affinity for thread @var{th}. The CPU affinity controls ++which CPUs a thread may execute on. @xref{CPU Affinity}. ++@manpagefunctionstub{pthread_getaffinity_np, 3} ++@end deftypefun ++ + @node Waiting with Explicit Clocks + @subsubsection Functions for Waiting According to a Specific Clock + +@@ -931,6 +1312,21 @@ Currently, @var{clockid} must be either @code{CLOCK_MONOTONIC} or + The @code{sem_clockwait} function also works using a @code{clockid_t} + argument. @xref{POSIX Semaphores}. + ++@node Thread Names ++@subsubsection Thread Names ++ ++@deftypefun int pthread_setname_np (pthread_t @var{th}, const char *@var{name}) ++Gives thread @var{th} the name @var{name}. This name shows up in ++@code{ps} when listing individual threads. @var{name} is a ++NUL-terminated string of no more than 15 non-NUL characters. ++@manpagefunctionstub{pthread_setname_np, 3} ++@end deftypefun ++ ++@deftypefun int pthread_getname_np (pthread_t @var{th}, char *@var{buf}, size_t @var{buflen}) ++Retrieves the name of thread @var{th}, storing it in @var{buf}, which ++is @var{buflen} bytes long. ++@manpagefunctionstub{pthread_getname_np, 3} ++@end deftypefun ++ + @node Single-Threaded + @subsubsection Detecting Single-Threaded Execution + diff --git a/glibc-RHEL-119437-2.patch b/glibc-RHEL-119437-2.patch new file mode 100644 index 0000000..bcb2bb3 --- /dev/null +++ b/glibc-RHEL-119437-2.patch @@ -0,0 +1,231 @@ +commit 21e54f160f6245f959cca1f48bad9cca487c2570 +Author: DJ Delorie +Date: Thu Apr 24 18:03:21 2025 -0400 + + manual: add remaining CPU_* macros + + Adds remaining CPU_* macros, including the CPU_*_S macros + for dynamic-sized cpu sets. + + Reviewed-by: Collin Funk + +diff --git a/manual/resource.texi b/manual/resource.texi +index 685ddd6defc57818..409277d064cc32e1 100644 +--- a/manual/resource.texi ++++ b/manual/resource.texi +@@ -1362,26 +1362,73 @@ extent the Linux kernel interface. + @standards{GNU, sched.h} + This data set is a bitset where each bit represents a CPU. How the + system's CPUs are mapped to bits in the bitset is system dependent. +-The data type has a fixed size; in the unlikely case that the number +-of bits are not sufficient to describe the CPUs of the system a +-different interface has to be used. ++The data type has a fixed size; it is strongly recommended to allocate ++a dynamically sized set based on the actual number of CPUs detected, ++such as via @code{get_nprocs_conf}, and use the @code{CPU_*_S} ++variants instead of the fixed-size ones. + + This type is a GNU extension and is defined in @file{sched.h}. + @end deftp + +-To manipulate the bitset, to set and reset bits, a number of macros are +-defined. Some of the macros take a CPU number as a parameter. Here +-it is important to never exceed the size of the bitset. The following +-macro specifies the number of bits in the @code{cpu_set_t} bitset. ++To manipulate the bitset, to set and reset bits, and thus add and ++remove CPUs from the sets, a number of macros are defined. Some of ++the macros take a CPU number as a parameter. Here it is important to ++never exceed the size of the bitset, either @code{CPU_SETSIZE} for ++fixed sets or the allocated size for dynamic sets. For each macro ++there is a fixed-size version (documented below) and a dynamic-sized ++version (with a @code{_S} suffix). 
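++
++For example, the fixed-size macros can restrict the calling thread to
++the first two CPUs (a minimal sketch; error handling is omitted, a
++system with at least two CPUs is assumed, and @code{sched_setaffinity}
++is documented later in this section):
++
++@example
++#define _GNU_SOURCE
++#include <sched.h>
++
++void
++use_first_two_cpus (void)
++@{
++  cpu_set_t set;
++
++  CPU_ZERO (&set);      /* Start with an empty set.  */
++  CPU_SET (0, &set);    /* Allow CPU 0.  */
++  CPU_SET (1, &set);    /* Allow CPU 1.  */
++  sched_setaffinity (0, sizeof (set), &set);
++@}
++@end example
++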
+ + @deftypevr Macro int CPU_SETSIZE + @standards{GNU, sched.h} + The value of this macro is the maximum number of CPUs which can be +-handled with a @code{cpu_set_t} object. ++handled with a fixed @code{cpu_set_t} object. + @end deftypevr + ++For applications that require CPU sets larger than the built-in size, ++a set of macros that support dynamically-sized sets are defined. ++ ++@deftypefn Macro size_t CPU_ALLOC_SIZE (size_t @var{count}) ++@standards{GNU, sched.h} ++@safety{@prelim{}@mtsafe{}@assafe{}@acsafe{}} ++@c CPU_ALLOC_SIZE ok ++@c __CPU_ALLOC_SIZE ok ++Given a count of CPUs to hold, returns the size of the set to ++allocate. This return value is appropriate to be used in the *_S macros. ++ ++This macro is a GNU extension and is defined in @file{sched.h}. ++@end deftypefn ++ ++@deftypefn Macro {cpu_set_t *} CPU_ALLOC (size_t @var{count}) ++@standards{GNU, sched.h} ++@safety{@prelim{}@mtsafe{}@asunsafe{@asulock{}}@acunsafe{@aculock{} @acsfd{} @acsmem{}}} ++@c CPU_ALLOC ++@c __CPU_ALLOC ++@c __sched_cpualloc ++@c malloc ++Given the count of CPUs to hold, returns a set large enough to hold ++them; that is, the resulting set will be valid for CPUs numbered 0 ++through @var{count}-1, inclusive. This set must be freed via ++@code{CPU_FREE} to avoid memory leaks. Warning: the argument is the ++CPU @emph{count} and not the size returned by @code{CPU_ALLOC_SIZE}. ++ ++This macro is a GNU extension and is defined in @file{sched.h}. ++@end deftypefn ++ ++@deftypefn Macro void CPU_FREE (cpu_set_t *@var{set}) ++@standards{GNU, sched.h} ++@safety{@prelim{}@mtsafe{}@asunsafe{@asulock{}}@acunsafe{@aculock{} @acsfd{} @acsmem{}}} ++@c CPU_FREE ++@c __CPU_FREE ++@c __sched_cpufree ++@c free ++Frees a CPU set previously allocated by @code{CPU_ALLOC}. ++ ++This macro is a GNU extension and is defined in @file{sched.h}. ++@end deftypefn ++ + The type @code{cpu_set_t} should be considered opaque; all +-manipulation should happen via the next four macros. ++manipulation should happen via the @code{CPU_*} macros described ++below. + + @deftypefn Macro void CPU_ZERO (cpu_set_t *@var{set}) + @standards{GNU, sched.h} +@@ -1424,6 +1471,39 @@ evaluated more than once. + This macro is a GNU extension and is defined in @file{sched.h}. + @end deftypefn + ++@deftypefn Macro {cpu_set_t *} CPU_AND (cpu_set_t *@var{dest}, cpu_set_t *@var{src1}, cpu_set_t *@var{src2}) ++@standards{GNU, sched.h} ++@safety{@prelim{}@mtsafe{}@assafe{}@acsafe{}} ++@c CPU_AND ok ++@c __CPU_OP_S ok ++This macro populates @var{dest} with only those CPUs included in both ++@var{src1} and @var{src2}. Its value is @var{dest}. ++ ++This macro is a GNU extension and is defined in @file{sched.h}. ++@end deftypefn ++ ++@deftypefn Macro {cpu_set_t *} CPU_OR (cpu_set_t *@var{dest}, cpu_set_t *@var{src1}, cpu_set_t *@var{src2}) ++@standards{GNU, sched.h} ++@safety{@prelim{}@mtsafe{}@assafe{}@acsafe{}} ++@c CPU_OR ok ++@c __CPU_OP_S ok ++This macro populates @var{dest} with those CPUs included in either ++@var{src1} or @var{src2}. Its value is @var{dest}. ++ ++This macro is a GNU extension and is defined in @file{sched.h}. ++@end deftypefn ++ ++@deftypefn Macro {cpu_set_t *} CPU_XOR (cpu_set_t *@var{dest}, cpu_set_t *@var{src1}, cpu_set_t *@var{src2}) ++@standards{GNU, sched.h} ++@safety{@prelim{}@mtsafe{}@assafe{}@acsafe{}} ++@c CPU_XOR ok ++@c __CPU_OP_S ok ++This macro populates @var{dest} with those CPUs included in either ++@var{src1} or @var{src2}, but not both. Its value is @var{dest}. 
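++
++As an illustration, here is a minimal sketch of combining two
++fixed-size sets with these macros (the function @code{combine_sets}
++is hypothetical):
++
++@example
++#define _GNU_SOURCE
++#include <sched.h>
++
++void
++combine_sets (cpu_set_t *a, cpu_set_t *b)
++@{
++  cpu_set_t both, either, only_one;
++
++  CPU_AND (&both, a, b);      /* CPUs present in both A and B.  */
++  CPU_OR (&either, a, b);     /* CPUs present in A, B, or both.  */
++  CPU_XOR (&only_one, a, b);  /* CPUs present in exactly one of them.  */
++@}
++@end example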
++ ++This macro is a GNU extension and is defined in @file{sched.h}. ++@end deftypefn ++ + @deftypefn Macro int CPU_ISSET (int @var{cpu}, const cpu_set_t *@var{set}) + @standards{GNU, sched.h} + @safety{@prelim{}@mtsafe{}@assafe{}@acsafe{}} +@@ -1440,6 +1520,54 @@ evaluated more than once. + This macro is a GNU extension and is defined in @file{sched.h}. + @end deftypefn + ++@deftypefn Macro int CPU_COUNT (const cpu_set_t *@var{set}) ++@standards{GNU, sched.h} ++@safety{@prelim{}@mtsafe{}@assafe{}@acsafe{}} ++@c CPU_COUNT ok ++@c __CPU_COUNT_S ok ++@c __sched_cpucount ok ++@c countbits ok ++This macro returns the count of CPUs (bits) set in @var{set}. ++ ++This macro is a GNU extension and is defined in @file{sched.h}. ++@end deftypefn ++ ++@deftypefn Macro int CPU_EQUAL (cpu_set_t *@var{src1}, cpu_set_t *@var{src2}) ++@standards{GNU, sched.h} ++@safety{@prelim{}@mtsafe{}@assafe{}@acsafe{}} ++@c CPU_EQUAL ok ++@c __CPU_EQUAL_S ok ++@c memcmp ok ++This macro returns nonzero if the two sets @var{src1} and @var{src2} ++have the same contents; that is, the set of CPUs represented by both ++sets is identical. ++ ++This macro is a GNU extension and is defined in @file{sched.h}. ++@end deftypefn ++ ++@deftypefn Macro void CPU_ZERO_S (size_t @var{size}, cpu_set_t *@var{set}) ++@end deftypefn ++@deftypefn Macro void CPU_SET_S (int @var{cpu}, size_t @var{size}, cpu_set_t *@var{set}) ++@end deftypefn ++@deftypefn Macro void CPU_CLR_S (int @var{cpu}, size_t @var{size}, cpu_set_t *@var{set}) ++@end deftypefn ++@deftypefn Macro {cpu_set_t *} CPU_AND_S (size_t @var{size}, cpu_set_t *@var{dest}, cpu_set_t *@var{src1}, cpu_set_t *@var{src2}) ++@end deftypefn ++@deftypefn Macro {cpu_set_t *} CPU_OR_S (size_t @var{size}, cpu_set_t *@var{dest}, cpu_set_t *@var{src1}, cpu_set_t *@var{src2}) ++@end deftypefn ++@deftypefn Macro {cpu_set_t *} CPU_XOR_S (size_t @var{size}, cpu_set_t *@var{dest}, cpu_set_t *@var{src1}, cpu_set_t *@var{src2}) ++@end deftypefn ++@deftypefn Macro int CPU_ISSET_S (int @var{cpu}, size_t @var{size}, const cpu_set_t *@var{set}) ++@end deftypefn ++@deftypefn Macro int CPU_COUNT_S (size_t @var{size}, const cpu_set_t *@var{set}) ++@end deftypefn ++@deftypefn Macro int CPU_EQUAL_S (size_t @var{size}, cpu_set_t *@var{src1}, cpu_set_t *@var{src2}) ++@end deftypefn ++ ++Each of these macros performs the same action as its non-@code{_S} variant, ++but takes a @var{size} argument to specify the set size. This ++@var{size} argument is as returned by the @code{CPU_ALLOC_SIZE} macro, ++defined above. + + CPU bitsets can be constructed from scratch or the currently installed + affinity mask can be retrieved from the system. +@@ -1525,6 +1653,37 @@ The operating system does not support this function. + This function is Linux-specific and is declared in @file{sched.h}. 
+ @end deftypefun + ++Here's an example of how to use most of the above to limit the number ++of CPUs a process runs on, not including error handling or good logic ++on CPU choices: ++ ++@example ++#define _GNU_SOURCE ++#include <sched.h> ++#include <sys/sysinfo.h> ++void ++limit_cpus (void) ++@{ ++ unsigned int mycpu; ++ size_t nproc, cssz, cpu; ++ cpu_set_t *cs; ++ getcpu (&mycpu, NULL); ++ nproc = get_nprocs_conf (); ++ cssz = CPU_ALLOC_SIZE (nproc); ++ cs = CPU_ALLOC (nproc); ++ sched_getaffinity (0, cssz, cs); ++ if (CPU_COUNT_S (cssz, cs) > nproc / 2) ++ @{ ++ for (cpu = nproc / 2; cpu < nproc; cpu++) ++ if (cpu != mycpu) ++ CPU_CLR_S (cpu, cssz, cs); ++ sched_setaffinity (0, cssz, cs); ++ @} ++ CPU_FREE (cs); ++@} ++@end example ++ + @node Memory Resources + @section Querying memory available resources + diff --git a/glibc-RHEL-119437-3.patch b/glibc-RHEL-119437-3.patch new file mode 100644 index 0000000..cab120c --- /dev/null +++ b/glibc-RHEL-119437-3.patch @@ -0,0 +1,35 @@ +commit 579f8668816b35f8302e89e5255aff60b81938df +Author: DJ Delorie +Date: Thu May 15 16:38:11 2025 -0400 + + manual: add sched_getcpu() + + Reviewed-by: Collin Funk + +diff --git a/manual/resource.texi b/manual/resource.texi +index 409277d064cc32e1..824db78a97b8f387 100644 +--- a/manual/resource.texi ++++ b/manual/resource.texi +@@ -1653,6 +1653,22 @@ The operating system does not support this function. + This function is Linux-specific and is declared in @file{sched.h}. + @end deftypefun + ++@deftypefun int sched_getcpu (void) ++@standards{Linux, } ++ ++Similar to @code{getcpu} but with a simpler interface. On success, ++returns a nonnegative number identifying the CPU on which the current ++thread is running. Returns @code{-1} on failure. The following ++@code{errno} error condition is defined for this function: ++ ++@table @code ++@item ENOSYS ++The operating system does not support this function. ++@end table ++ ++This function is Linux-specific and is declared in @file{sched.h}. ++@end deftypefun ++ + Here's an example of how to use most of the above to limit the number + of CPUs a process runs on, not including error handling or good logic + on CPU choices: diff --git a/glibc.spec b/glibc.spec index 987f765..51e852c 100644 --- a/glibc.spec +++ b/glibc.spec @@ -2357,7 +2357,7 @@ update_gconv_modules_cache () %endif %changelog -* Tue Dec 02 2025 Eduard Abdullin - 2.39-99.alma.1 +* Wed Dec 03 2025 Eduard Abdullin - 2.39-100.alma.1 - Overwrite target for x86_64_v2 - Update patch-git.lua to handle AlmaLinux branches correctly