forked from rpms/glibc
73667d0be6
* Thu Apr 28 2022 Carlos O'Donell <carlos@redhat.com> - 2.34-32 - Sync with upstream branch release/2.34/master, commit c66c92181ddbd82306537a608e8c0282587131de: - posix/glob.c: update from gnulib (BZ#25659) - linux: Fix fchmodat with AT_SYMLINK_NOFOLLOW for 64 bit time_t (BZ#29097) * Wed Apr 27 2022 Carlos O'Donell <carlos@redhat.com> - 2.34-31 - Sync with upstream branch release/2.34/master, commit 55640ed3fde48360a8e8083be4843bd2dc7cecfe: - i386: Regenerate ulps - linux: Fix missing internal 64 bit time_t stat usage - x86: Optimize L(less_vec) case in memcmp-evex-movbe.S - x86: Don't set Prefer_No_AVX512 for processors with AVX512 and AVX-VNNI - x86-64: Use notl in EVEX strcmp [BZ #28646] - x86: Shrink memcmp-sse4.S code size - x86: Double size of ERMS rep_movsb_threshold in dl-cacheinfo.h - x86: Optimize memmove-vec-unaligned-erms.S - x86-64: Replace movzx with movzbl - x86-64: Remove Prefer_AVX2_STRCMP - x86-64: Improve EVEX strcmp with masked load - x86: Replace sse2 instructions with avx in memcmp-evex-movbe.S - x86: Optimize memset-vec-unaligned-erms.S - x86: Optimize memcmp-evex-movbe.S for frontend behavior and size - x86: Modify ENTRY in sysdep.h so that p2align can be specified - x86-64: Optimize load of all bits set into ZMM register [BZ #28252] - scripts/glibcelf.py: Mark as UNSUPPORTED on Python 3.5 and earlier - dlfcn: Do not use rtld_active () to determine ld.so state (bug 29078) - INSTALL: Rephrase -with-default-link documentation - misc: Fix rare fortify crash on wchar funcs. 
[BZ 29030] - Default to --with-default-link=no (bug 25812) - scripts: Add glibcelf.py module * Thu Apr 21 2022 Carlos O'Donell <carlos@redhat.com> - 2.34-30 - Sync with upstream branch release/2.34/master, commit 71326f1f2fd09dafb9c34404765fb88129e94237: - nptl: Fix pthread_cancel cancelhandling atomic operations - mips: Fix mips64n32 64 bit time_t stat support (BZ#29069) - hurd: Fix arbitrary error code - nptl: Handle spurious EINTR when thread cancellation is disabled (BZ#29029) - S390: Add new s390 platform z16. - NEWS: Update fixed bug list for LD_AUDIT backports. - hppa: Fix bind-now audit (BZ #28857) - elf: Replace tst-audit24bmod2.so with tst-audit24bmod2 - Fix elf/tst-audit25a with default bind now toolchains - elf: Fix runtime linker auditing on aarch64 (BZ #26643) - elf: Issue la_symbind for bind-now (BZ #23734) - elf: Fix initial-exec TLS access on audit modules (BZ #28096) - elf: Add la_activity during application exit - elf: Do not fail for failed dlmopen on audit modules (BZ #28061) - elf: Issue audit la_objopen for vDSO - elf: Add audit tests for modules with TLSDESC - elf: Avoid unnecessary slowdown from profiling with audit (BZ#15533) - elf: Add _dl_audit_pltexit - elf: Add _dl_audit_pltenter - elf: Add _dl_audit_preinit - elf: Add _dl_audit_symbind_alt and _dl_audit_symbind - elf: Add _dl_audit_objclose - elf: Add _dl_audit_objsearch - elf: Add _dl_audit_activity_map and _dl_audit_activity_nsid - elf: Add _dl_audit_objopen - elf: Move la_activity (LA_ACT_ADD) after _dl_add_to_namespace_list() (BZ #28062) - elf: Move LAV_CURRENT to link_lavcurrent.h - elf: Fix elf_get_dynamic_info() for bootstrap - elf: Fix dynamic-link.h usage on rtld.c - elf: Fix elf_get_dynamic_info definition - elf: Avoid nested functions in the loader [BZ #27220] - powerpc: Delete unneeded ELF_MACHINE_BEFORE_RTLD_RELOC - hppa: Use END instead of PSEUDO_END in swapcontext.S - hppa: Implement swapcontext in assembler (bug 28960) Resolves: #2003291 Resolves: #2064181 Resolves: 
#2072328 Resolves: #2075713 Resolves: #2077838
835 lines
27 KiB
Diff
835 lines
27 KiB
Diff
commit 290db09546b260a30137d03ce97a857e6f15b648
|
|
Author: Adhemerval Zanella <adhemerval.zanella@linaro.org>
|
|
Date: Wed Apr 6 12:24:42 2022 -0300
|
|
|
|
nptl: Handle spurious EINTR when thread cancellation is disabled (BZ#29029)
|
|
|
|
Some Linux interfaces never restart after being interrupted by a signal
|
|
handler, regardless of the use of SA_RESTART [1]. It means that for
|
|
pthread cancellation, if the target thread disables cancellation with
|
|
pthread_setcancelstate and calls such interfaces (like poll or select),
|
|
it should not see spurious EINTR failures due to the internal SIGCANCEL.
|
|
|
|
However, recent changes made pthread_cancel always send the internal
|
|
signal, regardless of the target thread cancellation status or type.
|
|
To fix it, the previous semantic is restored, where the cancel signal
|
|
is only sent if the target thread has cancellation enabled in
|
|
asynchronous mode.
|
|
|
|
The cancel state and cancel type are moved back to cancelhandling
|
|
and atomic operations are used to synchronize between threads. The
|
|
patch essentially reverts the following commits:
|
|
|
|
8c1c0aae20 nptl: Move cancel type out of cancelhandling
|
|
2b51742531 nptl: Move cancel state out of cancelhandling
|
|
26cfbb7162 nptl: Remove CANCELING_BITMASK
|
|
|
|
However I changed the atomic operation to follow the internal C11
|
|
semantics and removed the MACRO usage, which simplifies the
|
|
resulting code (and removes another usage of the old atomic macros).
|
|
|
|
Checked on x86_64-linux-gnu, i686-linux-gnu, aarch64-linux-gnu,
|
|
and powerpc64-linux-gnu.
|
|
|
|
[1] https://man7.org/linux/man-pages/man7/signal.7.html
|
|
|
|
Reviewed-by: Florian Weimer <fweimer@redhat.com>
|
|
Tested-by: Aurelien Jarno <aurelien@aurel32.net>
|
|
|
|
(cherry-picked from commit 404656009b459658138ed1bd18f3c6cf3863e6a6)
|
|
|
|
diff --git a/manual/process.texi b/manual/process.texi
|
|
index 28c9531f4294f56e..9307379194c6f666 100644
|
|
--- a/manual/process.texi
|
|
+++ b/manual/process.texi
|
|
@@ -68,8 +68,7 @@ until the subprogram terminates before you can do anything else.
|
|
@c CLEANUP_HANDLER @ascuplugin @ascuheap @acsmem
|
|
@c libc_cleanup_region_start @ascuplugin @ascuheap @acsmem
|
|
@c pthread_cleanup_push_defer @ascuplugin @ascuheap @acsmem
|
|
-@c __pthread_testcancel @ascuplugin @ascuheap @acsmem
|
|
-@c CANCEL_ENABLED_AND_CANCELED ok
|
|
+@c cancel_enabled_and_canceled @ascuplugin @ascuheap @acsmem
|
|
@c do_cancel @ascuplugin @ascuheap @acsmem
|
|
@c cancel_handler ok
|
|
@c kill syscall ok
|
|
diff --git a/nptl/allocatestack.c b/nptl/allocatestack.c
|
|
index 554a721f814b53c4..96101753ec2f4323 100644
|
|
--- a/nptl/allocatestack.c
|
|
+++ b/nptl/allocatestack.c
|
|
@@ -120,8 +120,6 @@ get_cached_stack (size_t *sizep, void **memp)
|
|
|
|
/* Cancellation handling is back to the default. */
|
|
result->cancelhandling = 0;
|
|
- result->cancelstate = PTHREAD_CANCEL_ENABLE;
|
|
- result->canceltype = PTHREAD_CANCEL_DEFERRED;
|
|
result->cleanup = NULL;
|
|
result->setup_failed = 0;
|
|
|
|
diff --git a/nptl/cancellation.c b/nptl/cancellation.c
|
|
index 05962784d51fb98b..e97d56f97d7a5698 100644
|
|
--- a/nptl/cancellation.c
|
|
+++ b/nptl/cancellation.c
|
|
@@ -31,19 +31,26 @@ int
|
|
__pthread_enable_asynccancel (void)
|
|
{
|
|
struct pthread *self = THREAD_SELF;
|
|
+ int oldval = atomic_load_relaxed (&self->cancelhandling);
|
|
|
|
- int oldval = THREAD_GETMEM (self, canceltype);
|
|
- THREAD_SETMEM (self, canceltype, PTHREAD_CANCEL_ASYNCHRONOUS);
|
|
+ while (1)
|
|
+ {
|
|
+ int newval = oldval | CANCELTYPE_BITMASK;
|
|
|
|
- int ch = THREAD_GETMEM (self, cancelhandling);
|
|
+ if (newval == oldval)
|
|
+ break;
|
|
|
|
- if (self->cancelstate == PTHREAD_CANCEL_ENABLE
|
|
- && (ch & CANCELED_BITMASK)
|
|
- && !(ch & EXITING_BITMASK)
|
|
- && !(ch & TERMINATED_BITMASK))
|
|
- {
|
|
- THREAD_SETMEM (self, result, PTHREAD_CANCELED);
|
|
- __do_cancel ();
|
|
+ if (atomic_compare_exchange_weak_acquire (&self->cancelhandling,
|
|
+ &oldval, newval))
|
|
+ {
|
|
+ if (cancel_enabled_and_canceled_and_async (newval))
|
|
+ {
|
|
+ self->result = PTHREAD_CANCELED;
|
|
+ __do_cancel ();
|
|
+ }
|
|
+
|
|
+ break;
|
|
+ }
|
|
}
|
|
|
|
return oldval;
|
|
@@ -57,10 +64,29 @@ __pthread_disable_asynccancel (int oldtype)
|
|
{
|
|
/* If asynchronous cancellation was enabled before we do not have
|
|
anything to do. */
|
|
- if (oldtype == PTHREAD_CANCEL_ASYNCHRONOUS)
|
|
+ if (oldtype & CANCELTYPE_BITMASK)
|
|
return;
|
|
|
|
struct pthread *self = THREAD_SELF;
|
|
- self->canceltype = PTHREAD_CANCEL_DEFERRED;
|
|
+ int newval;
|
|
+ int oldval = atomic_load_relaxed (&self->cancelhandling);
|
|
+ do
|
|
+ {
|
|
+ newval = oldval & ~CANCELTYPE_BITMASK;
|
|
+ }
|
|
+ while (!atomic_compare_exchange_weak_acquire (&self->cancelhandling,
|
|
+ &oldval, newval));
|
|
+
|
|
+ /* We cannot return when we are being canceled. Upon return the
|
|
+ thread might be things which would have to be undone. The
|
|
+ following loop should loop until the cancellation signal is
|
|
+ delivered. */
|
|
+ while (__glibc_unlikely ((newval & (CANCELING_BITMASK | CANCELED_BITMASK))
|
|
+ == CANCELING_BITMASK))
|
|
+ {
|
|
+ futex_wait_simple ((unsigned int *) &self->cancelhandling, newval,
|
|
+ FUTEX_PRIVATE);
|
|
+ newval = atomic_load_relaxed (&self->cancelhandling);
|
|
+ }
|
|
}
|
|
libc_hidden_def (__pthread_disable_asynccancel)
|
|
diff --git a/nptl/cleanup_defer.c b/nptl/cleanup_defer.c
|
|
index 7e858d0df068276b..35ba40fb0247c7cc 100644
|
|
--- a/nptl/cleanup_defer.c
|
|
+++ b/nptl/cleanup_defer.c
|
|
@@ -31,9 +31,22 @@ ___pthread_register_cancel_defer (__pthread_unwind_buf_t *buf)
|
|
ibuf->priv.data.prev = THREAD_GETMEM (self, cleanup_jmp_buf);
|
|
ibuf->priv.data.cleanup = THREAD_GETMEM (self, cleanup);
|
|
|
|
- /* Disable asynchronous cancellation for now. */
|
|
- ibuf->priv.data.canceltype = THREAD_GETMEM (self, canceltype);
|
|
- THREAD_SETMEM (self, canceltype, PTHREAD_CANCEL_DEFERRED);
|
|
+ int cancelhandling = atomic_load_relaxed (&self->cancelhandling);
|
|
+ if (__glibc_unlikely (cancelhandling & CANCELTYPE_BITMASK))
|
|
+ {
|
|
+ int newval;
|
|
+ do
|
|
+ {
|
|
+ newval = cancelhandling & ~CANCELTYPE_BITMASK;
|
|
+ }
|
|
+ while (!atomic_compare_exchange_weak_acquire (&self->cancelhandling,
|
|
+ &cancelhandling,
|
|
+ newval));
|
|
+ }
|
|
+
|
|
+ ibuf->priv.data.canceltype = (cancelhandling & CANCELTYPE_BITMASK
|
|
+ ? PTHREAD_CANCEL_ASYNCHRONOUS
|
|
+ : PTHREAD_CANCEL_DEFERRED);
|
|
|
|
/* Store the new cleanup handler info. */
|
|
THREAD_SETMEM (self, cleanup_jmp_buf, (struct pthread_unwind_buf *) buf);
|
|
@@ -55,9 +68,26 @@ ___pthread_unregister_cancel_restore (__pthread_unwind_buf_t *buf)
|
|
|
|
THREAD_SETMEM (self, cleanup_jmp_buf, ibuf->priv.data.prev);
|
|
|
|
- THREAD_SETMEM (self, canceltype, ibuf->priv.data.canceltype);
|
|
- if (ibuf->priv.data.canceltype == PTHREAD_CANCEL_ASYNCHRONOUS)
|
|
- __pthread_testcancel ();
|
|
+ if (ibuf->priv.data.canceltype == PTHREAD_CANCEL_DEFERRED)
|
|
+ return;
|
|
+
|
|
+ int cancelhandling = atomic_load_relaxed (&self->cancelhandling);
|
|
+ if (cancelhandling & CANCELTYPE_BITMASK)
|
|
+ {
|
|
+ int newval;
|
|
+ do
|
|
+ {
|
|
+ newval = cancelhandling | CANCELTYPE_BITMASK;
|
|
+ }
|
|
+ while (!atomic_compare_exchange_weak_acquire (&self->cancelhandling,
|
|
+ &cancelhandling, newval));
|
|
+
|
|
+ if (cancel_enabled_and_canceled (cancelhandling))
|
|
+ {
|
|
+ self->result = PTHREAD_CANCELED;
|
|
+ __do_cancel ();
|
|
+ }
|
|
+ }
|
|
}
|
|
versioned_symbol (libc, ___pthread_unregister_cancel_restore,
|
|
__pthread_unregister_cancel_restore, GLIBC_2_34);
|
|
diff --git a/nptl/descr.h b/nptl/descr.h
|
|
index dabf980e29615db3..dfef9c4bda075d13 100644
|
|
--- a/nptl/descr.h
|
|
+++ b/nptl/descr.h
|
|
@@ -280,18 +280,27 @@ struct pthread
|
|
|
|
/* Flags determining processing of cancellation. */
|
|
int cancelhandling;
|
|
+ /* Bit set if cancellation is disabled. */
|
|
+#define CANCELSTATE_BIT 0
|
|
+#define CANCELSTATE_BITMASK (1 << CANCELSTATE_BIT)
|
|
+ /* Bit set if asynchronous cancellation mode is selected. */
|
|
+#define CANCELTYPE_BIT 1
|
|
+#define CANCELTYPE_BITMASK (1 << CANCELTYPE_BIT)
|
|
+ /* Bit set if canceling has been initiated. */
|
|
+#define CANCELING_BIT 2
|
|
+#define CANCELING_BITMASK (1 << CANCELING_BIT)
|
|
/* Bit set if canceled. */
|
|
#define CANCELED_BIT 3
|
|
-#define CANCELED_BITMASK (0x01 << CANCELED_BIT)
|
|
+#define CANCELED_BITMASK (1 << CANCELED_BIT)
|
|
/* Bit set if thread is exiting. */
|
|
#define EXITING_BIT 4
|
|
-#define EXITING_BITMASK (0x01 << EXITING_BIT)
|
|
+#define EXITING_BITMASK (1 << EXITING_BIT)
|
|
/* Bit set if thread terminated and TCB is freed. */
|
|
#define TERMINATED_BIT 5
|
|
-#define TERMINATED_BITMASK (0x01 << TERMINATED_BIT)
|
|
+#define TERMINATED_BITMASK (1 << TERMINATED_BIT)
|
|
/* Bit set if thread is supposed to change XID. */
|
|
#define SETXID_BIT 6
|
|
-#define SETXID_BITMASK (0x01 << SETXID_BIT)
|
|
+#define SETXID_BITMASK (1 << SETXID_BIT)
|
|
|
|
/* Flags. Including those copied from the thread attribute. */
|
|
int flags;
|
|
@@ -391,14 +400,6 @@ struct pthread
|
|
/* Indicates whether is a C11 thread created by thrd_creat. */
|
|
bool c11;
|
|
|
|
- /* Thread cancel state (PTHREAD_CANCEL_ENABLE or
|
|
- PTHREAD_CANCEL_DISABLE). */
|
|
- unsigned char cancelstate;
|
|
-
|
|
- /* Thread cancel type (PTHREAD_CANCEL_DEFERRED or
|
|
- PTHREAD_CANCEL_ASYNCHRONOUS). */
|
|
- unsigned char canceltype;
|
|
-
|
|
/* Used in __pthread_kill_internal to detected a thread that has
|
|
exited or is about to exit. exit_lock must only be acquired
|
|
after blocking signals. */
|
|
@@ -418,6 +419,22 @@ struct pthread
|
|
(sizeof (struct pthread) - offsetof (struct pthread, end_padding))
|
|
} __attribute ((aligned (TCB_ALIGNMENT)));
|
|
|
|
+static inline bool
|
|
+cancel_enabled_and_canceled (int value)
|
|
+{
|
|
+ return (value & (CANCELSTATE_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
|
|
+ | TERMINATED_BITMASK))
|
|
+ == CANCELED_BITMASK;
|
|
+}
|
|
+
|
|
+static inline bool
|
|
+cancel_enabled_and_canceled_and_async (int value)
|
|
+{
|
|
+ return ((value) & (CANCELSTATE_BITMASK | CANCELTYPE_BITMASK | CANCELED_BITMASK
|
|
+ | EXITING_BITMASK | TERMINATED_BITMASK))
|
|
+ == (CANCELTYPE_BITMASK | CANCELED_BITMASK);
|
|
+}
|
|
+
|
|
/* This yields the pointer that TLS support code calls the thread pointer. */
|
|
#if TLS_TCB_AT_TP
|
|
# define TLS_TPADJ(pd) (pd)
|
|
diff --git a/nptl/libc-cleanup.c b/nptl/libc-cleanup.c
|
|
index 180d15bc9e9a8368..fccb1abe69aa693c 100644
|
|
--- a/nptl/libc-cleanup.c
|
|
+++ b/nptl/libc-cleanup.c
|
|
@@ -27,9 +27,24 @@ __libc_cleanup_push_defer (struct _pthread_cleanup_buffer *buffer)
|
|
|
|
buffer->__prev = THREAD_GETMEM (self, cleanup);
|
|
|
|
+ int cancelhandling = atomic_load_relaxed (&self->cancelhandling);
|
|
+
|
|
/* Disable asynchronous cancellation for now. */
|
|
- buffer->__canceltype = THREAD_GETMEM (self, canceltype);
|
|
- THREAD_SETMEM (self, canceltype, PTHREAD_CANCEL_DEFERRED);
|
|
+ if (__glibc_unlikely (cancelhandling & CANCELTYPE_BITMASK))
|
|
+ {
|
|
+ int newval;
|
|
+ do
|
|
+ {
|
|
+ newval = cancelhandling & ~CANCELTYPE_BITMASK;
|
|
+ }
|
|
+ while (!atomic_compare_exchange_weak_acquire (&self->cancelhandling,
|
|
+ &cancelhandling,
|
|
+ newval));
|
|
+ }
|
|
+
|
|
+ buffer->__canceltype = (cancelhandling & CANCELTYPE_BITMASK
|
|
+ ? PTHREAD_CANCEL_ASYNCHRONOUS
|
|
+ : PTHREAD_CANCEL_DEFERRED);
|
|
|
|
THREAD_SETMEM (self, cleanup, buffer);
|
|
}
|
|
@@ -42,8 +57,22 @@ __libc_cleanup_pop_restore (struct _pthread_cleanup_buffer *buffer)
|
|
|
|
THREAD_SETMEM (self, cleanup, buffer->__prev);
|
|
|
|
- THREAD_SETMEM (self, canceltype, buffer->__canceltype);
|
|
- if (buffer->__canceltype == PTHREAD_CANCEL_ASYNCHRONOUS)
|
|
- __pthread_testcancel ();
|
|
+ int cancelhandling = atomic_load_relaxed (&self->cancelhandling);
|
|
+ if (cancelhandling & CANCELTYPE_BITMASK)
|
|
+ {
|
|
+ int newval;
|
|
+ do
|
|
+ {
|
|
+ newval = cancelhandling | CANCELTYPE_BITMASK;
|
|
+ }
|
|
+ while (!atomic_compare_exchange_weak_acquire (&self->cancelhandling,
|
|
+ &cancelhandling, newval));
|
|
+
|
|
+ if (cancel_enabled_and_canceled (cancelhandling))
|
|
+ {
|
|
+ self->result = PTHREAD_CANCELED;
|
|
+ __do_cancel ();
|
|
+ }
|
|
+ }
|
|
}
|
|
libc_hidden_def (__libc_cleanup_pop_restore)
|
|
diff --git a/nptl/pthread_cancel.c b/nptl/pthread_cancel.c
|
|
index 9bac6e3b76a20312..2680b55586e035fe 100644
|
|
--- a/nptl/pthread_cancel.c
|
|
+++ b/nptl/pthread_cancel.c
|
|
@@ -43,18 +43,29 @@ sigcancel_handler (int sig, siginfo_t *si, void *ctx)
|
|
|
|
struct pthread *self = THREAD_SELF;
|
|
|
|
- int ch = atomic_load_relaxed (&self->cancelhandling);
|
|
- /* Cancelation not enabled, not cancelled, or already exitting. */
|
|
- if (self->cancelstate == PTHREAD_CANCEL_DISABLE
|
|
- || (ch & CANCELED_BITMASK) == 0
|
|
- || (ch & EXITING_BITMASK) != 0)
|
|
- return;
|
|
-
|
|
- /* Set the return value. */
|
|
- THREAD_SETMEM (self, result, PTHREAD_CANCELED);
|
|
- /* Make sure asynchronous cancellation is still enabled. */
|
|
- if (self->canceltype == PTHREAD_CANCEL_ASYNCHRONOUS)
|
|
- __do_cancel ();
|
|
+ int oldval = atomic_load_relaxed (&self->cancelhandling);
|
|
+ while (1)
|
|
+ {
|
|
+ /* We are canceled now. When canceled by another thread this flag
|
|
+ is already set but if the signal is directly send (internally or
|
|
+ from another process) is has to be done here. */
|
|
+ int newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
|
|
+
|
|
+ if (oldval == newval || (oldval & EXITING_BITMASK) != 0)
|
|
+ /* Already canceled or exiting. */
|
|
+ break;
|
|
+
|
|
+ if (atomic_compare_exchange_weak_acquire (&self->cancelhandling,
|
|
+ &oldval, newval))
|
|
+ {
|
|
+ self->result = PTHREAD_CANCELED;
|
|
+
|
|
+ /* Make sure asynchronous cancellation is still enabled. */
|
|
+ if ((oldval & CANCELTYPE_BITMASK) != 0)
|
|
+ /* Run the registered destructors and terminate the thread. */
|
|
+ __do_cancel ();
|
|
+ }
|
|
+ }
|
|
}
|
|
|
|
int
|
|
@@ -93,29 +104,70 @@ __pthread_cancel (pthread_t th)
|
|
}
|
|
#endif
|
|
|
|
- int oldch = atomic_fetch_or_acquire (&pd->cancelhandling, CANCELED_BITMASK);
|
|
- if ((oldch & CANCELED_BITMASK) != 0)
|
|
- return 0;
|
|
-
|
|
- if (pd == THREAD_SELF)
|
|
+ /* Some syscalls are never restarted after being interrupted by a signal
|
|
+ handler, regardless of the use of SA_RESTART (they always fail with
|
|
+ EINTR). So pthread_cancel cannot send SIGCANCEL unless the cancellation
|
|
+ is enabled and set as asynchronous (in this case the cancellation will
|
|
+ be acted in the cancellation handler instead by the syscall wrapper).
|
|
+ Otherwise the target thread is set as 'cancelling' (CANCELING_BITMASK)
|
|
+ by atomically setting 'cancelhandling' and the cancelation will be acted
|
|
+ upon on next cancellation entrypoing in the target thread.
|
|
+
|
|
+ It also requires to atomically check if cancellation is enabled and
|
|
+ asynchronous, so both cancellation state and type are tracked on
|
|
+ 'cancelhandling'. */
|
|
+
|
|
+ int result = 0;
|
|
+ int oldval = atomic_load_relaxed (&pd->cancelhandling);
|
|
+ int newval;
|
|
+ do
|
|
{
|
|
- /* A single-threaded process should be able to kill itself, since there
|
|
- is nothing in the POSIX specification that says that it cannot. So
|
|
- we set multiple_threads to true so that cancellation points get
|
|
- executed. */
|
|
- THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
|
|
+ newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;
|
|
+ if (oldval == newval)
|
|
+ break;
|
|
+
|
|
+ /* If the cancellation is handled asynchronously just send a
|
|
+ signal. We avoid this if possible since it's more
|
|
+ expensive. */
|
|
+ if (cancel_enabled_and_canceled_and_async (newval))
|
|
+ {
|
|
+ /* Mark the cancellation as "in progress". */
|
|
+ int newval2 = oldval | CANCELING_BITMASK;
|
|
+ if (!atomic_compare_exchange_weak_acquire (&pd->cancelhandling,
|
|
+ &oldval, newval2))
|
|
+ continue;
|
|
+
|
|
+ if (pd == THREAD_SELF)
|
|
+ /* This is not merely an optimization: An application may
|
|
+ call pthread_cancel (pthread_self ()) without calling
|
|
+ pthread_create, so the signal handler may not have been
|
|
+ set up for a self-cancel. */
|
|
+ {
|
|
+ pd->result = PTHREAD_CANCELED;
|
|
+ if ((newval & CANCELTYPE_BITMASK) != 0)
|
|
+ __do_cancel ();
|
|
+ }
|
|
+ else
|
|
+ /* The cancellation handler will take care of marking the
|
|
+ thread as canceled. */
|
|
+ result = __pthread_kill_internal (th, SIGCANCEL);
|
|
+
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ /* A single-threaded process should be able to kill itself, since
|
|
+ there is nothing in the POSIX specification that says that it
|
|
+ cannot. So we set multiple_threads to true so that cancellation
|
|
+ points get executed. */
|
|
+ THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
|
|
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
|
|
__libc_multiple_threads = 1;
|
|
#endif
|
|
-
|
|
- THREAD_SETMEM (pd, result, PTHREAD_CANCELED);
|
|
- if (pd->cancelstate == PTHREAD_CANCEL_ENABLE
|
|
- && pd->canceltype == PTHREAD_CANCEL_ASYNCHRONOUS)
|
|
- __do_cancel ();
|
|
- return 0;
|
|
}
|
|
+ while (!atomic_compare_exchange_weak_acquire (&pd->cancelhandling, &oldval,
|
|
+ newval));
|
|
|
|
- return __pthread_kill_internal (th, SIGCANCEL);
|
|
+ return result;
|
|
}
|
|
versioned_symbol (libc, __pthread_cancel, pthread_cancel, GLIBC_2_34);
|
|
|
|
diff --git a/nptl/pthread_join_common.c b/nptl/pthread_join_common.c
|
|
index 7303069316caef13..617056ef10671607 100644
|
|
--- a/nptl/pthread_join_common.c
|
|
+++ b/nptl/pthread_join_common.c
|
|
@@ -57,12 +57,9 @@ __pthread_clockjoin_ex (pthread_t threadid, void **thread_return,
|
|
if ((pd == self
|
|
|| (self->joinid == pd
|
|
&& (pd->cancelhandling
|
|
- & (CANCELED_BITMASK | EXITING_BITMASK
|
|
+ & (CANCELING_BITMASK | CANCELED_BITMASK | EXITING_BITMASK
|
|
| TERMINATED_BITMASK)) == 0))
|
|
- && !(self->cancelstate == PTHREAD_CANCEL_ENABLE
|
|
- && (pd->cancelhandling & (CANCELED_BITMASK | EXITING_BITMASK
|
|
- | TERMINATED_BITMASK))
|
|
- == CANCELED_BITMASK))
|
|
+ && !cancel_enabled_and_canceled (self->cancelhandling))
|
|
/* This is a deadlock situation. The threads are waiting for each
|
|
other to finish. Note that this is a "may" error. To be 100%
|
|
sure we catch this error we would have to lock the data
|
|
diff --git a/nptl/pthread_setcancelstate.c b/nptl/pthread_setcancelstate.c
|
|
index 7e2b6e4974bd58bd..cb567be5926816f1 100644
|
|
--- a/nptl/pthread_setcancelstate.c
|
|
+++ b/nptl/pthread_setcancelstate.c
|
|
@@ -31,9 +31,29 @@ __pthread_setcancelstate (int state, int *oldstate)
|
|
|
|
self = THREAD_SELF;
|
|
|
|
- if (oldstate != NULL)
|
|
- *oldstate = self->cancelstate;
|
|
- self->cancelstate = state;
|
|
+ int oldval = atomic_load_relaxed (&self->cancelhandling);
|
|
+ while (1)
|
|
+ {
|
|
+ int newval = (state == PTHREAD_CANCEL_DISABLE
|
|
+ ? oldval | CANCELSTATE_BITMASK
|
|
+ : oldval & ~CANCELSTATE_BITMASK);
|
|
+
|
|
+ if (oldstate != NULL)
|
|
+ *oldstate = ((oldval & CANCELSTATE_BITMASK)
|
|
+ ? PTHREAD_CANCEL_DISABLE : PTHREAD_CANCEL_ENABLE);
|
|
+
|
|
+ if (oldval == newval)
|
|
+ break;
|
|
+
|
|
+ if (atomic_compare_exchange_weak_acquire (&self->cancelhandling,
|
|
+ &oldval, newval))
|
|
+ {
|
|
+ if (cancel_enabled_and_canceled_and_async (newval))
|
|
+ __do_cancel ();
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/nptl/pthread_setcanceltype.c b/nptl/pthread_setcanceltype.c
|
|
index e7b24ae733dcc0f2..e08ff7b141f904f1 100644
|
|
--- a/nptl/pthread_setcanceltype.c
|
|
+++ b/nptl/pthread_setcanceltype.c
|
|
@@ -29,11 +29,32 @@ __pthread_setcanceltype (int type, int *oldtype)
|
|
|
|
volatile struct pthread *self = THREAD_SELF;
|
|
|
|
- if (oldtype != NULL)
|
|
- *oldtype = self->canceltype;
|
|
- self->canceltype = type;
|
|
- if (type == PTHREAD_CANCEL_ASYNCHRONOUS)
|
|
- __pthread_testcancel ();
|
|
+ int oldval = atomic_load_relaxed (&self->cancelhandling);
|
|
+ while (1)
|
|
+ {
|
|
+ int newval = (type == PTHREAD_CANCEL_ASYNCHRONOUS
|
|
+ ? oldval | CANCELTYPE_BITMASK
|
|
+ : oldval & ~CANCELTYPE_BITMASK);
|
|
+
|
|
+ if (oldtype != NULL)
|
|
+ *oldtype = ((oldval & CANCELTYPE_BITMASK)
|
|
+ ? PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED);
|
|
+
|
|
+ if (oldval == newval)
|
|
+ break;
|
|
+
|
|
+ if (atomic_compare_exchange_weak_acquire (&self->cancelhandling,
|
|
+ &oldval, newval))
|
|
+ {
|
|
+ if (cancel_enabled_and_canceled_and_async (newval))
|
|
+ {
|
|
+ THREAD_SETMEM (self, result, PTHREAD_CANCELED);
|
|
+ __do_cancel ();
|
|
+ }
|
|
+
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
|
|
return 0;
|
|
}
|
|
diff --git a/nptl/pthread_testcancel.c b/nptl/pthread_testcancel.c
|
|
index 31185d89f2ab84c6..25230215fd607e8b 100644
|
|
--- a/nptl/pthread_testcancel.c
|
|
+++ b/nptl/pthread_testcancel.c
|
|
@@ -24,13 +24,10 @@ void
|
|
___pthread_testcancel (void)
|
|
{
|
|
struct pthread *self = THREAD_SELF;
|
|
- int cancelhandling = THREAD_GETMEM (self, cancelhandling);
|
|
- if (self->cancelstate == PTHREAD_CANCEL_ENABLE
|
|
- && (cancelhandling & CANCELED_BITMASK)
|
|
- && !(cancelhandling & EXITING_BITMASK)
|
|
- && !(cancelhandling & TERMINATED_BITMASK))
|
|
+ int cancelhandling = atomic_load_relaxed (&self->cancelhandling);
|
|
+ if (cancel_enabled_and_canceled (cancelhandling))
|
|
{
|
|
- THREAD_SETMEM (self, result, PTHREAD_CANCELED);
|
|
+ self->result = PTHREAD_CANCELED;
|
|
__do_cancel ();
|
|
}
|
|
}
|
|
diff --git a/sysdeps/nptl/dl-tls_init_tp.c b/sysdeps/nptl/dl-tls_init_tp.c
|
|
index b39dfbff2c6678d5..23aa4cfc0b784dfc 100644
|
|
--- a/sysdeps/nptl/dl-tls_init_tp.c
|
|
+++ b/sysdeps/nptl/dl-tls_init_tp.c
|
|
@@ -107,7 +107,4 @@ __tls_init_tp (void)
|
|
It will be bigger than it actually is, but for unwind.c/pt-longjmp.c
|
|
purposes this is good enough. */
|
|
THREAD_SETMEM (pd, stackblock_size, (size_t) __libc_stack_end);
|
|
-
|
|
- THREAD_SETMEM (pd, cancelstate, PTHREAD_CANCEL_ENABLE);
|
|
- THREAD_SETMEM (pd, canceltype, PTHREAD_CANCEL_DEFERRED);
|
|
}
|
|
diff --git a/sysdeps/nptl/pthreadP.h b/sysdeps/nptl/pthreadP.h
|
|
index 374657a2fd0ee19a..b968afc4c6b61b92 100644
|
|
--- a/sysdeps/nptl/pthreadP.h
|
|
+++ b/sysdeps/nptl/pthreadP.h
|
|
@@ -276,7 +276,7 @@ __do_cancel (void)
|
|
struct pthread *self = THREAD_SELF;
|
|
|
|
/* Make sure we get no more cancellations. */
|
|
- THREAD_ATOMIC_BIT_SET (self, cancelhandling, EXITING_BIT);
|
|
+ atomic_bit_set (&self->cancelhandling, EXITING_BIT);
|
|
|
|
__pthread_unwind ((__pthread_unwind_buf_t *)
|
|
THREAD_GETMEM (self, cleanup_jmp_buf));
|
|
diff --git a/sysdeps/pthread/Makefile b/sysdeps/pthread/Makefile
|
|
index c65710169697ad95..00419c4d199df912 100644
|
|
--- a/sysdeps/pthread/Makefile
|
|
+++ b/sysdeps/pthread/Makefile
|
|
@@ -69,6 +69,7 @@ tests += tst-cnd-basic tst-mtx-trylock tst-cnd-broadcast \
|
|
tst-cancel12 tst-cancel13 tst-cancel14 tst-cancel15 tst-cancel16 \
|
|
tst-cancel18 tst-cancel19 tst-cancel20 tst-cancel21 \
|
|
tst-cancel22 tst-cancel23 tst-cancel26 tst-cancel27 tst-cancel28 \
|
|
+ tst-cancel29 \
|
|
tst-cleanup0 tst-cleanup1 tst-cleanup2 tst-cleanup3 \
|
|
tst-clock1 \
|
|
tst-cond-except \
|
|
diff --git a/sysdeps/pthread/tst-cancel29.c b/sysdeps/pthread/tst-cancel29.c
|
|
new file mode 100644
|
|
index 0000000000000000..4f0d99e002883be4
|
|
--- /dev/null
|
|
+++ b/sysdeps/pthread/tst-cancel29.c
|
|
@@ -0,0 +1,207 @@
|
|
+/* Check if a thread that disables cancellation and which call functions
|
|
+ that might be interrupted by a signal do not see the internal SIGCANCEL.
|
|
+
|
|
+ Copyright (C) 2022 Free Software Foundation, Inc.
|
|
+ This file is part of the GNU C Library.
|
|
+
|
|
+ The GNU C Library is free software; you can redistribute it and/or
|
|
+ modify it under the terms of the GNU Lesser General Public
|
|
+ License as published by the Free Software Foundation; either
|
|
+ version 2.1 of the License, or (at your option) any later version.
|
|
+
|
|
+ The GNU C Library is distributed in the hope that it will be useful,
|
|
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
+ Lesser General Public License for more details.
|
|
+
|
|
+ You should have received a copy of the GNU Lesser General Public
|
|
+ License along with the GNU C Library; if not, see
|
|
+ <https://www.gnu.org/licenses/>. */
|
|
+
|
|
+#include <array_length.h>
|
|
+#include <errno.h>
|
|
+#include <inttypes.h>
|
|
+#include <poll.h>
|
|
+#include <support/check.h>
|
|
+#include <support/support.h>
|
|
+#include <support/temp_file.h>
|
|
+#include <support/xthread.h>
|
|
+#include <sys/socket.h>
|
|
+#include <signal.h>
|
|
+#include <stdio.h>
|
|
+#include <unistd.h>
|
|
+
|
|
+/* On Linux some interfaces are never restarted after being interrupted by
|
|
+ a signal handler, regardless of the use of SA_RESTART. It means that
|
|
+ if asynchronous cancellation is not enabled, the pthread_cancel can not
|
|
+ set the internal SIGCANCEL otherwise the interface might see a spurious
|
|
+ EINTR failure. */
|
|
+
|
|
+static pthread_barrier_t b;
|
|
+
|
|
+/* Cleanup handling test. */
|
|
+static int cl_called;
|
|
+static void
|
|
+cl (void *arg)
|
|
+{
|
|
+ ++cl_called;
|
|
+}
|
|
+
|
|
+static void *
|
|
+tf_sigtimedwait (void *arg)
|
|
+{
|
|
+ pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, NULL);
|
|
+ xpthread_barrier_wait (&b);
|
|
+
|
|
+ int r;
|
|
+ pthread_cleanup_push (cl, NULL);
|
|
+
|
|
+ sigset_t mask;
|
|
+ sigemptyset (&mask);
|
|
+ r = sigtimedwait (&mask, NULL, &(struct timespec) { 0, 250000000 });
|
|
+ if (r != -1)
|
|
+ return (void*) -1;
|
|
+ if (errno != EAGAIN)
|
|
+ return (void*) -2;
|
|
+
|
|
+ pthread_cleanup_pop (0);
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static void *
|
|
+tf_poll (void *arg)
|
|
+{
|
|
+ pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, NULL);
|
|
+ xpthread_barrier_wait (&b);
|
|
+
|
|
+ int r;
|
|
+ pthread_cleanup_push (cl, NULL);
|
|
+
|
|
+ r = poll (NULL, 0, 250);
|
|
+ if (r != 0)
|
|
+ return (void*) -1;
|
|
+
|
|
+ pthread_cleanup_pop (0);
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static void *
|
|
+tf_ppoll (void *arg)
|
|
+{
|
|
+ pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, NULL);
|
|
+
|
|
+ xpthread_barrier_wait (&b);
|
|
+
|
|
+ int r;
|
|
+ pthread_cleanup_push (cl, NULL);
|
|
+
|
|
+ r = ppoll (NULL, 0, &(struct timespec) { 0, 250000000 }, NULL);
|
|
+ if (r != 0)
|
|
+ return (void*) -1;
|
|
+
|
|
+ pthread_cleanup_pop (0);
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static void *
|
|
+tf_select (void *arg)
|
|
+{
|
|
+ pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, NULL);
|
|
+ xpthread_barrier_wait (&b);
|
|
+
|
|
+ int r;
|
|
+ pthread_cleanup_push (cl, NULL);
|
|
+
|
|
+ r = select (0, NULL, NULL, NULL, &(struct timeval) { 0, 250000 });
|
|
+ if (r != 0)
|
|
+ return (void*) -1;
|
|
+
|
|
+ pthread_cleanup_pop (0);
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static void *
|
|
+tf_pselect (void *arg)
|
|
+{
|
|
+ pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, NULL);
|
|
+ xpthread_barrier_wait (&b);
|
|
+
|
|
+ int r;
|
|
+ pthread_cleanup_push (cl, NULL);
|
|
+
|
|
+ r = pselect (0, NULL, NULL, NULL, &(struct timespec) { 0, 250000000 }, NULL);
|
|
+ if (r != 0)
|
|
+ return (void*) -1;
|
|
+
|
|
+ pthread_cleanup_pop (0);
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+static void *
|
|
+tf_clock_nanosleep (void *arg)
|
|
+{
|
|
+ pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, NULL);
|
|
+ xpthread_barrier_wait (&b);
|
|
+
|
|
+ int r;
|
|
+ pthread_cleanup_push (cl, NULL);
|
|
+
|
|
+ r = clock_nanosleep (CLOCK_REALTIME, 0, &(struct timespec) { 0, 250000000 },
|
|
+ NULL);
|
|
+ if (r != 0)
|
|
+ return (void*) -1;
|
|
+
|
|
+ pthread_cleanup_pop (0);
|
|
+ return NULL;
|
|
+}
|
|
+
|
|
+struct cancel_test_t
|
|
+{
|
|
+ const char *name;
|
|
+ void * (*cf) (void *);
|
|
+} tests[] =
|
|
+{
|
|
+ { "sigtimedwait", tf_sigtimedwait, },
|
|
+ { "poll", tf_poll, },
|
|
+ { "ppoll", tf_ppoll, },
|
|
+ { "select", tf_select, },
|
|
+ { "pselect", tf_pselect , },
|
|
+ { "clock_nanosleep", tf_clock_nanosleep, },
|
|
+};
|
|
+
|
|
+static int
|
|
+do_test (void)
|
|
+{
|
|
+ for (int i = 0; i < array_length (tests); i++)
|
|
+ {
|
|
+ xpthread_barrier_init (&b, NULL, 2);
|
|
+
|
|
+ cl_called = 0;
|
|
+
|
|
+ pthread_t th = xpthread_create (NULL, tests[i].cf, NULL);
|
|
+
|
|
+ xpthread_barrier_wait (&b);
|
|
+
|
|
+ struct timespec ts = { .tv_sec = 0, .tv_nsec = 100000000 };
|
|
+ while (nanosleep (&ts, &ts) != 0)
|
|
+ continue;
|
|
+
|
|
+ xpthread_cancel (th);
|
|
+
|
|
+ void *status = xpthread_join (th);
|
|
+ if (status != NULL)
|
|
+ printf ("test '%s' failed: %" PRIdPTR "\n", tests[i].name,
|
|
+ (intptr_t) status);
|
|
+ TEST_VERIFY (status == NULL);
|
|
+
|
|
+ xpthread_barrier_destroy (&b);
|
|
+
|
|
+ TEST_COMPARE (cl_called, 0);
|
|
+
|
|
+ printf ("in-time cancel test of '%s' successful\n", tests[i].name);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#include <support/test-driver.c>
|