author     Ulrich Drepper <drepper@redhat.com>  2003-06-08 05:28:14 +0000
committer  Ulrich Drepper <drepper@redhat.com>  2003-06-08 05:28:14 +0000
commit     7726edc27354afe163f492c0e6a8d4354fddb494
tree       22a712872298767abf2b081d9f766a33cad8fa20 /nptl/sysdeps
parent     ede0f73aeabe62589f6ca19a6987f48aa0d06184
Update.
2003-06-07  Ulrich Drepper  <drepper@redhat.com>

	* cleanup_routine.c: New file.
	* Versions (libpthread) [GLIBC_2.3.3]: Add __pthread_cleanup_routine.
	* sysdeps/pthread/pthread.h: Add support for fully exception-based
	cleanup handling.
	* Makefile (libpthread-routines): Add cleanup_routine.
	Add more CFLAGS variables to compile with exceptions.  Add comments
	why which file needs unwind tables.
	(tests) [have-forced-unwind==yes]: Add tst-cancelx* and tst-cleanupx*
	tests.
	* tst-cancelx1.c: New file.
	* tst-cancelx2.c: New file.
	* tst-cancelx3.c: New file.
	* tst-cancelx4.c: New file.
	* tst-cancelx5.c: New file.
	* tst-cancelx6.c: New file.
	* tst-cancelx7.c: New file.
	* tst-cancelx8.c: New file.
	* tst-cancelx9.c: New file.
	* tst-cancelx10.c: New file.
	* tst-cancelx11.c: New file.
	* tst-cancelx12.c: New file.
	* tst-cancelx13.c: New file.
	* tst-cancelx14.c: New file.
	* tst-cancelx15.c: New file.
	* tst-cleanupx0.c: New file.
	* tst-cleanupx0.expect: New file.
	* tst-cleanupx1.c: New file.
	* tst-cleanupx2.c: New file.
	* tst-cleanupx3.c: New file.
	* tst-cleanup0.c: Make standard compliant.
	* tst-cleanup1.c: Likewise.
	* sysdeps/unix/sysv/linux/sem_timedwait.c: Add cancellation support.
	* sysdeps/unix/sysv/linux/sem_wait.c: Likewise.
	* sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i486/sem_wait.S: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S: Likewise.
	* sysdeps/unix/sysv/linux/x86_64/sem_wait.S: Likewise.
	* sysdeps/i386/tcb-offsets.sym: Add RESULT, CANCELHANDLING, and
	CLEANUP_JMP_BUF.
	* sysdeps/x86_64/tcb-offsets.sym: Likewise.
	* tst-cancel12.c: New file.
	* tst-cancel13.c: New file.
	* tst-cancel14.c: New file.
	* tst-cancel15.c: New file.
	* Makefile (tests): Add tst-cancel12, tst-cancel13, tst-cancel14,
	and tst-cancel15.
	* tst-cancel1.c: Add some comments.
	* sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S: Compute relative
	timeout correctly.
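The new tst-cancelx* and tst-cleanupx* tests exercise the two halves of this
change together: cleanup handlers that run during forced unwinding, and
sem_wait acting as a cancellation point.  A minimal sketch of that usage
pattern (illustrative only, not code from the tests; the handler and thread
function names are made up):

/* Illustrative example only -- not part of the patch and not one of the
   new tests.  A thread installs a cleanup handler and blocks in sem_wait;
   cancelling it runs the handler during unwinding.  */
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <unistd.h>

static sem_t sem;

static void
cleanup_handler (void *arg)
{
  /* Runs when the thread is canceled while blocked in sem_wait.  */
  printf ("cleanup: %s\n", (const char *) arg);
}

static void *
tf (void *arg)
{
  pthread_cleanup_push (cleanup_handler, (void *) "blocked in sem_wait");
  sem_wait (&sem);		/* Cancellation point after this patch.  */
  pthread_cleanup_pop (0);	/* Not reached if the thread is canceled.  */
  return NULL;
}

int
main (void)
{
  pthread_t th;

  sem_init (&sem, 0, 0);	/* Value 0, so tf blocks in sem_wait.  */
  pthread_create (&th, NULL, tf, NULL);
  sleep (1);			/* Let tf reach the sem_wait call.  */
  pthread_cancel (th);
  pthread_join (th, NULL);
  return 0;
}

Because the thread is canceled while blocked in sem_wait, pthread_cleanup_pop
is never executed; the handler runs as part of the unwind, which is what the
exception-based cleanup support in pthread.h below provides.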
Diffstat (limited to 'nptl/sysdeps')
-rw-r--r--   nptl/sysdeps/i386/tcb-offsets.sym                       |   3
-rw-r--r--   nptl/sysdeps/pthread/pthread.h                          | 137
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S  |  78
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S       |  62
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/sem_timedwait.c            |  12
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/sem_wait.c                 |  10
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S     |  75
-rw-r--r--   nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S          |  66
-rw-r--r--   nptl/sysdeps/x86_64/tcb-offsets.sym                     |   3
9 files changed, 386 insertions(+), 60 deletions(-)
diff --git a/nptl/sysdeps/i386/tcb-offsets.sym b/nptl/sysdeps/i386/tcb-offsets.sym
index 12cb6f00f9..a83c3c9a9b 100644
--- a/nptl/sysdeps/i386/tcb-offsets.sym
+++ b/nptl/sysdeps/i386/tcb-offsets.sym
@@ -1,7 +1,10 @@
#include <sysdep.h>
#include <tls.h>
+RESULT offsetof (struct pthread, result)
TID offsetof (struct pthread, tid)
+CANCELHANDLING offsetof (struct pthread, cancelhandling)
+CLEANUP_JMP_BUF offsetof (struct pthread, cleanup_jmp_buf)
MULTIPLE_THREADS_OFFSET offsetof (tcbhead_t, multiple_threads)
SYSINFO_OFFSET offsetof (tcbhead_t, sysinfo)
CLEANUP offsetof (struct pthread, cleanup)
diff --git a/nptl/sysdeps/pthread/pthread.h b/nptl/sysdeps/pthread/pthread.h
index c0375ae223..1f0a34baa2 100644
--- a/nptl/sysdeps/pthread/pthread.h
+++ b/nptl/sysdeps/pthread/pthread.h
@@ -419,14 +419,132 @@ typedef struct
#endif
+/* Structure to hold the cleanup handler information. */
+struct __pthread_cleanup_frame
+{
+ void (*__cancel_routine) (void *);
+ void *__cancel_arg;
+ int __do_it;
+ int __cancel_type;
+};
+
+#if defined __GNUC__ && defined __EXCEPTIONS
+# ifdef __cplusplus
+/* Class to handle cancellation handler invocation. */
+class __pthread_cleanup_class
+{
+ void (*__cancel_routine) (void *);
+ void *__cancel_arg;
+ int __do_it;
+ int __cancel_type;
+
+ public:
+ __pthread_cleanup_class (void (*__fct) (void *), void *__arg)
+ : __cancel_routine (__fct), __cancel_arg (__arg), __do_it (1) { }
+ ~__pthread_cleanup_class () { if (__do_it) __cancel_routine (__cancel_arg); }
+ void __setdoit (int __newval) { __do_it = __newval; }
+ void __defer () { pthread_setcanceltype (PTHREAD_CANCEL_DEFERRED,
+ &__cancel_type); }
+ void __restore () const { pthread_setcanceltype (__cancel_type, 0); }
+};
+
+/* Install a cleanup handler: ROUTINE will be called with arguments ARG
+ when the thread is canceled or calls pthread_exit. ROUTINE will also
+ be called with arguments ARG when the matching pthread_cleanup_pop
+ is executed with non-zero EXECUTE argument.
+
+ pthread_cleanup_push and pthread_cleanup_pop are macros and must always
+ be used in matching pairs at the same nesting level of braces. */
+# define pthread_cleanup_push(routine, arg) \
+ do { \
+ __pthread_cleanup_class __clframe (routine, arg)
+
+/* Remove a cleanup handler installed by the matching pthread_cleanup_push.
+ If EXECUTE is non-zero, the handler function is called. */
+# define pthread_cleanup_pop(execute) \
+ __clframe.__setdoit (execute); \
+ } while (0)
+
+# ifdef __USE_GNU
+/* Install a cleanup handler as pthread_cleanup_push does, but also
+ saves the current cancellation type and sets it to deferred
+ cancellation. */
+# define pthread_cleanup_push_defer_np(routine, arg) \
+ do { \
+ __pthread_cleanup_class __clframe (routine, arg); \
+ __clframe.__defer ()
+
+/* Remove a cleanup handler as pthread_cleanup_pop does, but also
+ restores the cancellation type that was in effect when the matching
+ pthread_cleanup_push_defer was called. */
+# define pthread_cleanup_pop_restore_np(execute) \
+ __clframe.__restore (); \
+ __clframe.__setdoit (execute); \
+ } while (0)
+# endif
+# else
+/* Function called to call the cleanup handler. As an extern inline
+ function the compiler is free to decide inlining the change when
+ needed or fall back on the copy which must exist somewhere
+ else. */
+extern inline void
+__pthread_cleanup_routine (struct __pthread_cleanup_frame *__frame)
+{
+ if (__frame->__do_it)
+ __frame->__cancel_routine (__frame->__cancel_arg);
+}
+
/* Install a cleanup handler: ROUTINE will be called with arguments ARG
- when the thread is cancelled or calls pthread_exit. ROUTINE will also
+ when the thread is canceled or calls pthread_exit. ROUTINE will also
be called with arguments ARG when the matching pthread_cleanup_pop
is executed with non-zero EXECUTE argument.
pthread_cleanup_push and pthread_cleanup_pop are macros and must always
be used in matching pairs at the same nesting level of braces. */
-#define pthread_cleanup_push(routine, arg) \
+# define pthread_cleanup_push(routine, arg) \
+ do { \
+ struct __pthread_cleanup_frame __clframe \
+ __attribute__ ((__cleanup__ (__pthread_cleanup_routine))) \
+ = { .__cancel_routine = (routine), .__cancel_arg = (arg), \
+ .__do_it = 1 };
+
+/* Remove a cleanup handler installed by the matching pthread_cleanup_push.
+ If EXECUTE is non-zero, the handler function is called. */
+# define pthread_cleanup_pop(execute) \
+ __clframe.__do_it = (execute); \
+ } while (0)
+
+# ifdef __USE_GNU
+/* Install a cleanup handler as pthread_cleanup_push does, but also
+ saves the current cancellation type and sets it to deferred
+ cancellation. */
+# define pthread_cleanup_push_defer_np(routine, arg) \
+ do { \
+ struct __pthread_cleanup_frame __clframe \
+ __attribute__ ((__cleanup__ (__pthread_cleanup_routine))) \
+ = { .__cancel_routine = (routine), .__cancel_arg = (arg), \
+ .__do_it = 1 }; \
+ (void) pthread_setcanceltype (PTHREAD_CANCEL_DEFERRED, \
+ &__clframe.__cancel_type)
+
+/* Remove a cleanup handler as pthread_cleanup_pop does, but also
+ restores the cancellation type that was in effect when the matching
+ pthread_cleanup_push_defer was called. */
+# define pthread_cleanup_pop_restore_np(execute) \
+ (void) pthread_setcanceltype (__clframe.__cancel_type, NULL); \
+ __clframe.__do_it = (execute); \
+ } while (0)
+# endif
+# endif
+#else
+/* Install a cleanup handler: ROUTINE will be called with arguments ARG
+ when the thread is canceled or calls pthread_exit. ROUTINE will also
+ be called with arguments ARG when the matching pthread_cleanup_pop
+ is executed with non-zero EXECUTE argument.
+
+ pthread_cleanup_push and pthread_cleanup_pop are macros and must always
+ be used in matching pairs at the same nesting level of braces. */
+# define pthread_cleanup_push(routine, arg) \
do { \
__pthread_unwind_buf_t __cancel_buf; \
void (*__cancel_routine) (void *) = (routine); \
@@ -447,7 +565,7 @@ extern void __pthread_register_cancel (__pthread_unwind_buf_t *__buf)
/* Remove a cleanup handler installed by the matching pthread_cleanup_push.
If EXECUTE is non-zero, the handler function is called. */
-#define pthread_cleanup_pop(execute) \
+# define pthread_cleanup_pop(execute) \
} while (0); \
__pthread_unregister_cancel (&__cancel_buf); \
if (execute) \
@@ -456,11 +574,11 @@ extern void __pthread_register_cancel (__pthread_unwind_buf_t *__buf)
extern void __pthread_unregister_cancel (__pthread_unwind_buf_t *__buf)
__cleanup_fct_attribute;
-#ifdef __USE_GNU
+# ifdef __USE_GNU
/* Install a cleanup handler as pthread_cleanup_push does, but also
saves the current cancellation type and sets it to deferred
cancellation. */
-# define pthread_cleanup_push_defer(routine, arg) \
+# define pthread_cleanup_push_defer_np(routine, arg) \
do { \
__pthread_unwind_buf_t __cancel_buf; \
void (*__cancel_routine) (void *) = (routine); \
@@ -482,7 +600,7 @@ extern void __pthread_register_cancel_defer (__pthread_unwind_buf_t *__buf)
/* Remove a cleanup handler as pthread_cleanup_pop does, but also
restores the cancellation type that was in effect when the matching
pthread_cleanup_push_defer was called. */
-# define pthread_cleanup_pop_cleanup(execute) \
+# define pthread_cleanup_pop_restore_np(execute) \
} while (0); \
__pthread_unregister_cancel_restore (&__cancel_buf); \
if (execute) \
@@ -490,15 +608,16 @@ extern void __pthread_register_cancel_defer (__pthread_unwind_buf_t *__buf)
} while (0)
extern void __pthread_unregister_cancel_restore (__pthread_unwind_buf_t *__buf)
__cleanup_fct_attribute;
-#endif
+# endif
/* Internal interface to initiate cleanup. */
extern void __pthread_unwind_next (__pthread_unwind_buf_t *__buf)
__cleanup_fct_attribute __attribute ((__noreturn__))
-#ifndef SHARED
+# ifndef SHARED
__attribute ((__weak__))
-#endif
+# endif
;
+#endif
/* Function used in the macros. */
struct __jmp_buf_tag;
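For GCC builds with exception support but without C++, the new
pthread_cleanup_push builds on GCC's __cleanup__ variable attribute together
with the exported __pthread_cleanup_routine.  A standalone sketch of that
mechanism, using made-up names (my_frame, my_routine) rather than the glibc
ones:

/* Standalone sketch of the __attribute__ ((__cleanup__)) mechanism the new
   C macros build on.  The names my_frame/my_routine are hypothetical; glibc
   uses __pthread_cleanup_frame and __pthread_cleanup_routine.  */
#include <stdio.h>

struct my_frame
{
  void (*routine) (void *);
  void *arg;
  int do_it;
};

static void
my_routine (struct my_frame *frame)
{
  /* GCC calls this automatically when the variable goes out of scope,
     including when the scope is left by cancellation unwinding.  */
  if (frame->do_it)
    frame->routine (frame->arg);
}

static void
handler (void *arg)
{
  printf ("handler: %s\n", (const char *) arg);
}

int
main (void)
{
  {
    struct my_frame frame
      __attribute__ ((__cleanup__ (my_routine)))
      = { .routine = handler, .arg = (void *) "leaving scope", .do_it = 1 };

    /* Setting frame.do_it = 0 here would correspond to
       pthread_cleanup_pop (0): the routine then does nothing on exit.  */
  }
  return 0;
}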
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
index aa3d74593d..9afe85f205 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_timedwait.S
@@ -37,7 +37,14 @@
.globl sem_timedwait
.type sem_timedwait,@function
.align 16
+ cfi_startproc
sem_timedwait:
+ /* First check for cancellation. */
+ movl %gs:CANCELHANDLING, %eax
+ andl $0xfffffff9, %eax
+ cmpl $8, %eax
+ je 10f
+
movl 4(%esp), %ecx
movl (%ecx), %eax
@@ -54,19 +61,28 @@ sem_timedwait:
/* Check whether the timeout value is valid. */
1: pushl %esi
+ cfi_adjust_cfa_offset(4)
pushl %edi
+ cfi_adjust_cfa_offset(4)
pushl %ebx
- subl $8, %esp
+ cfi_adjust_cfa_offset(4)
+ subl $12, %esp
+ cfi_adjust_cfa_offset(12)
- movl %esp, %esi
- movl 28(%esp), %edi
+ movl 32(%esp), %edi
+ cfi_offset(7, -12) /* %edi */
/* Check for invalid nanosecond field. */
cmpl $1000000000, 4(%edi)
- movl $EINVAL, %eax
+ movl $EINVAL, %esi
+ cfi_offset(6, -8) /* %esi */
jae 6f
-7: xorl %ecx, %ecx
+ cfi_offset(3, -16) /* %ebx */
+7: call __pthread_enable_asynccancel
+ movl %eax, 8(%esp)
+
+ xorl %ecx, %ecx
movl %esp, %ebx
movl %ecx, %edx
movl $SYS_gettimeofday, %eax
@@ -84,20 +100,25 @@ sem_timedwait:
addl $1000000000, %edx
subl $1, %ecx
5: testl %ecx, %ecx
- movl $ETIMEDOUT, %eax
+ movl $ETIMEDOUT, %esi
js 6f /* Time is already up. */
movl %ecx, (%esp) /* Store relative timeout. */
movl %edx, 4(%esp)
- movl 24(%esp), %ebx
+ movl 28(%esp), %ebx
xorl %ecx, %ecx
+ movl %esp, %esi
movl $SYS_futex, %eax
xorl %edx, %edx
ENTER_KERNEL
+ movl %eax, %esi
+
+ movl 8(%esp), %eax
+ call __pthread_disable_asynccancel
- testl %eax, %eax
+ testl %esi, %esi
je,pt 9f
- cmpl $-EWOULDBLOCK, %eax
+ cmpl $-EWOULDBLOCK, %esi
jne 3f
9: movl (%ebx), %eax
@@ -109,14 +130,27 @@ sem_timedwait:
cmpxchgl %ecx, (%ebx)
jne,pn 8b
- addl $8, %esp
+ addl $12, %esp
+ cfi_adjust_cfa_offset(-12)
xorl %eax, %eax
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(3)
popl %edi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(7)
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(6)
ret
-3: negl %eax
+ cfi_adjust_cfa_offset(24)
+ cfi_offset(6, -8) /* %esi */
+ cfi_offset(7, -12) /* %edi */
+ cfi_offset(3, -16) /* %ebx */
+3: negl %esi
6:
#ifdef PIC
call __i686.get_pc_thunk.bx
@@ -128,17 +162,31 @@ sem_timedwait:
#if USE___THREAD
movl %gs:0, %edx
subl errno@gottpoff(%ebx), %edx
- movl %eax, (%edx)
+ movl %esi, (%edx)
#else
- movl %eax, %edx
call __errno_location@plt
- movl %edx, (%eax)
+ movl %esi, (%eax)
#endif
- addl $8, %esp
+ addl $12, %esp
+ cfi_adjust_cfa_offset(-12)
orl $-1, %eax
popl %ebx
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(3)
popl %edi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(7)
popl %esi
+ cfi_adjust_cfa_offset(-4)
+ cfi_restore(6)
ret
+
+10: /* Canceled. */
+ movl $0xffffffff, %gs:RESULT
+ LOCK
+ orl $0x10, %gs:CANCELHANDLING
+ movl %gs:CLEANUP_JMP_BUF, %eax
+ jmp __pthread_unwind
+ cfi_endproc
.size sem_timedwait,.-sem_timedwait
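The prologue added above checks whether the thread has already been canceled
before touching the semaphore, and label 10 performs the cancellation itself:
store -1 as the thread result, atomically mark the thread as exiting, and
unwind via CLEANUP_JMP_BUF.  A rough, self-contained C rendering of that
logic follows; the structure and bit values are assumptions modelled on
NPTL's cancelhandling flags, not copied from the glibc internal headers.

/* Sketch of the cancellation prologue and the "canceled" path above.  */
#include <stdio.h>

#define CANCELSTATE_BITMASK 0x01   /* Set: cancellation disabled.  */
#define CANCELTYPE_BITMASK  0x02   /* Set: asynchronous cancellation.  */
#define CANCELING_BITMASK   0x04   /* Cancellation is being started.  */
#define CANCELED_BITMASK    0x08   /* pthread_cancel was called.  */
#define EXITING_BITMASK     0x10   /* Thread is already unwinding.  */

struct fake_thread
{
  int cancelhandling;
  long result;
};

/* Mirrors "andl $0xfffffff9; cmpl $8": ignore the type and canceling bits,
   then require the remainder to be exactly CANCELED -- i.e. cancellation is
   enabled, a cancel is pending, and the thread is not already exiting or
   terminated.  */
static int
cancel_enabled_and_canceled (int value)
{
  return (value & ~(CANCELTYPE_BITMASK | CANCELING_BITMASK))
	 == CANCELED_BITMASK;
}

int
main (void)
{
  struct fake_thread self = { .cancelhandling = CANCELED_BITMASK };

  if (cancel_enabled_and_canceled (self.cancelhandling))
    {
      /* The assembly stores -1 in RESULT, atomically sets the exiting bit
	 (lock orl $0x10) and jumps to __pthread_unwind with the address of
	 CLEANUP_JMP_BUF; here we only mimic the bookkeeping.  */
      self.result = -1;
      self.cancelhandling |= EXITING_BITMASK;
      puts ("would unwind to cleanup_jmp_buf now");
    }
  return 0;
}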
diff --git a/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S b/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
index eb01ca84f7..ba4f54cd64 100644
--- a/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
+++ b/nptl/sysdeps/unix/sysv/linux/i386/i486/sem_wait.S
@@ -36,12 +36,25 @@
.globl __new_sem_wait
.type __new_sem_wait,@function
.align 16
+ cfi_startproc
__new_sem_wait:
+ /* First check for cancellation. */
+ movl %gs:CANCELHANDLING, %eax
+ andl $0xfffffff9, %eax
+ cmpl $8, %eax
+ je 5f
+
pushl %ebx
+ cfi_adjust_cfa_offset(4)
pushl %esi
+ cfi_adjust_cfa_offset(4)
+ subl $4, %esp
+ cfi_adjust_cfa_offset(4)
- movl 12(%esp), %ebx
+ movl 16(%esp), %ebx
+ cfi_offset(3, -8) /* %ebx */
+ cfi_offset(6, -12) /* %esi */
3: movl (%ebx), %eax
2: testl %eax, %eax
je,pn 1f
@@ -52,21 +65,35 @@ __new_sem_wait:
jne,pn 2b
xorl %eax, %eax
- popl %esi
- popl %ebx
+ movl 4(%esp), %esi
+ cfi_restore(6)
+ movl 8(%esp), %ebx
+ cfi_restore(3)
+ addl $12, %esp
+ cfi_adjust_cfa_offset(-12)
ret
-1: xorl %esi, %esi
+ cfi_adjust_cfa_offset(12)
+ cfi_offset(3, -8) /* %ebx */
+ cfi_offset(6, -12) /* %esi */
+1: call __pthread_enable_asynccancel
+ movl %eax, (%esp)
+
+ xorl %esi, %esi
movl $SYS_futex, %eax
movl %esi, %ecx
movl %esi, %edx
ENTER_KERNEL
+ movl %eax, %esi
+
+ movl (%esp), %eax
+ call __pthread_disable_asynccancel
- testl %eax, %eax
+ testl %esi, %esi
je 3b
- cmpl $-EWOULDBLOCK, %eax
+ cmpl $-EWOULDBLOCK, %esi
je 3b
- negl %eax
+ negl %esi
#ifdef PIC
call __i686.get_pc_thunk.bx
#else
@@ -77,16 +104,27 @@ __new_sem_wait:
#if USE___THREAD
movl %gs:0, %edx
subl errno@gottpoff(%ebx), %edx
- movl %eax, (%edx)
+ movl %esi, (%edx)
#else
- movl %eax, %edx
call __errno_location@plt
- movl %edx, (%eax)
+ movl %esi, (%eax)
#endif
orl $-1, %eax
- popl %esi
- popl %ebx
+ movl 4(%esp), %esi
+ cfi_restore(6)
+ movl 8(%esp), %ebx
+ cfi_restore(3)
+ addl $12, %esp
+ cfi_adjust_cfa_offset(-12)
ret
+
+5: /* Canceled. */
+ movl $0xffffffff, %gs:RESULT
+ LOCK
+ orl $0x10, %gs:CANCELHANDLING
+ movl %gs:CLEANUP_JMP_BUF, %eax
+ jmp __pthread_unwind
+ cfi_endproc
.size __new_sem_wait,.-__new_sem_wait
versioned_symbol(libpthread, __new_sem_wait, sem_wait, GLIBC_2_1)
#if SHLIB_COMPAT(libpthread, GLIBC_2_0, GLIBC_2_1)
diff --git a/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c b/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c
index 8a65ce2567..ef897c1e93 100644
--- a/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c
+++ b/nptl/sysdeps/unix/sysv/linux/sem_timedwait.c
@@ -24,12 +24,16 @@
#include <internaltypes.h>
#include <semaphore.h>
+#include <pthreadP.h>
#include <shlib-compat.h>
int
sem_timedwait (sem_t *sem, const struct timespec *abstime)
{
+ /* First check for cancellation. */
+ CANCELLATION_P (THREAD_SELF);
+
int *futex = (int *) sem;
int val;
int err;
@@ -71,7 +75,15 @@ sem_timedwait (sem_t *sem, const struct timespec *abstime)
/* Do wait. */
rt.tv_sec = sec;
rt.tv_nsec = nsec;
+
+ /* Enable asynchronous cancellation. Required by the standard. */
+ int oldtype = __pthread_enable_asynccancel ();
+
err = lll_futex_timed_wait (futex, 0, &rt);
+
+ /* Disable asynchronous cancellation. */
+ __pthread_disable_asynccancel (oldtype);
+
if (err != 0 && err != -EWOULDBLOCK)
goto error_return;
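The relative timeout passed to lll_futex_timed_wait is derived from the
absolute abstime and the current time just above this hunk; the same
arithmetic, done in assembly, is what the ChangeLog's "Compute relative
timeout correctly" entry fixes for x86-64.  A simplified sketch of that
computation, assuming gettimeofday as the time source as the assembly
versions do (not a copy of the glibc code):

/* Turn an absolute deadline into the relative timeout a futex wait expects.
   Returns 0 on success, or the error code the caller should report.  */
#include <errno.h>
#include <sys/time.h>
#include <time.h>

int
relative_timeout (const struct timespec *abstime, struct timespec *rt)
{
  struct timeval tv;

  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
    return EINVAL;		/* Same check as the EINVAL path.  */

  (void) gettimeofday (&tv, NULL);

  /* Subtract "now" from the deadline, borrowing one second if the
     nanosecond difference goes negative.  */
  rt->tv_sec = abstime->tv_sec - tv.tv_sec;
  rt->tv_nsec = abstime->tv_nsec - tv.tv_usec * 1000;
  if (rt->tv_nsec < 0)
    {
      rt->tv_nsec += 1000000000;
      --rt->tv_sec;
    }

  if (rt->tv_sec < 0)
    return ETIMEDOUT;		/* Deadline has already passed.  */

  return 0;
}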
diff --git a/nptl/sysdeps/unix/sysv/linux/sem_wait.c b/nptl/sysdeps/unix/sysv/linux/sem_wait.c
index 1d39b7c408..36bb158e09 100644
--- a/nptl/sysdeps/unix/sysv/linux/sem_wait.c
+++ b/nptl/sysdeps/unix/sysv/linux/sem_wait.c
@@ -24,12 +24,16 @@
#include <internaltypes.h>
#include <semaphore.h>
+#include <pthreadP.h>
#include <shlib-compat.h>
int
__new_sem_wait (sem_t *sem)
{
+ /* First check for cancellation. */
+ CANCELLATION_P (THREAD_SELF);
+
int *futex = (int *) sem;
int val;
int err;
@@ -43,7 +47,13 @@ __new_sem_wait (sem_t *sem)
return 0;
}
+ /* Enable asynchronous cancellation. Required by the standard. */
+ int oldtype = __pthread_enable_asynccancel ();
+
err = lll_futex_wait (futex, 0);
+
+ /* Disable asynchronous cancellation. */
+ __pthread_disable_asynccancel (oldtype);
}
while (err == 0 || err == -EWOULDBLOCK);
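Both C implementations bracket the blocking futex call with
__pthread_enable_asynccancel and __pthread_disable_asynccancel so that a
cancellation arriving while the thread is already asleep in the kernel takes
effect immediately, as POSIX requires for sem_wait and sem_timedwait.
Functionally the bracket is close to a temporary switch to asynchronous
cancelability; the following is only an approximation using the public API
(the real internal helpers are cheaper and work on the cancelhandling word
directly):

/* Approximation of the enable/disable bracket around a blocking call.  */
#include <pthread.h>

int
blocking_wait_with_async_cancel (int (*blocking_call) (void *), void *arg)
{
  int oldtype;
  int err;

  /* Roughly __pthread_enable_asynccancel ().  */
  pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);

  err = blocking_call (arg);	/* e.g. the futex wait in sem_wait.  */

  /* Roughly __pthread_disable_asynccancel (oldtype).  */
  pthread_setcanceltype (oldtype, NULL);

  return err;
}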
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
index 29bc1bcf46..7626d7b250 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/sem_timedwait.S
@@ -38,7 +38,14 @@
.globl sem_timedwait
.type sem_timedwait,@function
.align 16
+ cfi_startproc
sem_timedwait:
+ /* First check for cancellation. */
+ movl %fs:CANCELHANDLING, %eax
+ andl $0xfffffff9, %eax
+ cmpl $8, %eax
+ je 11f
+
movl (%rdi), %eax
2: testl %eax, %eax
je 1f
@@ -53,18 +60,29 @@ sem_timedwait:
/* Check whether the timeout value is valid. */
1: pushq %r12
+ cfi_adjust_cfa_offset(8)
pushq %r13
- subq $16, %rsp
+ cfi_adjust_cfa_offset(8)
+ pushq %r14
+ cfi_adjust_cfa_offset(8)
+ subq $24, %rsp
+ cfi_adjust_cfa_offset(24)
movq %rdi, %r12
+ cfi_offset(12, -16) /* %r12 */
movq %rsi, %r13
+ cfi_offset(13, -24) /* %r13 */
/* Check for invalid nanosecond field. */
cmpq $1000000000, 8(%r13)
- movl $EINVAL, %eax
+ movl $EINVAL, %r14d
+ cfi_offset(14, -32) /* %r14 */
jae 6f
-7: xorq %rsi, %rsi
+7: call __pthread_enable_asynccancel
+ movl %eax, 16(%rsp)
+
+ xorq %rsi, %rsi
movq %rsp, %rdi
movq $VSYSCALL_ADDR_vgettimeofday, %rax
callq *%rax
@@ -74,14 +92,14 @@ sem_timedwait:
movq $1000, %rdi
mul %rdi /* Milli seconds to nano seconds. */
movq (%r13), %rdi
- movq 8(%r13), %rdi
+ movq 8(%r13), %rsi
subq (%rsp), %rdi
- subq %rax, %rdi
+ subq %rax, %rsi
jns 5f
addq $1000000000, %rsi
decq %rdi
5: testq %rdi, %rdi
- movl $ETIMEDOUT, %eax
+ movl $ETIMEDOUT, %r14d
js 6f /* Time is already up. */
movq %rdi, (%rsp) /* Store relative timeout. */
@@ -93,38 +111,65 @@ sem_timedwait:
movq $SYS_futex, %rax
xorl %edx, %edx
syscall
+ movq %rax, %r14
- testq %rax, %rax
+ movl 16(%rsp), %edi
+ call __pthread_disable_asynccancel
+
+ testq %r14, %r14
je 9f
- cmpq $-EWOULDBLOCK, %rax
+ cmpq $-EWOULDBLOCK, %r14
jne 3f
-9: movl (%rdi), %eax
+9: movl (%r12), %eax
8: testl %eax, %eax
je 7b
leaq -1(%rax), %rcx
LOCK
- cmpxchgl %ecx, (%rdi)
+ cmpxchgl %ecx, (%r12)
jne 8b
xorl %eax, %eax
-10: addq $16, %rsp
+10: addq $24, %rsp
+ cfi_adjust_cfa_offset(-24)
+ popq %r14
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(14)
popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(13)
popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(12)
retq
-3: negq %rax
+ cfi_adjust_cfa_offset(48)
+ cfi_offset(12, -16) /* %r12 */
+ cfi_offset(13, -24) /* %r13 */
+ cfi_offset(14, -32) /* %r14 */
+3: negq %r14
6:
#if USE___THREAD
movq errno@gottpoff(%rip), %rdx
- movl %eax, %fs:(%rdx)
+ movl %r14d, %fs:(%rdx)
#else
- movl %eax, %edx
callq __errno_location@plt
- movl %edx, (%rax)
+ movl %r14d, (%rax)
#endif
orl $-1, %eax
jmp 10b
+ cfi_adjust_cfa_offset(-48)
+ cfi_restore(14)
+ cfi_restore(13)
+ cfi_restore(12)
+
+11: /* Canceled. */
+ movq $0xffffffffffffffff, %fs:RESULT
+ LOCK
+ orl $0x10, %fs:CANCELHANDLING
+ movq %fs:CLEANUP_JMP_BUF, %rdi
+ jmp __pthread_unwind
+ cfi_endproc
.size sem_timedwait,.-sem_timedwait
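A side effect of calling __pthread_enable_asynccancel and
__pthread_disable_asynccancel from assembly is that the futex syscall's
return value can no longer stay in %eax/%rax across those calls, which is
why the patch parks it in a callee-saved register (%esi, %r12 or %r14)
before mapping it onto the usual return convention.  That mapping, common to
all four assembly files, looks roughly like this in C (a sketch, not glibc
code):

/* The futex syscall returns a negative errno value; anything other than
   success or -EWOULDBLOCK is negated, stored in errno, and turned into the
   -1 return value of sem_wait/sem_timedwait.  */
#include <errno.h>

int
map_futex_result (int err)
{
  if (err == 0 || err == -EWOULDBLOCK)
    return 0;			/* Retry / re-check the semaphore count.  */

  errno = -err;			/* negl/negq plus the store to errno.  */
  return -1;
}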
diff --git a/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S b/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
index 877ee4c4dc..32742309fb 100644
--- a/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
+++ b/nptl/sysdeps/unix/sysv/linux/x86_64/sem_wait.S
@@ -35,38 +35,86 @@
.globl sem_wait
.type sem_wait,@function
.align 16
+ cfi_startproc
sem_wait:
-3: movl (%rdi), %eax
+ /* First check for cancellation. */
+ movl %fs:CANCELHANDLING, %eax
+ andl $0xfffffff9, %eax
+ cmpl $8, %eax
+ je 4f
+
+ pushq %r12
+ cfi_adjust_cfa_offset(8)
+ cfi_offset(12, -16)
+ pushq %r13
+ cfi_adjust_cfa_offset(8)
+ movq %rdi, %r13
+ cfi_offset(13, -24)
+
+3: movl (%r13), %eax
2: testl %eax, %eax
je 1f
leaq -1(%rax), %rdx
LOCK
- cmpxchgl %edx, (%rdi)
+ cmpxchgl %edx, (%r13)
jne 2b
xorl %eax, %eax
+ popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(13)
+ popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(12)
+
retq
-1: xorq %r10, %r10
+ cfi_adjust_cfa_offset(16)
+ cfi_offset(12, -16)
+ cfi_offset(13, -24)
+1: call __pthread_enable_asynccancel
+ movl %eax, %r8d
+
+ xorq %r10, %r10
movq $SYS_futex, %rax
+ movq %r13, %rdi
movq %r10, %rsi
movq %r10, %rdx
syscall
+ movq %rax, %r12
+
+ movl %r8d, %edi
+ call __pthread_disable_asynccancel
- testq %rax, %rax
+ testq %r12, %r12
je 3b
- cmpq $-EWOULDBLOCK, %rax
+ cmpq $-EWOULDBLOCK, %r12
je 3b
- negq %rax
+ negq %r12
#if USE___THREAD
movq errno@gottpoff(%rip), %rdx
- movl %eax, %fs:(%rdx)
+ movl %r12d, %fs:(%rdx)
#else
- movl %eax, %edx
callq __errno_location@plt
- movl %edx, (%rax)
+ movl %r12d, (%rax)
#endif
orl $-1, %eax
+
+ popq %r13
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(13)
+ popq %r12
+ cfi_adjust_cfa_offset(-8)
+ cfi_restore(12)
+
retq
+
+4: /* Canceled. */
+ movq $0xffffffffffffffff, %fs:RESULT
+ LOCK
+ orl $0x10, %fs:CANCELHANDLING
+ movq %fs:CLEANUP_JMP_BUF, %rdi
+ jmp __pthread_unwind
+ cfi_endproc
.size sem_wait,.-sem_wait
diff --git a/nptl/sysdeps/x86_64/tcb-offsets.sym b/nptl/sysdeps/x86_64/tcb-offsets.sym
index e230e3dfb3..dc6e5c4504 100644
--- a/nptl/sysdeps/x86_64/tcb-offsets.sym
+++ b/nptl/sysdeps/x86_64/tcb-offsets.sym
@@ -1,7 +1,10 @@
#include <sysdep.h>
#include <tls.h>
+RESULT offsetof (struct pthread, result)
TID offsetof (struct pthread, tid)
+CANCELHANDLING offsetof (struct pthread, cancelhandling)
+CLEANUP_JMP_BUF offsetof (struct pthread, cleanup_jmp_buf)
CLEANUP offsetof (struct pthread, cleanup)
CLEANUP_PREV offsetof (struct _pthread_cleanup_buffer, __prev)
MUTEX_FUTEX offsetof (pthread_mutex_t, __data.__lock)
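The *.sym additions export byte offsets into the NPTL thread descriptor so
the assembly above can address result, cancelhandling and cleanup_jmp_buf
directly off the thread register (%gs on i386, %fs on x86-64).  A sketch of
what such offsets are, using a stand-in structure rather than the real
struct pthread from the NPTL internals:

/* Stand-in illustration of the offsetof constants the .sym files export;
   the field types and layout here are simplified assumptions.  */
#include <setjmp.h>
#include <stddef.h>
#include <stdio.h>

struct fake_pthread
{
  void *result;			/* -> RESULT */
  int cancelhandling;		/* -> CANCELHANDLING */
  jmp_buf *cleanup_jmp_buf;	/* -> CLEANUP_JMP_BUF (simplified type) */
};

int
main (void)
{
  /* The glibc build turns each "NAME offsetof (...)" line into an
     assembler constant; this merely prints analogous values.  */
  printf ("RESULT          %zu\n", offsetof (struct fake_pthread, result));
  printf ("CANCELHANDLING  %zu\n",
	  offsetof (struct fake_pthread, cancelhandling));
  printf ("CLEANUP_JMP_BUF %zu\n",
	  offsetof (struct fake_pthread, cleanup_jmp_buf));
  return 0;
}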