author     Torvald Riegel <triegel@redhat.com>    2016-12-22 10:20:43 +0100
committer  Torvald Riegel <triegel@redhat.com>    2017-01-13 17:16:07 +0100
commit     65810f0ef05e8c9e333f17a44e77808b163ca298 (patch)
tree       09928382f6015251c3b351650204f65fc8d6ec21 /sysdeps
parent     f32941d80c7f532031061f8dd4704fab9c275cfe (diff)
robust mutexes: Fix broken x86 assembly by removing it
lll_robust_unlock on i386 and x86_64 first sets the futex word to
FUTEX_WAITERS|0 before calling __lll_unlock_wake, which will set the
futex word to 0.  If the thread is killed between these steps, then the
futex word will be FUTEX_WAITERS|0, and the kernel (at least current
upstream) will not set it to FUTEX_OWNER_DIED|FUTEX_WAITERS because 0
is not equal to the TID of the crashed thread.

The lll_robust_lock assembly code on i386 and x86_64 is not prepared to
deal with this case because the fastpath tries to only CAS 0 to TID and
not FUTEX_WAITERS|0 to TID; the slowpath simply waits until it can CAS
0 to TID or the futex_word has the FUTEX_OWNER_DIED bit set.

This issue is fixed by removing the custom x86 assembly code and using
the generic C code instead.  However, instead of adding more duplicate
code to the custom x86 lowlevellock.h, the code of the lll_robust*
functions is inlined into the single call sites that exist for each of
these functions in the pthread_mutex_* functions.  The robust mutex
paths in the latter have been slightly reorganized to make them
simpler.

This patch is meant to be easy to backport, so C11-style atomics are
not used.

        [BZ #20985]
        * nptl/Makefile: Adapt.
        * nptl/pthread_mutex_cond_lock.c (LLL_ROBUST_MUTEX_LOCK): Remove.
        (LLL_ROBUST_MUTEX_LOCK_MODIFIER): New.
        * nptl/pthread_mutex_lock.c (LLL_ROBUST_MUTEX_LOCK): Remove.
        (LLL_ROBUST_MUTEX_LOCK_MODIFIER): New.
        (__pthread_mutex_lock_full): Inline lll_robust* functions and
        adapt.
        * nptl/pthread_mutex_timedlock.c (pthread_mutex_timedlock):
        Inline lll_robust* functions and adapt.
        * nptl/pthread_mutex_unlock.c (__pthread_mutex_unlock_full):
        Likewise.
        * sysdeps/nptl/lowlevellock.h (__lll_robust_lock_wait,
        __lll_robust_lock, lll_robust_cond_lock,
        __lll_robust_timedlock_wait, __lll_robust_timedlock,
        __lll_robust_unlock): Remove.
        * sysdeps/unix/sysv/linux/i386/lowlevellock.h (lll_robust_lock,
        lll_robust_cond_lock, lll_robust_timedlock, lll_robust_unlock):
        Remove.
        * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h (lll_robust_lock,
        lll_robust_cond_lock, lll_robust_timedlock, lll_robust_unlock):
        Remove.
        * sysdeps/unix/sysv/linux/sparc/lowlevellock.h
        (__lll_robust_lock_wait, __lll_robust_lock, lll_robust_cond_lock,
        __lll_robust_timedlock_wait, __lll_robust_timedlock,
        __lll_robust_unlock): Remove.
        * nptl/lowlevelrobustlock.c: Remove file.
        * nptl/lowlevelrobustlock.sym: Likewise.
        * sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S: Likewise.
        * sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: Likewise.
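To make the window concrete, the two release paths can be sketched in
portable C11.  This is an illustration only: glibc's own code uses its
internal atomic_* macros and lll_futex_wake rather than <stdatomic.h>,
and futex_wake_one below is a hypothetical stand-in for the
futex (FUTEX_WAKE, 1) call.

#include <stdatomic.h>

#define FUTEX_WAITERS    0x80000000u
#define FUTEX_OWNER_DIED 0x40000000u

/* Hypothetical stand-in for futex (FUTEX_WAKE, 1); the real code calls
   lll_futex_wake.  */
static void
futex_wake_one (atomic_uint *futex)
{
  (void) futex;
}

/* Two-step release as in the removed i386/x86_64 lll_robust_unlock:
   first clear the TID, keeping only FUTEX_WAITERS, then let the wake
   helper store 0.  */
static void
robust_unlock_two_step (atomic_uint *futex)
{
  unsigned int old = atomic_fetch_and_explicit (futex, FUTEX_WAITERS,
                                                memory_order_release);
  if (old & FUTEX_WAITERS)
    {
      /* The word is now FUTEX_WAITERS|0.  If the thread is killed here,
         the kernel's robust-list cleanup does not set FUTEX_OWNER_DIED
         because the word no longer equals the dead thread's TID, so
         waiters never learn that the owner is gone.  */
      atomic_store_explicit (futex, 0, memory_order_relaxed);
      futex_wake_one (futex);
    }
}

/* Single-step release as in the generic __lll_robust_unlock (removed
   from sysdeps/nptl/lowlevellock.h below and inlined into
   __pthread_mutex_unlock_full): one atomic exchange, so the
   FUTEX_WAITERS|0 intermediate state never exists.  */
static void
robust_unlock_single_step (atomic_uint *futex)
{
  unsigned int old = atomic_exchange_explicit (futex, 0,
                                               memory_order_release);
  if (old & FUTEX_WAITERS)
    futex_wake_one (futex);
}

The single exchange is the pattern visible in the removed generic
__lll_robust_unlock hunk below; the patch keeps that behavior and only
moves it to the pthread_mutex_* call sites.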
Diffstat (limited to 'sysdeps')
-rw-r--r--  sysdeps/nptl/lowlevellock.h                           68
-rw-r--r--  sysdeps/unix/sysv/linux/i386/lowlevellock.h           60
-rw-r--r--  sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S    232
-rw-r--r--  sysdeps/unix/sysv/linux/sparc/lowlevellock.h          40
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/lowlevellock.h         74
-rw-r--r--  sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S  306
6 files changed, 0 insertions, 780 deletions
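For reference, the acquire side that cannot cope with the
FUTEX_WAITERS|0 state can be sketched the same way.  Again this is an
illustration under the same assumptions, not the removed assembly
itself; futex_wait is a hypothetical stand-in for the FUTEX_WAIT
syscall issued by __lll_robust_lock_wait.

#include <stdatomic.h>

#define FUTEX_WAITERS    0x80000000u
#define FUTEX_OWNER_DIED 0x40000000u

/* Hypothetical stand-in for futex (FUTEX_WAIT, val).  */
static void
futex_wait (atomic_uint *futex, unsigned int val)
{
  (void) futex;
  (void) val;
}

/* Acquire path mirroring the removed fast path plus
   __lll_robust_lock_wait: returns 0 once the lock is taken, or the
   observed word if FUTEX_OWNER_DIED is set.  */
static unsigned int
robust_lock_sketch (atomic_uint *futex, unsigned int tid)
{
  unsigned int expected = 0;
  /* Fast path: only 0 -> TID is attempted.  */
  if (atomic_compare_exchange_strong_explicit (futex, &expected, tid,
                                               memory_order_acquire,
                                               memory_order_relaxed))
    return 0;

  unsigned int val = expected;
  for (;;)
    {
      /* A dead owner is reported back to the caller.  */
      if (val & FUTEX_OWNER_DIED)
        return val;

      /* Mark the word as contended (if not already) and block.  A word
         stuck at FUTEX_WAITERS|0 takes this branch every iteration: it
         is neither 0 nor marked FUTEX_OWNER_DIED, so the caller waits
         forever -- the hang this patch fixes.  */
      expected = val;
      if ((val & FUTEX_WAITERS) != 0
          || atomic_compare_exchange_strong_explicit (futex, &expected,
                                                      val | FUTEX_WAITERS,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
        futex_wait (futex, val | FUTEX_WAITERS);

      val = atomic_load_explicit (futex, memory_order_relaxed);
      if (val == 0)
        {
          /* The slow path still only attempts 0 -> TID|FUTEX_WAITERS.  */
          expected = 0;
          if (atomic_compare_exchange_strong_explicit (futex, &expected,
                                                       tid | FUTEX_WAITERS,
                                                       memory_order_acquire,
                                                       memory_order_relaxed))
            return 0;
          val = expected;
        }
    }
}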
diff --git a/sysdeps/nptl/lowlevellock.h b/sysdeps/nptl/lowlevellock.h
index 36a76fab08..42d9658d9d 100644
--- a/sysdeps/nptl/lowlevellock.h
+++ b/sysdeps/nptl/lowlevellock.h
@@ -74,7 +74,6 @@
extern void __lll_lock_wait_private (int *futex) attribute_hidden;
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
/* This is an expression rather than a statement even though its value is
void, so that it can be used in a comma expression or as an expression
@@ -103,28 +102,6 @@ extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
__lll_lock (&(futex), private)
-/* If FUTEX is 0 (not acquired), set to ID (acquired with no waiters) and
- return 0. Otherwise, ensure that it is set to FUTEX | FUTEX_WAITERS
- (acquired, possibly with waiters) and block until we acquire the lock.
- FUTEX will now be ID | FUTEX_WAITERS and we return 0.
- If the previous owner of the lock dies before we acquire the lock then FUTEX
- will be the value of id as set by the previous owner, with FUTEX_OWNER_DIED
- set (FUTEX_WAITERS may or may not be set). We return this value to indicate
- that the lock is not acquired. */
-#define __lll_robust_lock(futex, id, private) \
- ({ \
- int *__futex = (futex); \
- int __val = 0; \
- \
- if (__glibc_unlikely \
- (atomic_compare_and_exchange_bool_acq (__futex, id, 0))) \
- __val = __lll_robust_lock_wait (__futex, private); \
- __val; \
- })
-#define lll_robust_lock(futex, id, private) \
- __lll_robust_lock (&(futex), id, private)
-
-
/* This is an expression rather than a statement even though its value is
void, so that it can be used in a comma expression or as an expression
that's cast to void. */
@@ -142,16 +119,8 @@ extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
-/* As __lll_robust_lock, but set to ID | FUTEX_WAITERS (acquired, possibly with
- waiters) if FUTEX is 0. */
-#define lll_robust_cond_lock(futex, id, private) \
- __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
-
-
extern int __lll_timedlock_wait (int *futex, const struct timespec *,
int private) attribute_hidden;
-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
- int private) attribute_hidden;
/* As __lll_lock, but with a timeout. If the timeout occurs then return
@@ -170,22 +139,6 @@ extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
__lll_timedlock (&(futex), abstime, private)
-/* As __lll_robust_lock, but with a timeout. If the timeout occurs then return
- ETIMEDOUT. If ABSTIME is invalid, return EINVAL. */
-#define __lll_robust_timedlock(futex, abstime, id, private) \
- ({ \
- int *__futex = (futex); \
- int __val = 0; \
- \
- if (__glibc_unlikely \
- (atomic_compare_and_exchange_bool_acq (__futex, id, 0))) \
- __val = __lll_robust_timedlock_wait (__futex, abstime, private); \
- __val; \
- })
-#define lll_robust_timedlock(futex, abstime, id, private) \
- __lll_robust_timedlock (&(futex), abstime, id, private)
-
-
/* This is an expression rather than a statement even though its value is
void, so that it can be used in a comma expression or as an expression
that's cast to void. */
@@ -211,27 +164,6 @@ extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
__lll_unlock (&(futex), private)
-/* This is an expression rather than a statement even though its value is
- void, so that it can be used in a comma expression or as an expression
- that's cast to void. */
-/* Unconditionally set FUTEX to 0 (not acquired), releasing the lock. If FUTEX
- had FUTEX_WAITERS set then wake any waiters. The waiter that acquires the
- lock will set FUTEX_WAITERS.
- Evaluate PRIVATE before releasing the lock so that we do not violate the
- mutex destruction requirements (see __lll_unlock). */
-#define __lll_robust_unlock(futex, private) \
- ((void) \
- ({ \
- int *__futex = (futex); \
- int __private = (private); \
- int __oldval = atomic_exchange_rel (__futex, 0); \
- if (__glibc_unlikely (__oldval & FUTEX_WAITERS)) \
- lll_futex_wake (__futex, 1, __private); \
- }))
-#define lll_robust_unlock(futex, private) \
- __lll_robust_unlock (&(futex), private)
-
-
#define lll_islocked(futex) \
((futex) != LLL_LOCK_INITIALIZER)
diff --git a/sysdeps/unix/sysv/linux/i386/lowlevellock.h b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
index 31946a5172..197bb1fcd9 100644
--- a/sysdeps/unix/sysv/linux/i386/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/i386/lowlevellock.h
@@ -132,20 +132,6 @@
} \
})
-#define lll_robust_lock(futex, id, private) \
- ({ int result, ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
- "jz 18f\n\t" \
- "1:\tleal %2, %%edx\n" \
- "0:\tmovl %7, %%ecx\n" \
- "2:\tcall __lll_robust_lock_wait\n" \
- "18:" \
- : "=a" (result), "=c" (ignore1), "=m" (futex), \
- "=&d" (ignore2) \
- : "0" (0), "1" (id), "m" (futex), "g" ((int) (private))\
- : "memory"); \
- result; })
-
/* Special version of lll_lock which causes the unlock function to
always wakeup waiters. */
@@ -165,22 +151,6 @@
})
-#define lll_robust_cond_lock(futex, id, private) \
- ({ int result, ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %1, %2\n\t" \
- "jz 18f\n\t" \
- "1:\tleal %2, %%edx\n" \
- "0:\tmovl %7, %%ecx\n" \
- "2:\tcall __lll_robust_lock_wait\n" \
- "18:" \
- : "=a" (result), "=c" (ignore1), "=m" (futex), \
- "=&d" (ignore2) \
- : "0" (0), "1" (id | FUTEX_WAITERS), "m" (futex), \
- "g" ((int) (private)) \
- : "memory"); \
- result; })
-
-
#define lll_timedlock(futex, timeout, private) \
({ int result, ignore1, ignore2, ignore3; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
@@ -203,21 +173,6 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
#define lll_timedlock_elision(futex, adapt_count, timeout, private) \
__lll_timedlock_elision(&(futex), &(adapt_count), timeout, private)
-#define lll_robust_timedlock(futex, timeout, id, private) \
- ({ int result, ignore1, ignore2, ignore3; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %1, %3\n\t" \
- "jz 18f\n\t" \
- "1:\tleal %3, %%ecx\n" \
- "0:\tmovl %8, %%edx\n" \
- "2:\tcall __lll_robust_timedlock_wait\n" \
- "18:" \
- : "=a" (result), "=c" (ignore1), "=&d" (ignore2), \
- "=m" (futex), "=S" (ignore3) \
- : "0" (0), "1" (id), "m" (futex), "m" (timeout), \
- "4" ((int) (private)) \
- : "memory"); \
- result; })
-
#if !IS_IN (libc) || defined UP
# define __lll_unlock_asm LOCK_INSTR "subl $1, %0\n\t"
#else
@@ -255,21 +210,6 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
} \
})
-#define lll_robust_unlock(futex, private) \
- (void) \
- ({ int ignore, ignore2; \
- __asm __volatile (LOCK_INSTR "andl %3, %0\n\t" \
- "je 18f\n\t" \
- "1:\tleal %0, %%eax\n" \
- "0:\tmovl %5, %%ecx\n" \
- "2:\tcall __lll_unlock_wake\n" \
- "18:" \
- : "=m" (futex), "=&a" (ignore), "=&c" (ignore2) \
- : "i" (FUTEX_WAITERS), "m" (futex), \
- "g" ((int) (private)) \
- : "memory"); \
- })
-
#define lll_islocked(futex) \
(futex != LLL_LOCK_INITIALIZER)
diff --git a/sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S b/sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S
deleted file mode 100644
index f3a68c0f44..0000000000
--- a/sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S
+++ /dev/null
@@ -1,232 +0,0 @@
-/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-#include <pthread-errnos.h>
-#include <lowlevellock.h>
-#include <lowlevelrobustlock.h>
-#include <kernel-features.h>
-
- .text
-
-#define FUTEX_WAITERS 0x80000000
-#define FUTEX_OWNER_DIED 0x40000000
-
-#ifdef __ASSUME_PRIVATE_FUTEX
-# define LOAD_FUTEX_WAIT(reg) \
- xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
-#else
-# if FUTEX_WAIT == 0
-# define LOAD_FUTEX_WAIT(reg) \
- xorl $FUTEX_PRIVATE_FLAG, reg ; \
- andl %gs:PRIVATE_FUTEX, reg
-# else
-# define LOAD_FUTEX_WAIT(reg) \
- xorl $FUTEX_PRIVATE_FLAG, reg ; \
- andl %gs:PRIVATE_FUTEX, reg ; \
- orl $FUTEX_WAIT, reg
-# endif
-#endif
-
- .globl __lll_robust_lock_wait
- .type __lll_robust_lock_wait,@function
- .hidden __lll_robust_lock_wait
- .align 16
-__lll_robust_lock_wait:
- cfi_startproc
- pushl %edx
- cfi_adjust_cfa_offset(4)
- pushl %ebx
- cfi_adjust_cfa_offset(4)
- pushl %esi
- cfi_adjust_cfa_offset(4)
- cfi_offset(%edx, -8)
- cfi_offset(%ebx, -12)
- cfi_offset(%esi, -16)
-
- movl %edx, %ebx
- xorl %esi, %esi /* No timeout. */
- LOAD_FUTEX_WAIT (%ecx)
-
-4: movl %eax, %edx
- orl $FUTEX_WAITERS, %edx
-
- testl $FUTEX_OWNER_DIED, %eax
- jnz 3f
-
- cmpl %edx, %eax /* NB: %edx == 2 */
- je 1f
-
- LOCK
- cmpxchgl %edx, (%ebx)
- jnz 2f
-
-1: movl $SYS_futex, %eax
- ENTER_KERNEL
-
- movl (%ebx), %eax
-
-2: test %eax, %eax
- jne 4b
-
- movl %gs:TID, %edx
- orl $FUTEX_WAITERS, %edx
- LOCK
- cmpxchgl %edx, (%ebx)
- jnz 4b
- /* NB: %eax == 0 */
-
-3: popl %esi
- cfi_adjust_cfa_offset(-4)
- cfi_restore(%esi)
- popl %ebx
- cfi_adjust_cfa_offset(-4)
- cfi_restore(%ebx)
- popl %edx
- cfi_adjust_cfa_offset(-4)
- cfi_restore(%edx)
- ret
- cfi_endproc
- .size __lll_robust_lock_wait,.-__lll_robust_lock_wait
-
-
- .globl __lll_robust_timedlock_wait
- .type __lll_robust_timedlock_wait,@function
- .hidden __lll_robust_timedlock_wait
- .align 16
-__lll_robust_timedlock_wait:
- cfi_startproc
- /* Check for a valid timeout value. */
- cmpl $1000000000, 4(%edx)
- jae 3f
-
- pushl %edi
- cfi_adjust_cfa_offset(4)
- pushl %esi
- cfi_adjust_cfa_offset(4)
- pushl %ebx
- cfi_adjust_cfa_offset(4)
- pushl %ebp
- cfi_adjust_cfa_offset(4)
- cfi_offset(%edi, -8)
- cfi_offset(%esi, -12)
- cfi_offset(%ebx, -16)
- cfi_offset(%ebp, -20)
-
- /* Stack frame for the timespec and timeval structs. */
- subl $12, %esp
- cfi_adjust_cfa_offset(12)
-
- movl %ecx, %ebp
- movl %edx, %edi
-
-1: movl %eax, 8(%esp)
-
- /* Get current time. */
- movl %esp, %ebx
- xorl %ecx, %ecx
- movl $__NR_gettimeofday, %eax
- ENTER_KERNEL
-
- /* Compute relative timeout. */
- movl 4(%esp), %eax
- movl $1000, %edx
- mul %edx /* Milli seconds to nano seconds. */
- movl (%edi), %ecx
- movl 4(%edi), %edx
- subl (%esp), %ecx
- subl %eax, %edx
- jns 4f
- addl $1000000000, %edx
- subl $1, %ecx
-4: testl %ecx, %ecx
- js 8f /* Time is already up. */
-
- /* Store relative timeout. */
- movl %ecx, (%esp)
- movl %edx, 4(%esp)
-
- movl %ebp, %ebx
-
- movl 8(%esp), %edx
- movl %edx, %eax
- orl $FUTEX_WAITERS, %edx
-
- testl $FUTEX_OWNER_DIED, %eax
- jnz 6f
-
- cmpl %eax, %edx
- je 2f
-
- LOCK
- cmpxchgl %edx, (%ebx)
- movl $0, %ecx /* Must use mov to avoid changing cc. */
- jnz 5f
-
-2:
- /* Futex call. */
- movl %esp, %esi
- movl 20(%esp), %ecx
- LOAD_FUTEX_WAIT (%ecx)
- movl $SYS_futex, %eax
- ENTER_KERNEL
- movl %eax, %ecx
-
- movl (%ebx), %eax
-
-5: testl %eax, %eax
- jne 7f
-
- movl %gs:TID, %edx
- orl $FUTEX_WAITERS, %edx
- LOCK
- cmpxchgl %edx, (%ebx)
- jnz 7f
-
-6: addl $12, %esp
- cfi_adjust_cfa_offset(-12)
- popl %ebp
- cfi_adjust_cfa_offset(-4)
- cfi_restore(%ebp)
- popl %ebx
- cfi_adjust_cfa_offset(-4)
- cfi_restore(%ebx)
- popl %esi
- cfi_adjust_cfa_offset(-4)
- cfi_restore(%esi)
- popl %edi
- cfi_adjust_cfa_offset(-4)
- cfi_restore(%edi)
- ret
-
-3: movl $EINVAL, %eax
- ret
-
- cfi_adjust_cfa_offset(28)
- cfi_offset(%edi, -8)
- cfi_offset(%esi, -12)
- cfi_offset(%ebx, -16)
- cfi_offset(%ebp, -20)
- /* Check whether the time expired. */
-7: cmpl $-ETIMEDOUT, %ecx
- jne 1b
-
-8: movl $ETIMEDOUT, %eax
- jmp 6b
- cfi_endproc
- .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait
diff --git a/sysdeps/unix/sysv/linux/sparc/lowlevellock.h b/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
index a0c6f7efda..e36fde6cae 100644
--- a/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/sparc/lowlevellock.h
@@ -46,7 +46,6 @@ __lll_cond_trylock (int *futex)
extern void __lll_lock_wait_private (int *futex) attribute_hidden;
extern void __lll_lock_wait (int *futex, int private) attribute_hidden;
-extern int __lll_robust_lock_wait (int *futex, int private) attribute_hidden;
static inline void
__attribute__ ((always_inline))
@@ -64,18 +63,6 @@ __lll_lock (int *futex, int private)
}
#define lll_lock(futex, private) __lll_lock (&(futex), private)
-static inline int
-__attribute__ ((always_inline))
-__lll_robust_lock (int *futex, int id, int private)
-{
- int result = 0;
- if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
- result = __lll_robust_lock_wait (futex, private);
- return result;
-}
-#define lll_robust_lock(futex, id, private) \
- __lll_robust_lock (&(futex), id, private)
-
static inline void
__attribute__ ((always_inline))
__lll_cond_lock (int *futex, int private)
@@ -87,14 +74,9 @@ __lll_cond_lock (int *futex, int private)
}
#define lll_cond_lock(futex, private) __lll_cond_lock (&(futex), private)
-#define lll_robust_cond_lock(futex, id, private) \
- __lll_robust_lock (&(futex), (id) | FUTEX_WAITERS, private)
-
extern int __lll_timedlock_wait (int *futex, const struct timespec *,
int private) attribute_hidden;
-extern int __lll_robust_timedlock_wait (int *futex, const struct timespec *,
- int private) attribute_hidden;
static inline int
__attribute__ ((always_inline))
@@ -110,19 +92,6 @@ __lll_timedlock (int *futex, const struct timespec *abstime, int private)
#define lll_timedlock(futex, abstime, private) \
__lll_timedlock (&(futex), abstime, private)
-static inline int
-__attribute__ ((always_inline))
-__lll_robust_timedlock (int *futex, const struct timespec *abstime,
- int id, int private)
-{
- int result = 0;
- if (atomic_compare_and_exchange_bool_acq (futex, id, 0) != 0)
- result = __lll_robust_timedlock_wait (futex, abstime, private);
- return result;
-}
-#define lll_robust_timedlock(futex, abstime, id, private) \
- __lll_robust_timedlock (&(futex), abstime, id, private)
-
#define lll_unlock(lock, private) \
((void) ({ \
int *__futex = &(lock); \
@@ -132,15 +101,6 @@ __lll_robust_timedlock (int *futex, const struct timespec *abstime,
lll_futex_wake (__futex, 1, __private); \
}))
-#define lll_robust_unlock(lock, private) \
- ((void) ({ \
- int *__futex = &(lock); \
- int __private = (private); \
- int __val = atomic_exchange_rel (__futex, 0); \
- if (__glibc_unlikely (__val & FUTEX_WAITERS)) \
- lll_futex_wake (__futex, 1, __private); \
- }))
-
#define lll_islocked(futex) \
(futex != 0)
diff --git a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
index 70421ff561..cbf6597e00 100644
--- a/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
+++ b/sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
@@ -136,23 +136,6 @@
: "cx", "r11", "cc", "memory"); \
}) \
-#define lll_robust_lock(futex, id, private) \
- ({ int result, ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
- "jz 24f\n" \
- "1:\tlea %2, %%" RDI_LP "\n" \
- "2:\tsub $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset 128\n" \
- "3:\tcallq __lll_robust_lock_wait\n" \
- "4:\tadd $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset -128\n" \
- "24:" \
- : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
- "=a" (result) \
- : "1" (id), "m" (futex), "3" (0), "0" (private) \
- : "cx", "r11", "cc", "memory"); \
- result; })
-
#define lll_cond_lock(futex, private) \
(void) \
({ int ignore1, ignore2, ignore3; \
@@ -171,24 +154,6 @@
: "cx", "r11", "cc", "memory"); \
})
-#define lll_robust_cond_lock(futex, id, private) \
- ({ int result, ignore1, ignore2; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %4, %2\n\t" \
- "jz 24f\n" \
- "1:\tlea %2, %%" RDI_LP "\n" \
- "2:\tsub $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset 128\n" \
- "3:\tcallq __lll_robust_lock_wait\n" \
- "4:\tadd $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset -128\n" \
- "24:" \
- : "=S" (ignore1), "=D" (ignore2), "=m" (futex), \
- "=a" (result) \
- : "1" (id | FUTEX_WAITERS), "m" (futex), "3" (0), \
- "0" (private) \
- : "cx", "r11", "cc", "memory"); \
- result; })
-
#define lll_timedlock(futex, timeout, private) \
({ int result, ignore1, ignore2, ignore3; \
__asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
@@ -215,25 +180,6 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
#define lll_timedlock_elision(futex, adapt_count, timeout, private) \
__lll_timedlock_elision(&(futex), &(adapt_count), timeout, private)
-#define lll_robust_timedlock(futex, timeout, id, private) \
- ({ int result, ignore1, ignore2, ignore3; \
- __asm __volatile (LOCK_INSTR "cmpxchgl %1, %4\n\t" \
- "jz 24f\n\t" \
- "1:\tlea %4, %%" RDI_LP "\n" \
- "0:\tmov %8, %%" RDX_LP "\n" \
- "2:\tsub $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset 128\n" \
- "3:\tcallq __lll_robust_timedlock_wait\n" \
- "4:\tadd $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset -128\n" \
- "24:" \
- : "=a" (result), "=D" (ignore1), "=S" (ignore2), \
- "=&d" (ignore3), "=m" (futex) \
- : "0" (0), "1" (id), "m" (futex), "m" (timeout), \
- "2" (private) \
- : "memory", "cx", "cc", "r10", "r11"); \
- result; })
-
#if !IS_IN (libc) || defined UP
# define __lll_unlock_asm_start LOCK_INSTR "decl %0\n\t" \
"je 24f\n\t"
@@ -276,26 +222,6 @@ extern int __lll_timedlock_elision (int *futex, short *adapt_count,
: "ax", "cx", "r11", "cc", "memory"); \
})
-#define lll_robust_unlock(futex, private) \
- do \
- { \
- int ignore; \
- __asm __volatile (LOCK_INSTR "andl %2, %0\n\t" \
- "je 24f\n\t" \
- "1:\tlea %0, %%" RDI_LP "\n" \
- "2:\tsub $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset 128\n" \
- "3:\tcallq __lll_unlock_wake\n" \
- "4:\tadd $128, %%" RSP_LP "\n" \
- ".cfi_adjust_cfa_offset -128\n" \
- "24:" \
- : "=m" (futex), "=&D" (ignore) \
- : "i" (FUTEX_WAITERS), "m" (futex), \
- "S" (private) \
- : "ax", "cx", "r11", "cc", "memory"); \
- } \
- while (0)
-
#define lll_islocked(futex) \
(futex != LLL_LOCK_INITIALIZER)
diff --git a/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S b/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
deleted file mode 100644
index e901ec46d3..0000000000
--- a/sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S
+++ /dev/null
@@ -1,306 +0,0 @@
-/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
- This file is part of the GNU C Library.
- Contributed by Ulrich Drepper <drepper@redhat.com>, 2002.
-
- The GNU C Library is free software; you can redistribute it and/or
- modify it under the terms of the GNU Lesser General Public
- License as published by the Free Software Foundation; either
- version 2.1 of the License, or (at your option) any later version.
-
- The GNU C Library is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- Lesser General Public License for more details.
-
- You should have received a copy of the GNU Lesser General Public
- License along with the GNU C Library; if not, see
- <http://www.gnu.org/licenses/>. */
-
-#include <sysdep.h>
-#include <pthread-errnos.h>
-#include <lowlevellock.h>
-#include <lowlevelrobustlock.h>
-#include <kernel-features.h>
-
- .text
-
-#define FUTEX_WAITERS 0x80000000
-#define FUTEX_OWNER_DIED 0x40000000
-
-#ifdef __ASSUME_PRIVATE_FUTEX
-# define LOAD_FUTEX_WAIT(reg) \
- xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
-# define LOAD_FUTEX_WAIT_ABS(reg) \
- xorl $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg
-#else
-# if FUTEX_WAIT == 0
-# define LOAD_FUTEX_WAIT(reg) \
- xorl $FUTEX_PRIVATE_FLAG, reg ; \
- andl %fs:PRIVATE_FUTEX, reg
-# else
-# define LOAD_FUTEX_WAIT(reg) \
- xorl $FUTEX_PRIVATE_FLAG, reg ; \
- andl %fs:PRIVATE_FUTEX, reg ; \
- orl $FUTEX_WAIT, reg
-# endif
-# define LOAD_FUTEX_WAIT_ABS(reg) \
- xorl $FUTEX_PRIVATE_FLAG, reg ; \
- andl %fs:PRIVATE_FUTEX, reg ; \
- orl $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg
-#endif
-
-
- .globl __lll_robust_lock_wait
- .type __lll_robust_lock_wait,@function
- .hidden __lll_robust_lock_wait
- .align 16
-__lll_robust_lock_wait:
- cfi_startproc
- pushq %r10
- cfi_adjust_cfa_offset(8)
- pushq %rdx
- cfi_adjust_cfa_offset(8)
- cfi_offset(%r10, -16)
- cfi_offset(%rdx, -24)
-
- xorq %r10, %r10 /* No timeout. */
- LOAD_FUTEX_WAIT (%esi)
-
-4: movl %eax, %edx
- orl $FUTEX_WAITERS, %edx
-
- testl $FUTEX_OWNER_DIED, %eax
- jnz 3f
-
- cmpl %edx, %eax
- je 1f
-
- LOCK
- cmpxchgl %edx, (%rdi)
- jnz 2f
-
-1: movl $SYS_futex, %eax
- syscall
-
- movl (%rdi), %eax
-
-2: testl %eax, %eax
- jne 4b
-
- movl %fs:TID, %edx
- orl $FUTEX_WAITERS, %edx
- LOCK
- cmpxchgl %edx, (%rdi)
- jnz 4b
- /* NB: %rax == 0 */
-
-3: popq %rdx
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%rdx)
- popq %r10
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r10)
- retq
- cfi_endproc
- .size __lll_robust_lock_wait,.-__lll_robust_lock_wait
-
-
- .globl __lll_robust_timedlock_wait
- .type __lll_robust_timedlock_wait,@function
- .hidden __lll_robust_timedlock_wait
- .align 16
-__lll_robust_timedlock_wait:
- cfi_startproc
-# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
-# ifdef PIC
- cmpl $0, __have_futex_clock_realtime(%rip)
-# else
- cmpl $0, __have_futex_clock_realtime
-# endif
- je .Lreltmo
-# endif
-
- cmpq $0, (%rdx)
- js 7f
-
- pushq %r9
- cfi_adjust_cfa_offset(8)
- cfi_rel_offset(%r9, 0)
- movq %rdx, %r10
- movl $0xffffffff, %r9d
- LOAD_FUTEX_WAIT_ABS (%esi)
-
-1: testl $FUTEX_OWNER_DIED, %eax
- jnz 3f
-
- movl %eax, %edx
- orl $FUTEX_WAITERS, %edx
-
- cmpl %eax, %edx
- je 5f
-
- LOCK
- cmpxchgl %edx, (%rdi)
- movq $0, %rcx /* Must use mov to avoid changing cc. */
- jnz 6f
-
-5: movl $SYS_futex, %eax
- syscall
- movl %eax, %ecx
-
- movl (%rdi), %eax
-
-6: testl %eax, %eax
- jne 2f
-
- movl %fs:TID, %edx
- orl $FUTEX_WAITERS, %edx
- LOCK
- cmpxchgl %edx, (%rdi)
- jnz 2f
-
-3: popq %r9
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r9)
- retq
-
- cfi_adjust_cfa_offset(8)
- cfi_rel_offset(%r9, 0)
- /* Check whether the time expired. */
-2: cmpl $-ETIMEDOUT, %ecx
- je 4f
- cmpl $-EINVAL, %ecx
- jne 1b
-
-4: movl %ecx, %eax
- negl %eax
- jmp 3b
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r9)
-
-7: movl $ETIMEDOUT, %eax
- retq
-
-
-# ifndef __ASSUME_FUTEX_CLOCK_REALTIME
-.Lreltmo:
- /* Check for a valid timeout value. */
- cmpq $1000000000, 8(%rdx)
- jae 3f
-
- pushq %r8
- cfi_adjust_cfa_offset(8)
- pushq %r9
- cfi_adjust_cfa_offset(8)
- pushq %r12
- cfi_adjust_cfa_offset(8)
- pushq %r13
- cfi_adjust_cfa_offset(8)
- cfi_offset(%r8, -16)
- cfi_offset(%r9, -24)
- cfi_offset(%r12, -32)
- cfi_offset(%r13, -40)
- pushq %rsi
- cfi_adjust_cfa_offset(8)
-
- /* Stack frame for the timespec and timeval structs. */
- subq $32, %rsp
- cfi_adjust_cfa_offset(32)
-
- movq %rdi, %r12
- movq %rdx, %r13
-
-1: movq %rax, 16(%rsp)
-
- /* Get current time. */
- movq %rsp, %rdi
- xorl %esi, %esi
- /* This call works because we directly jump to a system call entry
- which preserves all the registers. */
- call JUMPTARGET(__gettimeofday)
-
- /* Compute relative timeout. */
- movq 8(%rsp), %rax
- movl $1000, %edi
- mul %rdi /* Milli seconds to nano seconds. */
- movq (%r13), %rdi
- movq 8(%r13), %rsi
- subq (%rsp), %rdi
- subq %rax, %rsi
- jns 4f
- addq $1000000000, %rsi
- decq %rdi
-4: testq %rdi, %rdi
- js 8f /* Time is already up. */
-
- /* Futex call. */
- movq %rdi, (%rsp) /* Store relative timeout. */
- movq %rsi, 8(%rsp)
-
- movq 16(%rsp), %rdx
- movl %edx, %eax
- orl $FUTEX_WAITERS, %edx
-
- testl $FUTEX_OWNER_DIED, %eax
- jnz 6f
-
- cmpl %eax, %edx
- je 2f
-
- LOCK
- cmpxchgl %edx, (%r12)
- movq $0, %rcx /* Must use mov to avoid changing cc. */
- jnz 5f
-
-2: movq %rsp, %r10
- movl 32(%rsp), %esi
- LOAD_FUTEX_WAIT (%esi)
- movq %r12, %rdi
- movl $SYS_futex, %eax
- syscall
- movq %rax, %rcx
-
- movl (%r12), %eax
-
-5: testl %eax, %eax
- jne 7f
-
- movl %fs:TID, %edx
- orl $FUTEX_WAITERS, %edx
- LOCK
- cmpxchgl %edx, (%r12)
- jnz 7f
-
-6: addq $40, %rsp
- cfi_adjust_cfa_offset(-40)
- popq %r13
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r13)
- popq %r12
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r12)
- popq %r9
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r9)
- popq %r8
- cfi_adjust_cfa_offset(-8)
- cfi_restore(%r8)
- retq
-
-3: movl $EINVAL, %eax
- retq
-
- cfi_adjust_cfa_offset(72)
- cfi_offset(%r8, -16)
- cfi_offset(%r9, -24)
- cfi_offset(%r12, -32)
- cfi_offset(%r13, -40)
- /* Check whether the time expired. */
-7: cmpl $-ETIMEDOUT, %ecx
- jne 1b
-
-8: movl $ETIMEDOUT, %eax
- jmp 6b
-#endif
- cfi_endproc
- .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait