From f6d1d86d0c2cca118fd6a7734ea0ccd72b51c4b4 Mon Sep 17 00:00:00 2001
From: "H.J. Lu"
Date: Wed, 30 Sep 2015 10:12:26 -0700
Subject: Move sysdeps/unix/sysv/linux/i386/i486/*.? to i386

Since glibc doesn't support i386 any more, we can move
sysdeps/unix/sysv/linux/i386/i486/*.? to i386.

	[BZ #19006]
	* sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S: Moved to ...
	* sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S: Here.
	* sysdeps/unix/sysv/linux/i386/i486/lll_timedlock_wait.c: Moved to ...
	* sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c: Here.
	* sysdeps/unix/sysv/linux/i386/i486/lll_timedwait_tid.c: Moved to ...
	* sysdeps/unix/sysv/linux/i386/lll_timedwait_tid.c: Here.
	* sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Moved to ...
	* sysdeps/unix/sysv/linux/i386/lowlevellock.S: Here.
	* sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S: Moved to ...
	* sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S: Here.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S: Moved to ...
	* sysdeps/unix/sysv/linux/i386/pthread_barrier_wait.S: Here.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S: Moved to ...
	* sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.S: Here.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S: Moved to ...
	* sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S: Here.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Moved to ...
	* sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.S: Here.
	* sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Moved to ...
	* sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S: Here.
	* sysdeps/unix/sysv/linux/i386/i586/libc-lowlevellock.S: Removed.
	* sysdeps/unix/sysv/linux/i386/i586/lll_timedlock_wait.c: Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/lll_timedwait_tid.c: Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/lowlevellock.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/pthread_barrier_wait.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/pthread_cond_broadcast.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/pthread_cond_signal.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/pthread_cond_timedwait.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i586/pthread_cond_wait.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/libc-lowlevellock.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/lll_timedlock_wait.c: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/lll_timedwait_tid.c: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/lowlevellock.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/pthread_barrier_wait.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/pthread_cond_broadcast.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/pthread_cond_signal.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/pthread_cond_wait.S: Likewise.
	* sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S: Replace
	../i486/pthread_cond_timedwait.S with ../pthread_cond_timedwait.S.
---
 .../unix/sysv/linux/i386/i486/libc-lowlevellock.S  |  19 -
 .../unix/sysv/linux/i386/i486/lll_timedlock_wait.c |   1 -
 .../unix/sysv/linux/i386/i486/lll_timedwait_tid.c  |   1 -
 sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S   | 466 ----------
 .../unix/sysv/linux/i386/i486/lowlevelrobustlock.S | 232 -----
 .../sysv/linux/i386/i486/pthread_barrier_wait.S    | 187 ----
 .../sysv/linux/i386/i486/pthread_cond_broadcast.S  | 241 -----
 .../sysv/linux/i386/i486/pthread_cond_signal.S     | 216 -----
 .../sysv/linux/i386/i486/pthread_cond_timedwait.S  | 973 ---------------------
 .../unix/sysv/linux/i386/i486/pthread_cond_wait.S  | 641 --------------
 .../unix/sysv/linux/i386/i586/libc-lowlevellock.S  |   1 -
 .../unix/sysv/linux/i386/i586/lll_timedlock_wait.c |   1 -
 .../unix/sysv/linux/i386/i586/lll_timedwait_tid.c  |   1 -
 sysdeps/unix/sysv/linux/i386/i586/lowlevellock.S   |  19 -
 .../unix/sysv/linux/i386/i586/lowlevelrobustlock.S |  19 -
 .../sysv/linux/i386/i586/pthread_barrier_wait.S    |  19 -
 .../sysv/linux/i386/i586/pthread_cond_broadcast.S  |  19 -
 .../sysv/linux/i386/i586/pthread_cond_signal.S     |  19 -
 .../sysv/linux/i386/i586/pthread_cond_timedwait.S  |  19 -
 .../unix/sysv/linux/i386/i586/pthread_cond_wait.S  |  19 -
 .../unix/sysv/linux/i386/i686/libc-lowlevellock.S  |   1 -
 .../unix/sysv/linux/i386/i686/lll_timedlock_wait.c |   1 -
 .../unix/sysv/linux/i386/i686/lll_timedwait_tid.c  |   1 -
 sysdeps/unix/sysv/linux/i386/i686/lowlevellock.S   |  19 -
 .../unix/sysv/linux/i386/i686/lowlevelrobustlock.S |  19 -
 .../sysv/linux/i386/i686/pthread_barrier_wait.S    |  19 -
 .../sysv/linux/i386/i686/pthread_cond_broadcast.S  |  19 -
 .../sysv/linux/i386/i686/pthread_cond_signal.S     |  19 -
 .../sysv/linux/i386/i686/pthread_cond_timedwait.S  |   2 +-
 .../unix/sysv/linux/i386/i686/pthread_cond_wait.S  |  19 -
 sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S   |  19 +
 sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c  |   1 +
 sysdeps/unix/sysv/linux/i386/lll_timedwait_tid.c   |   1 +
 sysdeps/unix/sysv/linux/i386/lowlevellock.S        | 466 ++++++++++
 sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S  | 232 +++++
 .../unix/sysv/linux/i386/pthread_barrier_wait.S    | 187 ++++
 .../unix/sysv/linux/i386/pthread_cond_broadcast.S  | 241 +++++
 sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S | 216 +++++
 .../unix/sysv/linux/i386/pthread_cond_timedwait.S  | 973 +++++++++++++++++++++
 sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S   | 641 ++++++++++++++
 40 files changed, 2978 insertions(+), 3231 deletions(-)
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i486/lll_timedlock_wait.c
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i486/lll_timedwait_tid.c
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i586/libc-lowlevellock.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i586/lll_timedlock_wait.c
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i586/lll_timedwait_tid.c
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i586/lowlevellock.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i586/pthread_barrier_wait.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i586/pthread_cond_broadcast.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i586/pthread_cond_signal.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i586/pthread_cond_timedwait.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i586/pthread_cond_wait.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i686/libc-lowlevellock.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i686/lll_timedlock_wait.c
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i686/lll_timedwait_tid.c
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i686/lowlevellock.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i686/pthread_barrier_wait.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i686/pthread_cond_broadcast.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i686/pthread_cond_signal.S
 delete mode 100644 sysdeps/unix/sysv/linux/i386/i686/pthread_cond_wait.S
 create mode 100644 sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S
 create mode 100644 sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c
 create mode 100644 sysdeps/unix/sysv/linux/i386/lll_timedwait_tid.c
 create mode 100644 sysdeps/unix/sysv/linux/i386/lowlevellock.S
 create mode 100644 sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S
 create mode 100644 sysdeps/unix/sysv/linux/i386/pthread_barrier_wait.S
 create mode 100644 sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.S
 create mode 100644 sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S
 create mode 100644 sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.S
 create mode 100644 sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S

diff --git a/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S b/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
deleted file mode 100644
index 111e9c88ed..0000000000
--- a/sysdeps/unix/sysv/linux/i386/i486/libc-lowlevellock.S
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (C) 2002-2015 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-   Contributed by Ulrich Drepper , 2002.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include "lowlevellock.S"
diff --git a/sysdeps/unix/sysv/linux/i386/i486/lll_timedlock_wait.c b/sysdeps/unix/sysv/linux/i386/i486/lll_timedlock_wait.c
deleted file mode 100644
index f6875b8f89..0000000000
--- a/sysdeps/unix/sysv/linux/i386/i486/lll_timedlock_wait.c
+++ /dev/null
@@ -1 +0,0 @@
-/* __lll_timedlock_wait is in lowlevellock.S. */
diff --git a/sysdeps/unix/sysv/linux/i386/i486/lll_timedwait_tid.c b/sysdeps/unix/sysv/linux/i386/i486/lll_timedwait_tid.c
deleted file mode 100644
index 43900c6294..0000000000
--- a/sysdeps/unix/sysv/linux/i386/i486/lll_timedwait_tid.c
+++ /dev/null
@@ -1 +0,0 @@
-/* __lll_timedwait_tid is in lowlevellock.S.
*/ diff --git a/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S b/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S deleted file mode 100644 index 94bb59b384..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S +++ /dev/null @@ -1,466 +0,0 @@ -/* Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include -#include -#include -#include - -#include - - .text - -#ifdef __ASSUME_PRIVATE_FUTEX -# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ - movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg -# define LOAD_PRIVATE_FUTEX_WAKE(reg) \ - movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg -# define LOAD_FUTEX_WAIT(reg) \ - xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg -# define LOAD_FUTEX_WAIT_ABS(reg) \ - xorl $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg -# define LOAD_FUTEX_WAKE(reg) \ - xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg -#else -# if FUTEX_WAIT == 0 -# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ - movl %gs:PRIVATE_FUTEX, reg -# else -# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ - movl %gs:PRIVATE_FUTEX, reg ; \ - orl $FUTEX_WAIT, reg -# endif -# define LOAD_PRIVATE_FUTEX_WAKE(reg) \ - movl %gs:PRIVATE_FUTEX, reg ; \ - orl $FUTEX_WAKE, reg -# if FUTEX_WAIT == 0 -# define LOAD_FUTEX_WAIT(reg) \ - xorl $FUTEX_PRIVATE_FLAG, reg ; \ - andl %gs:PRIVATE_FUTEX, reg -# else -# define LOAD_FUTEX_WAIT(reg) \ - xorl $FUTEX_PRIVATE_FLAG, reg ; \ - andl %gs:PRIVATE_FUTEX, reg ; \ - orl $FUTEX_WAIT, reg -# endif -# define LOAD_FUTEX_WAIT_ABS(reg) \ - xorl $FUTEX_PRIVATE_FLAG, reg ; \ - andl %gs:PRIVATE_FUTEX, reg ; \ - orl $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg -# define LOAD_FUTEX_WAKE(reg) \ - xorl $FUTEX_PRIVATE_FLAG, reg ; \ - andl %gs:PRIVATE_FUTEX, reg ; \ - orl $FUTEX_WAKE, reg -#endif - - .globl __lll_lock_wait_private - .type __lll_lock_wait_private,@function - .hidden __lll_lock_wait_private - .align 16 -__lll_lock_wait_private: - cfi_startproc - pushl %edx - cfi_adjust_cfa_offset(4) - pushl %ebx - cfi_adjust_cfa_offset(4) - pushl %esi - cfi_adjust_cfa_offset(4) - cfi_offset(%edx, -8) - cfi_offset(%ebx, -12) - cfi_offset(%esi, -16) - - movl $2, %edx - movl %ecx, %ebx - xorl %esi, %esi /* No timeout. 
*/ - LOAD_PRIVATE_FUTEX_WAIT (%ecx) - - cmpl %edx, %eax /* NB: %edx == 2 */ - jne 2f - -1: LIBC_PROBE (lll_lock_wait_private, 1, %ebx) - movl $SYS_futex, %eax - ENTER_KERNEL - -2: movl %edx, %eax - xchgl %eax, (%ebx) /* NB: lock is implied */ - - testl %eax, %eax - jnz 1b - - popl %esi - cfi_adjust_cfa_offset(-4) - cfi_restore(%esi) - popl %ebx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebx) - popl %edx - cfi_adjust_cfa_offset(-4) - cfi_restore(%edx) - ret - cfi_endproc - .size __lll_lock_wait_private,.-__lll_lock_wait_private - -#if !IS_IN (libc) - .globl __lll_lock_wait - .type __lll_lock_wait,@function - .hidden __lll_lock_wait - .align 16 -__lll_lock_wait: - cfi_startproc - pushl %edx - cfi_adjust_cfa_offset(4) - pushl %ebx - cfi_adjust_cfa_offset(4) - pushl %esi - cfi_adjust_cfa_offset(4) - cfi_offset(%edx, -8) - cfi_offset(%ebx, -12) - cfi_offset(%esi, -16) - - movl %edx, %ebx - movl $2, %edx - xorl %esi, %esi /* No timeout. */ - LOAD_FUTEX_WAIT (%ecx) - - cmpl %edx, %eax /* NB: %edx == 2 */ - jne 2f - -1: movl $SYS_futex, %eax - ENTER_KERNEL - -2: movl %edx, %eax - xchgl %eax, (%ebx) /* NB: lock is implied */ - - testl %eax, %eax - jnz 1b - - popl %esi - cfi_adjust_cfa_offset(-4) - cfi_restore(%esi) - popl %ebx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebx) - popl %edx - cfi_adjust_cfa_offset(-4) - cfi_restore(%edx) - ret - cfi_endproc - .size __lll_lock_wait,.-__lll_lock_wait - - /* %ecx: futex - %esi: flags - %edx: timeout - %eax: futex value - */ - .globl __lll_timedlock_wait - .type __lll_timedlock_wait,@function - .hidden __lll_timedlock_wait - .align 16 -__lll_timedlock_wait: - cfi_startproc - pushl %ebp - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%ebp, 0) - pushl %ebx - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%ebx, 0) - -# ifndef __ASSUME_FUTEX_CLOCK_REALTIME -# ifdef PIC - LOAD_PIC_REG (bx) - cmpl $0, __have_futex_clock_realtime@GOTOFF(%ebx) -# else - cmpl $0, __have_futex_clock_realtime -# endif - je .Lreltmo -# endif - - cmpl $0, (%edx) - js 8f - - movl %ecx, %ebx - movl %esi, %ecx - movl %edx, %esi - movl $0xffffffff, %ebp - LOAD_FUTEX_WAIT_ABS (%ecx) - - movl $2, %edx - cmpl %edx, %eax - jne 2f - -1: movl $SYS_futex, %eax - movl $2, %edx - ENTER_KERNEL - -2: xchgl %edx, (%ebx) /* NB: lock is implied */ - - testl %edx, %edx - jz 3f - - cmpl $-ETIMEDOUT, %eax - je 4f - cmpl $-EINVAL, %eax - jne 1b -4: movl %eax, %edx - negl %edx - -3: movl %edx, %eax -7: popl %ebx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebx) - popl %ebp - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebp) - ret - -8: movl $ETIMEDOUT, %eax - jmp 7b - -# ifndef __ASSUME_FUTEX_CLOCK_REALTIME -.Lreltmo: - /* Check for a valid timeout value. */ - cmpl $1000000000, 4(%edx) - jae 3f - - pushl %esi - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%esi, 0) - pushl %edi - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%edi, 0) - - /* Stack frame for the timespec and timeval structs. */ - subl $8, %esp - cfi_adjust_cfa_offset(8) - - movl %ecx, %ebp - movl %edx, %edi - - movl $2, %edx - xchgl %edx, (%ebp) - - test %edx, %edx - je 6f - -1: - /* Get current time. */ - movl %esp, %ebx - xorl %ecx, %ecx - movl $__NR_gettimeofday, %eax - ENTER_KERNEL - - /* Compute relative timeout. */ - movl 4(%esp), %eax - movl $1000, %edx - mul %edx /* Milli seconds to nano seconds. */ - movl (%edi), %ecx - movl 4(%edi), %edx - subl (%esp), %ecx - subl %eax, %edx - jns 4f - addl $1000000000, %edx - subl $1, %ecx -4: testl %ecx, %ecx - js 2f /* Time is already up. */ - - /* Store relative timeout. 
*/ - movl %ecx, (%esp) - movl %edx, 4(%esp) - - /* Futex call. */ - movl %ebp, %ebx - movl $2, %edx - movl %esp, %esi - movl 16(%esp), %ecx - LOAD_FUTEX_WAIT (%ecx) - movl $SYS_futex, %eax - ENTER_KERNEL - - /* NB: %edx == 2 */ - xchgl %edx, (%ebp) - - testl %edx, %edx - je 6f - - cmpl $-ETIMEDOUT, %eax - jne 1b -2: movl $ETIMEDOUT, %edx - -6: addl $8, %esp - cfi_adjust_cfa_offset(-8) - popl %edi - cfi_adjust_cfa_offset(-4) - cfi_restore(%edi) - popl %esi - cfi_adjust_cfa_offset(-4) - cfi_restore(%esi) -7: popl %ebx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebx) - popl %ebp - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebp) - movl %edx, %eax - ret - -3: movl $EINVAL, %edx - jmp 7b -# endif - cfi_endproc - .size __lll_timedlock_wait,.-__lll_timedlock_wait -#endif - - .globl __lll_unlock_wake_private - .type __lll_unlock_wake_private,@function - .hidden __lll_unlock_wake_private - .align 16 -__lll_unlock_wake_private: - cfi_startproc - pushl %ebx - cfi_adjust_cfa_offset(4) - pushl %ecx - cfi_adjust_cfa_offset(4) - pushl %edx - cfi_adjust_cfa_offset(4) - cfi_offset(%ebx, -8) - cfi_offset(%ecx, -12) - cfi_offset(%edx, -16) - - movl %eax, %ebx - movl $0, (%eax) - LOAD_PRIVATE_FUTEX_WAKE (%ecx) - movl $1, %edx /* Wake one thread. */ - movl $SYS_futex, %eax - ENTER_KERNEL - - popl %edx - cfi_adjust_cfa_offset(-4) - cfi_restore(%edx) - popl %ecx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ecx) - popl %ebx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebx) - ret - cfi_endproc - .size __lll_unlock_wake_private,.-__lll_unlock_wake_private - -#if !IS_IN (libc) - .globl __lll_unlock_wake - .type __lll_unlock_wake,@function - .hidden __lll_unlock_wake - .align 16 -__lll_unlock_wake: - cfi_startproc - pushl %ebx - cfi_adjust_cfa_offset(4) - pushl %ecx - cfi_adjust_cfa_offset(4) - pushl %edx - cfi_adjust_cfa_offset(4) - cfi_offset(%ebx, -8) - cfi_offset(%ecx, -12) - cfi_offset(%edx, -16) - - movl %eax, %ebx - movl $0, (%eax) - LOAD_FUTEX_WAKE (%ecx) - movl $1, %edx /* Wake one thread. */ - movl $SYS_futex, %eax - ENTER_KERNEL - - popl %edx - cfi_adjust_cfa_offset(-4) - cfi_restore(%edx) - popl %ecx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ecx) - popl %ebx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebx) - ret - cfi_endproc - .size __lll_unlock_wake,.-__lll_unlock_wake - - .globl __lll_timedwait_tid - .type __lll_timedwait_tid,@function - .hidden __lll_timedwait_tid - .align 16 -__lll_timedwait_tid: - pushl %edi - pushl %esi - pushl %ebx - pushl %ebp - - movl %eax, %ebp - movl %edx, %edi - subl $8, %esp - - /* Get current time. */ -2: movl %esp, %ebx - xorl %ecx, %ecx - movl $__NR_gettimeofday, %eax - ENTER_KERNEL - - /* Compute relative timeout. */ - movl 4(%esp), %eax - movl $1000, %edx - mul %edx /* Milli seconds to nano seconds. */ - movl (%edi), %ecx - movl 4(%edi), %edx - subl (%esp), %ecx - subl %eax, %edx - jns 5f - addl $1000000000, %edx - subl $1, %ecx -5: testl %ecx, %ecx - js 6f /* Time is already up. */ - - movl %ecx, (%esp) /* Store relative timeout. */ - movl %edx, 4(%esp) - - movl (%ebp), %edx - testl %edx, %edx - jz 4f - - movl %esp, %esi - /* XXX The kernel so far uses global futex for the wakeup at - all times. 
*/ - xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */ - movl %ebp, %ebx - movl $SYS_futex, %eax - ENTER_KERNEL - - cmpl $0, (%ebx) - jne 1f -4: xorl %eax, %eax - -3: addl $8, %esp - popl %ebp - popl %ebx - popl %esi - popl %edi - ret - -1: cmpl $-ETIMEDOUT, %eax - jne 2b -6: movl $ETIMEDOUT, %eax - jmp 3b - .size __lll_timedwait_tid,.-__lll_timedwait_tid -#endif diff --git a/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S b/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S deleted file mode 100644 index aefe77cce9..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S +++ /dev/null @@ -1,232 +0,0 @@ -/* Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include -#include -#include -#include -#include - - .text - -#define FUTEX_WAITERS 0x80000000 -#define FUTEX_OWNER_DIED 0x40000000 - -#ifdef __ASSUME_PRIVATE_FUTEX -# define LOAD_FUTEX_WAIT(reg) \ - xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg -#else -# if FUTEX_WAIT == 0 -# define LOAD_FUTEX_WAIT(reg) \ - xorl $FUTEX_PRIVATE_FLAG, reg ; \ - andl %gs:PRIVATE_FUTEX, reg -# else -# define LOAD_FUTEX_WAIT(reg) \ - xorl $FUTEX_PRIVATE_FLAG, reg ; \ - andl %gs:PRIVATE_FUTEX, reg ; \ - orl $FUTEX_WAIT, reg -# endif -#endif - - .globl __lll_robust_lock_wait - .type __lll_robust_lock_wait,@function - .hidden __lll_robust_lock_wait - .align 16 -__lll_robust_lock_wait: - cfi_startproc - pushl %edx - cfi_adjust_cfa_offset(4) - pushl %ebx - cfi_adjust_cfa_offset(4) - pushl %esi - cfi_adjust_cfa_offset(4) - cfi_offset(%edx, -8) - cfi_offset(%ebx, -12) - cfi_offset(%esi, -16) - - movl %edx, %ebx - xorl %esi, %esi /* No timeout. */ - LOAD_FUTEX_WAIT (%ecx) - -4: movl %eax, %edx - orl $FUTEX_WAITERS, %edx - - testl $FUTEX_OWNER_DIED, %eax - jnz 3f - - cmpl %edx, %eax /* NB: %edx == 2 */ - je 1f - - LOCK - cmpxchgl %edx, (%ebx) - jnz 2f - -1: movl $SYS_futex, %eax - ENTER_KERNEL - - movl (%ebx), %eax - -2: test %eax, %eax - jne 4b - - movl %gs:TID, %edx - orl $FUTEX_WAITERS, %edx - LOCK - cmpxchgl %edx, (%ebx) - jnz 4b - /* NB: %eax == 0 */ - -3: popl %esi - cfi_adjust_cfa_offset(-4) - cfi_restore(%esi) - popl %ebx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebx) - popl %edx - cfi_adjust_cfa_offset(-4) - cfi_restore(%edx) - ret - cfi_endproc - .size __lll_robust_lock_wait,.-__lll_robust_lock_wait - - - .globl __lll_robust_timedlock_wait - .type __lll_robust_timedlock_wait,@function - .hidden __lll_robust_timedlock_wait - .align 16 -__lll_robust_timedlock_wait: - cfi_startproc - /* Check for a valid timeout value. 
*/ - cmpl $1000000000, 4(%edx) - jae 3f - - pushl %edi - cfi_adjust_cfa_offset(4) - pushl %esi - cfi_adjust_cfa_offset(4) - pushl %ebx - cfi_adjust_cfa_offset(4) - pushl %ebp - cfi_adjust_cfa_offset(4) - cfi_offset(%edi, -8) - cfi_offset(%esi, -12) - cfi_offset(%ebx, -16) - cfi_offset(%ebp, -20) - - /* Stack frame for the timespec and timeval structs. */ - subl $12, %esp - cfi_adjust_cfa_offset(12) - - movl %ecx, %ebp - movl %edx, %edi - -1: movl %eax, 8(%esp) - - /* Get current time. */ - movl %esp, %ebx - xorl %ecx, %ecx - movl $__NR_gettimeofday, %eax - ENTER_KERNEL - - /* Compute relative timeout. */ - movl 4(%esp), %eax - movl $1000, %edx - mul %edx /* Milli seconds to nano seconds. */ - movl (%edi), %ecx - movl 4(%edi), %edx - subl (%esp), %ecx - subl %eax, %edx - jns 4f - addl $1000000000, %edx - subl $1, %ecx -4: testl %ecx, %ecx - js 8f /* Time is already up. */ - - /* Store relative timeout. */ - movl %ecx, (%esp) - movl %edx, 4(%esp) - - movl %ebp, %ebx - - movl 8(%esp), %edx - movl %edx, %eax - orl $FUTEX_WAITERS, %edx - - testl $FUTEX_OWNER_DIED, %eax - jnz 6f - - cmpl %eax, %edx - je 2f - - LOCK - cmpxchgl %edx, (%ebx) - movl $0, %ecx /* Must use mov to avoid changing cc. */ - jnz 5f - -2: - /* Futex call. */ - movl %esp, %esi - movl 20(%esp), %ecx - LOAD_FUTEX_WAIT (%ecx) - movl $SYS_futex, %eax - ENTER_KERNEL - movl %eax, %ecx - - movl (%ebx), %eax - -5: testl %eax, %eax - jne 7f - - movl %gs:TID, %edx - orl $FUTEX_WAITERS, %edx - LOCK - cmpxchgl %edx, (%ebx) - jnz 7f - -6: addl $12, %esp - cfi_adjust_cfa_offset(-12) - popl %ebp - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebp) - popl %ebx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebx) - popl %esi - cfi_adjust_cfa_offset(-4) - cfi_restore(%esi) - popl %edi - cfi_adjust_cfa_offset(-4) - cfi_restore(%edi) - ret - -3: movl $EINVAL, %eax - ret - - cfi_adjust_cfa_offset(28) - cfi_offset(%edi, -8) - cfi_offset(%esi, -12) - cfi_offset(%ebx, -16) - cfi_offset(%ebp, -20) - /* Check whether the time expired. */ -7: cmpl $-ETIMEDOUT, %ecx - jne 1b - -8: movl $ETIMEDOUT, %eax - jmp 6b - cfi_endproc - .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S deleted file mode 100644 index ad1a774fbd..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i486/pthread_barrier_wait.S +++ /dev/null @@ -1,187 +0,0 @@ -/* Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include -#include -#include - - .text - - .globl __pthread_barrier_wait - .type __pthread_barrier_wait,@function - .align 16 -__pthread_barrier_wait: - cfi_startproc - pushl %ebx - cfi_adjust_cfa_offset(4) - cfi_offset(%ebx, -8) - - movl 8(%esp), %ebx - - /* Get the mutex. 
*/ - movl $1, %edx - xorl %eax, %eax - LOCK - cmpxchgl %edx, MUTEX(%ebx) - jnz 1f - - /* One less waiter. If this was the last one needed wake - everybody. */ -2: subl $1, LEFT(%ebx) - je 3f - - /* There are more threads to come. */ - pushl %esi - cfi_adjust_cfa_offset(4) - cfi_offset(%esi, -12) - -#if CURR_EVENT == 0 - movl (%ebx), %edx -#else - movl CURR_EVENT(%ebx), %edx -#endif - - /* Release the mutex. */ - LOCK - subl $1, MUTEX(%ebx) - jne 6f - - /* Wait for the remaining threads. The call will return immediately - if the CURR_EVENT memory has meanwhile been changed. */ -7: -#if FUTEX_WAIT == 0 - movl PRIVATE(%ebx), %ecx -#else - movl $FUTEX_WAIT, %ecx - orl PRIVATE(%ebx), %ecx -#endif - xorl %esi, %esi -8: movl $SYS_futex, %eax - ENTER_KERNEL - - /* Don't return on spurious wakeups. The syscall does not change - any register except %eax so there is no need to reload any of - them. */ -#if CURR_EVENT == 0 - cmpl %edx, (%ebx) -#else - cmpl %edx, CURR_EVENT(%ebx) -#endif - je 8b - - /* Increment LEFT. If this brings the count back to the - initial count unlock the object. */ - movl $1, %edx - movl INIT_COUNT(%ebx), %ecx - LOCK - xaddl %edx, LEFT(%ebx) - subl $1, %ecx - cmpl %ecx, %edx - jne 10f - - /* Release the mutex. We cannot release the lock before - waking the waiting threads since otherwise a new thread might - arrive and gets waken up, too. */ - LOCK - subl $1, MUTEX(%ebx) - jne 9f - - /* Note: %esi is still zero. */ -10: movl %esi, %eax /* != PTHREAD_BARRIER_SERIAL_THREAD */ - - popl %esi - cfi_adjust_cfa_offset(-4) - cfi_restore(%esi) - popl %ebx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebx) - ret - - cfi_adjust_cfa_offset(4) - cfi_offset(%ebx, -8) - - /* The necessary number of threads arrived. */ -3: -#if CURR_EVENT == 0 - addl $1, (%ebx) -#else - addl $1, CURR_EVENT(%ebx) -#endif - - /* Wake up all waiters. The count is a signed number in the kernel - so 0x7fffffff is the highest value. */ - movl $0x7fffffff, %edx - movl $FUTEX_WAKE, %ecx - orl PRIVATE(%ebx), %ecx - movl $SYS_futex, %eax - ENTER_KERNEL - - /* Increment LEFT. If this brings the count back to the - initial count unlock the object. */ - movl $1, %edx - movl INIT_COUNT(%ebx), %ecx - LOCK - xaddl %edx, LEFT(%ebx) - subl $1, %ecx - cmpl %ecx, %edx - jne 5f - - /* Release the mutex. We cannot release the lock before - waking the waiting threads since otherwise a new thread might - arrive and gets waken up, too. 
*/ - LOCK - subl $1, MUTEX(%ebx) - jne 4f - -5: orl $-1, %eax /* == PTHREAD_BARRIER_SERIAL_THREAD */ - - popl %ebx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebx) - ret - - cfi_adjust_cfa_offset(4) - cfi_offset(%ebx, -8) -1: movl PRIVATE(%ebx), %ecx - leal MUTEX(%ebx), %edx - xorl $LLL_SHARED, %ecx - call __lll_lock_wait - jmp 2b - -4: movl PRIVATE(%ebx), %ecx - leal MUTEX(%ebx), %eax - xorl $LLL_SHARED, %ecx - call __lll_unlock_wake - jmp 5b - - cfi_adjust_cfa_offset(4) - cfi_offset(%esi, -12) -6: movl PRIVATE(%ebx), %ecx - leal MUTEX(%ebx), %eax - xorl $LLL_SHARED, %ecx - call __lll_unlock_wake - jmp 7b - -9: movl PRIVATE(%ebx), %ecx - leal MUTEX(%ebx), %eax - xorl $LLL_SHARED, %ecx - call __lll_unlock_wake - jmp 10b - cfi_endproc - .size __pthread_barrier_wait,.-__pthread_barrier_wait -weak_alias (__pthread_barrier_wait, pthread_barrier_wait) diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S deleted file mode 100644 index 5ddd5ac592..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.S +++ /dev/null @@ -1,241 +0,0 @@ -/* Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include -#include -#include -#include -#include -#include -#include -#include - - .text - - /* int pthread_cond_broadcast (pthread_cond_t *cond) */ - .globl __pthread_cond_broadcast - .type __pthread_cond_broadcast, @function - .align 16 -__pthread_cond_broadcast: - cfi_startproc - pushl %ebx - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%ebx, 0) - pushl %esi - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%esi, 0) - pushl %edi - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%edi, 0) - pushl %ebp - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%ebp, 0) - cfi_remember_state - - movl 20(%esp), %ebx - - LIBC_PROBE (cond_broadcast, 1, %edx) - - /* Get internal lock. */ - movl $1, %edx - xorl %eax, %eax - LOCK -#if cond_lock == 0 - cmpxchgl %edx, (%ebx) -#else - cmpxchgl %edx, cond_lock(%ebx) -#endif - jnz 1f - -2: addl $cond_futex, %ebx - movl total_seq+4-cond_futex(%ebx), %eax - movl total_seq-cond_futex(%ebx), %ebp - cmpl wakeup_seq+4-cond_futex(%ebx), %eax - ja 3f - jb 4f - cmpl wakeup_seq-cond_futex(%ebx), %ebp - jna 4f - - /* Cause all currently waiting threads to recognize they are - woken up. */ -3: movl %ebp, wakeup_seq-cond_futex(%ebx) - movl %eax, wakeup_seq-cond_futex+4(%ebx) - movl %ebp, woken_seq-cond_futex(%ebx) - movl %eax, woken_seq-cond_futex+4(%ebx) - addl %ebp, %ebp - addl $1, broadcast_seq-cond_futex(%ebx) - movl %ebp, (%ebx) - - /* Get the address of the mutex used. */ - movl dep_mutex-cond_futex(%ebx), %edi - - /* Unlock. */ - LOCK - subl $1, cond_lock-cond_futex(%ebx) - jne 7f - - /* Don't use requeue for pshared condvars. 
*/ -8: cmpl $-1, %edi - je 9f - - /* Do not use requeue for pshared condvars. */ - testl $PS_BIT, MUTEX_KIND(%edi) - jne 9f - - /* Requeue to a non-robust PI mutex if the PI bit is set and - the robust bit is not set. */ - movl MUTEX_KIND(%edi), %eax - andl $(ROBUST_BIT|PI_BIT), %eax - cmpl $PI_BIT, %eax - je 81f - - /* Wake up all threads. */ -#ifdef __ASSUME_PRIVATE_FUTEX - movl $(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %ecx -#else - movl %gs:PRIVATE_FUTEX, %ecx - orl $FUTEX_CMP_REQUEUE, %ecx -#endif - movl $SYS_futex, %eax - movl $0x7fffffff, %esi - movl $1, %edx - /* Get the address of the futex involved. */ -# if MUTEX_FUTEX != 0 - addl $MUTEX_FUTEX, %edi -# endif -/* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for sysenter. - ENTER_KERNEL */ - int $0x80 - - /* For any kind of error, which mainly is EAGAIN, we try again - with WAKE. The general test also covers running on old - kernels. */ - cmpl $0xfffff001, %eax - jae 9f - -6: xorl %eax, %eax - popl %ebp - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebp) - popl %edi - cfi_adjust_cfa_offset(-4) - cfi_restore(%edi) - popl %esi - cfi_adjust_cfa_offset(-4) - cfi_restore(%esi) - popl %ebx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebx) - ret - - cfi_restore_state - -81: movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx - movl $SYS_futex, %eax - movl $0x7fffffff, %esi - movl $1, %edx - /* Get the address of the futex involved. */ -# if MUTEX_FUTEX != 0 - addl $MUTEX_FUTEX, %edi -# endif - int $0x80 - - /* For any kind of error, which mainly is EAGAIN, we try again - with WAKE. The general test also covers running on old - kernels. */ - cmpl $0xfffff001, %eax - jb 6b - jmp 9f - - /* Initial locking failed. */ -1: -#if cond_lock == 0 - movl %ebx, %edx -#else - leal cond_lock(%ebx), %edx -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_lock_wait - jmp 2b - - .align 16 - /* Unlock. */ -4: LOCK - subl $1, cond_lock-cond_futex(%ebx) - je 6b - - /* Unlock in loop requires wakeup. */ -5: leal cond_lock-cond_futex(%ebx), %eax -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex-cond_futex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_unlock_wake - jmp 6b - - /* Unlock in loop requires wakeup. */ -7: leal cond_lock-cond_futex(%ebx), %eax -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex-cond_futex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_unlock_wake - jmp 8b - -9: /* The futex requeue functionality is not available. 
*/ - movl $0x7fffffff, %edx -#if FUTEX_PRIVATE_FLAG > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex-cond_futex(%ebx) - sete %cl - subl $1, %ecx -#ifdef __ASSUME_PRIVATE_FUTEX - andl $FUTEX_PRIVATE_FLAG, %ecx -#else - andl %gs:PRIVATE_FUTEX, %ecx -#endif - addl $FUTEX_WAKE, %ecx - movl $SYS_futex, %eax - ENTER_KERNEL - jmp 6b - cfi_endproc - .size __pthread_cond_broadcast, .-__pthread_cond_broadcast -versioned_symbol (libpthread, __pthread_cond_broadcast, pthread_cond_broadcast, - GLIBC_2_3_2) diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S deleted file mode 100644 index 8f4d937ec1..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_signal.S +++ /dev/null @@ -1,216 +0,0 @@ -/* Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include -#include -#include -#include -#include -#include -#include -#include - - .text - - /* int pthread_cond_signal (pthread_cond_t *cond) */ - .globl __pthread_cond_signal - .type __pthread_cond_signal, @function - .align 16 -__pthread_cond_signal: - - cfi_startproc - pushl %ebx - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%ebx, 0) - pushl %edi - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%edi, 0) - cfi_remember_state - - movl 12(%esp), %edi - - LIBC_PROBE (cond_signal, 1, %edi) - - /* Get internal lock. */ - movl $1, %edx - xorl %eax, %eax - LOCK -#if cond_lock == 0 - cmpxchgl %edx, (%edi) -#else - cmpxchgl %edx, cond_lock(%edi) -#endif - jnz 1f - -2: leal cond_futex(%edi), %ebx - movl total_seq+4(%edi), %eax - movl total_seq(%edi), %ecx - cmpl wakeup_seq+4(%edi), %eax -#if cond_lock != 0 - /* Must use leal to preserve the flags. */ - leal cond_lock(%edi), %edi -#endif - ja 3f - jb 4f - cmpl wakeup_seq-cond_futex(%ebx), %ecx - jbe 4f - - /* Bump the wakeup number. */ -3: addl $1, wakeup_seq-cond_futex(%ebx) - adcl $0, wakeup_seq-cond_futex+4(%ebx) - addl $1, (%ebx) - - /* Wake up one thread. */ - pushl %esi - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%esi, 0) - pushl %ebp - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%ebp, 0) - -#if FUTEX_PRIVATE_FLAG > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex-cond_futex(%ebx) - sete %cl - je 8f - - movl dep_mutex-cond_futex(%ebx), %edx - /* Requeue to a non-robust PI mutex if the PI bit is set and - the robust bit is not set. */ - movl MUTEX_KIND(%edx), %eax - andl $(ROBUST_BIT|PI_BIT), %eax - cmpl $PI_BIT, %eax - je 9f - -8: subl $1, %ecx -#ifdef __ASSUME_PRIVATE_FUTEX - andl $FUTEX_PRIVATE_FLAG, %ecx -#else - andl %gs:PRIVATE_FUTEX, %ecx -#endif - addl $FUTEX_WAKE_OP, %ecx - movl $SYS_futex, %eax - movl $1, %edx - movl $1, %esi - movl $FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %ebp - /* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for - sysenter. 
- ENTER_KERNEL */ - int $0x80 - popl %ebp - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebp) - popl %esi - cfi_adjust_cfa_offset(-4) - cfi_restore(%esi) - - /* For any kind of error, we try again with WAKE. - The general test also covers running on old kernels. */ - cmpl $-4095, %eax - jae 7f - -6: xorl %eax, %eax - popl %edi - cfi_adjust_cfa_offset(-4) - cfi_restore(%edi) - popl %ebx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebx) - ret - - cfi_restore_state - -9: movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx - movl $SYS_futex, %eax - movl $1, %edx - xorl %esi, %esi - movl dep_mutex-cond_futex(%ebx), %edi - movl (%ebx), %ebp - /* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for - sysenter. - ENTER_KERNEL */ - int $0x80 - popl %ebp - popl %esi - - leal -cond_futex(%ebx), %edi - - /* For any kind of error, we try again with WAKE. - The general test also covers running on old kernels. */ - cmpl $-4095, %eax - jb 4f - -7: -#ifdef __ASSUME_PRIVATE_FUTEX - andl $FUTEX_PRIVATE_FLAG, %ecx -#else - andl %gs:PRIVATE_FUTEX, %ecx -#endif - orl $FUTEX_WAKE, %ecx - - movl $SYS_futex, %eax - /* %edx should be 1 already from $FUTEX_WAKE_OP syscall. - movl $1, %edx */ - ENTER_KERNEL - - /* Unlock. Note that at this point %edi always points to - cond_lock. */ -4: LOCK - subl $1, (%edi) - je 6b - - /* Unlock in loop requires wakeup. */ -5: movl %edi, %eax -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex-cond_futex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_unlock_wake - jmp 6b - - /* Initial locking failed. */ -1: -#if cond_lock == 0 - movl %edi, %edx -#else - leal cond_lock(%edi), %edx -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%edi) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_lock_wait - jmp 2b - - cfi_endproc - .size __pthread_cond_signal, .-__pthread_cond_signal -versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal, - GLIBC_2_3_2) diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S deleted file mode 100644 index 130c090c9c..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S +++ /dev/null @@ -1,973 +0,0 @@ -/* Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . 
*/ - -#include -#include -#include -#include -#include -#include -#include -#include - - .text - -/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex, - const struct timespec *abstime) */ - .globl __pthread_cond_timedwait - .type __pthread_cond_timedwait, @function - .align 16 -__pthread_cond_timedwait: -.LSTARTCODE: - cfi_startproc -#ifdef SHARED - cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect, - DW.ref.__gcc_personality_v0) - cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART) -#else - cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0) - cfi_lsda(DW_EH_PE_udata4, .LexceptSTART) -#endif - - pushl %ebp - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%ebp, 0) - pushl %edi - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%edi, 0) - pushl %esi - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%esi, 0) - pushl %ebx - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%ebx, 0) - - movl 20(%esp), %ebx - movl 28(%esp), %ebp - - LIBC_PROBE (cond_timedwait, 3, %ebx, 24(%esp), %ebp) - - cmpl $1000000000, 4(%ebp) - movl $EINVAL, %eax - jae 18f - - /* Stack frame: - - esp + 32 - +--------------------------+ - esp + 24 | timeout value | - +--------------------------+ - esp + 20 | futex pointer | - +--------------------------+ - esp + 16 | pi-requeued flag | - +--------------------------+ - esp + 12 | old broadcast_seq value | - +--------------------------+ - esp + 4 | old wake_seq value | - +--------------------------+ - esp + 0 | old cancellation mode | - +--------------------------+ - */ - -#ifndef __ASSUME_FUTEX_CLOCK_REALTIME -# ifdef PIC - LOAD_PIC_REG (cx) - cmpl $0, __have_futex_clock_realtime@GOTOFF(%ecx) -# else - cmpl $0, __have_futex_clock_realtime -# endif - je .Lreltmo -#endif - - /* Get internal lock. */ - movl $1, %edx - xorl %eax, %eax - LOCK -#if cond_lock == 0 - cmpxchgl %edx, (%ebx) -#else - cmpxchgl %edx, cond_lock(%ebx) -#endif - jnz 1f - - /* Store the reference to the mutex. If there is already a - different value in there this is a bad user bug. */ -2: cmpl $-1, dep_mutex(%ebx) - movl 24(%esp), %eax - je 17f - movl %eax, dep_mutex(%ebx) - - /* Unlock the mutex. */ -17: xorl %edx, %edx - call __pthread_mutex_unlock_usercnt - - testl %eax, %eax - jne 16f - - addl $1, total_seq(%ebx) - adcl $0, total_seq+4(%ebx) - addl $1, cond_futex(%ebx) - addl $(1 << nwaiters_shift), cond_nwaiters(%ebx) - -#ifdef __ASSUME_FUTEX_CLOCK_REALTIME -# define FRAME_SIZE 24 -#else -# define FRAME_SIZE 32 -#endif - subl $FRAME_SIZE, %esp - cfi_adjust_cfa_offset(FRAME_SIZE) - cfi_remember_state - - /* Get and store current wakeup_seq value. */ - movl wakeup_seq(%ebx), %edi - movl wakeup_seq+4(%ebx), %edx - movl broadcast_seq(%ebx), %eax - movl %edi, 4(%esp) - movl %edx, 8(%esp) - movl %eax, 12(%esp) - - /* Reset the pi-requeued flag. */ - movl $0, 16(%esp) - - cmpl $0, (%ebp) - movl $-ETIMEDOUT, %esi - js 6f - -8: movl cond_futex(%ebx), %edi - movl %edi, 20(%esp) - - /* Unlock. */ - LOCK -#if cond_lock == 0 - subl $1, (%ebx) -#else - subl $1, cond_lock(%ebx) -#endif - jne 3f - -.LcleanupSTART: -4: call __pthread_enable_asynccancel - movl %eax, (%esp) - - leal (%ebp), %esi -#if FUTEX_PRIVATE_FLAG > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - sete %cl - je 40f - - movl dep_mutex(%ebx), %edi - /* Requeue to a non-robust PI mutex if the PI bit is set and - the robust bit is not set. 
*/ - movl MUTEX_KIND(%edi), %eax - andl $(ROBUST_BIT|PI_BIT), %eax - cmpl $PI_BIT, %eax - jne 40f - - movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx - /* The following only works like this because we only support - two clocks, represented using a single bit. */ - testl $1, cond_nwaiters(%ebx) - /* XXX Need to implement using sete instead of a jump. */ - jne 42f - orl $FUTEX_CLOCK_REALTIME, %ecx - -42: movl 20(%esp), %edx - addl $cond_futex, %ebx -.Ladd_cond_futex_pi: - movl $SYS_futex, %eax - ENTER_KERNEL - subl $cond_futex, %ebx -.Lsub_cond_futex_pi: - movl %eax, %esi - /* Set the pi-requeued flag only if the kernel has returned 0. The - kernel does not hold the mutex on ETIMEDOUT or any other error. */ - cmpl $0, %eax - sete 16(%esp) - je 41f - - /* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns - successfully, it has already locked the mutex for us and the - pi_flag (16(%esp)) is set to denote that fact. However, if another - thread changed the futex value before we entered the wait, the - syscall may return an EAGAIN and the mutex is not locked. We go - ahead with a success anyway since later we look at the pi_flag to - decide if we got the mutex or not. The sequence numbers then make - sure that only one of the threads actually wake up. We retry using - normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal - and PI futexes don't mix. - - Note that we don't check for EAGAIN specifically; we assume that the - only other error the futex function could return is EAGAIN (barring - the ETIMEOUT of course, for the timeout case in futex) since - anything else would mean an error in our function. It is too - expensive to do that check for every call (which is quite common in - case of a large number of threads), so it has been skipped. */ - cmpl $-ENOSYS, %eax - jne 41f - xorl %ecx, %ecx - -40: subl $1, %ecx - movl $0, 16(%esp) -#ifdef __ASSUME_PRIVATE_FUTEX - andl $FUTEX_PRIVATE_FLAG, %ecx -#else - andl %gs:PRIVATE_FUTEX, %ecx -#endif - addl $FUTEX_WAIT_BITSET, %ecx - /* The following only works like this because we only support - two clocks, represented using a single bit. */ - testl $1, cond_nwaiters(%ebx) - jne 30f - orl $FUTEX_CLOCK_REALTIME, %ecx -30: - movl 20(%esp), %edx - movl $0xffffffff, %ebp - addl $cond_futex, %ebx -.Ladd_cond_futex: - movl $SYS_futex, %eax - ENTER_KERNEL - subl $cond_futex, %ebx -.Lsub_cond_futex: - movl 28+FRAME_SIZE(%esp), %ebp - movl %eax, %esi - -41: movl (%esp), %eax - call __pthread_disable_asynccancel -.LcleanupEND: - - /* Lock. */ - movl $1, %edx - xorl %eax, %eax - LOCK -#if cond_lock == 0 - cmpxchgl %edx, (%ebx) -#else - cmpxchgl %edx, cond_lock(%ebx) -#endif - jnz 5f - -6: movl broadcast_seq(%ebx), %eax - cmpl 12(%esp), %eax - jne 23f - - movl woken_seq(%ebx), %eax - movl woken_seq+4(%ebx), %ecx - - movl wakeup_seq(%ebx), %edi - movl wakeup_seq+4(%ebx), %edx - - cmpl 8(%esp), %edx - jne 7f - cmpl 4(%esp), %edi - je 15f - -7: cmpl %ecx, %edx - jne 9f - cmp %eax, %edi - jne 9f - -15: cmpl $-ETIMEDOUT, %esi - je 28f - - /* We need to go back to futex_wait. If we're using requeue_pi, then - release the mutex we had acquired and go back. */ - movl 16(%esp), %edx - test %edx, %edx - jz 8b - - /* Adjust the mutex values first and then unlock it. The unlock - should always succeed or else the kernel did not lock the mutex - correctly. 
*/ - movl dep_mutex(%ebx), %eax - call __pthread_mutex_cond_lock_adjust - xorl %edx, %edx - call __pthread_mutex_unlock_usercnt - jmp 8b - -28: addl $1, wakeup_seq(%ebx) - adcl $0, wakeup_seq+4(%ebx) - addl $1, cond_futex(%ebx) - movl $ETIMEDOUT, %esi - jmp 14f - -23: xorl %esi, %esi - jmp 24f - -9: xorl %esi, %esi -14: addl $1, woken_seq(%ebx) - adcl $0, woken_seq+4(%ebx) - -24: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx) - - /* Wake up a thread which wants to destroy the condvar object. */ - movl total_seq(%ebx), %eax - andl total_seq+4(%ebx), %eax - cmpl $0xffffffff, %eax - jne 25f - movl cond_nwaiters(%ebx), %eax - andl $~((1 << nwaiters_shift) - 1), %eax - jne 25f - - addl $cond_nwaiters, %ebx - movl $SYS_futex, %eax -#if FUTEX_PRIVATE_FLAG > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex-cond_nwaiters(%ebx) - sete %cl - subl $1, %ecx -#ifdef __ASSUME_PRIVATE_FUTEX - andl $FUTEX_PRIVATE_FLAG, %ecx -#else - andl %gs:PRIVATE_FUTEX, %ecx -#endif - addl $FUTEX_WAKE, %ecx - movl $1, %edx - ENTER_KERNEL - subl $cond_nwaiters, %ebx - -25: LOCK -#if cond_lock == 0 - subl $1, (%ebx) -#else - subl $1, cond_lock(%ebx) -#endif - jne 10f - -11: movl 24+FRAME_SIZE(%esp), %eax - /* With requeue_pi, the mutex lock is held in the kernel. */ - movl 16(%esp), %ecx - testl %ecx, %ecx - jnz 27f - - call __pthread_mutex_cond_lock -26: addl $FRAME_SIZE, %esp - cfi_adjust_cfa_offset(-FRAME_SIZE) - - /* We return the result of the mutex_lock operation if it failed. */ - testl %eax, %eax -#ifdef HAVE_CMOV - cmovel %esi, %eax -#else - jne 22f - movl %esi, %eax -22: -#endif - -18: popl %ebx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebx) - popl %esi - cfi_adjust_cfa_offset(-4) - cfi_restore(%esi) - popl %edi - cfi_adjust_cfa_offset(-4) - cfi_restore(%edi) - popl %ebp - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebp) - - ret - - cfi_restore_state - -27: call __pthread_mutex_cond_lock_adjust - xorl %eax, %eax - jmp 26b - - cfi_adjust_cfa_offset(-FRAME_SIZE); - /* Initial locking failed. */ -1: -#if cond_lock == 0 - movl %ebx, %edx -#else - leal cond_lock(%ebx), %edx -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_lock_wait - jmp 2b - - /* The initial unlocking of the mutex failed. */ -16: - LOCK -#if cond_lock == 0 - subl $1, (%ebx) -#else - subl $1, cond_lock(%ebx) -#endif - jne 18b - - movl %eax, %esi -#if cond_lock == 0 - movl %ebx, %eax -#else - leal cond_lock(%ebx), %eax -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_unlock_wake - - movl %esi, %eax - jmp 18b - - cfi_adjust_cfa_offset(FRAME_SIZE) - - /* Unlock in loop requires wakeup. */ -3: -#if cond_lock == 0 - movl %ebx, %eax -#else - leal cond_lock(%ebx), %eax -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_unlock_wake - jmp 4b - - /* Locking in loop failed. 
*/ -5: -#if cond_lock == 0 - movl %ebx, %edx -#else - leal cond_lock(%ebx), %edx -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_lock_wait - jmp 6b - - /* Unlock after loop requires wakeup. */ -10: -#if cond_lock == 0 - movl %ebx, %eax -#else - leal cond_lock(%ebx), %eax -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_unlock_wake - jmp 11b - -#ifndef __ASSUME_FUTEX_CLOCK_REALTIME - cfi_adjust_cfa_offset(-FRAME_SIZE) -.Lreltmo: - /* Get internal lock. */ - movl $1, %edx - xorl %eax, %eax - LOCK -# if cond_lock == 0 - cmpxchgl %edx, (%ebx) -# else - cmpxchgl %edx, cond_lock(%ebx) -# endif - jnz 101f - - /* Store the reference to the mutex. If there is already a - different value in there this is a bad user bug. */ -102: cmpl $-1, dep_mutex(%ebx) - movl 24(%esp), %eax - je 117f - movl %eax, dep_mutex(%ebx) - - /* Unlock the mutex. */ -117: xorl %edx, %edx - call __pthread_mutex_unlock_usercnt - - testl %eax, %eax - jne 16b - - addl $1, total_seq(%ebx) - adcl $0, total_seq+4(%ebx) - addl $1, cond_futex(%ebx) - addl $(1 << nwaiters_shift), cond_nwaiters(%ebx) - - subl $FRAME_SIZE, %esp - cfi_adjust_cfa_offset(FRAME_SIZE) - - /* Get and store current wakeup_seq value. */ - movl wakeup_seq(%ebx), %edi - movl wakeup_seq+4(%ebx), %edx - movl broadcast_seq(%ebx), %eax - movl %edi, 4(%esp) - movl %edx, 8(%esp) - movl %eax, 12(%esp) - - /* Reset the pi-requeued flag. */ - movl $0, 16(%esp) - - /* Get the current time. */ -108: movl %ebx, %edx -# ifdef __NR_clock_gettime - /* Get the clock number. */ - movl cond_nwaiters(%ebx), %ebx - andl $((1 << nwaiters_shift) - 1), %ebx - /* Only clocks 0 and 1 are allowed so far. Both are handled in the - kernel. */ - leal 24(%esp), %ecx - movl $__NR_clock_gettime, %eax - ENTER_KERNEL - movl %edx, %ebx - - /* Compute relative timeout. */ - movl (%ebp), %ecx - movl 4(%ebp), %edx - subl 24(%esp), %ecx - subl 28(%esp), %edx -# else - /* Get the current time. */ - leal 24(%esp), %ebx - xorl %ecx, %ecx - movl $__NR_gettimeofday, %eax - ENTER_KERNEL - movl %edx, %ebx - - /* Compute relative timeout. */ - movl 28(%esp), %eax - movl $1000, %edx - mul %edx /* Milli seconds to nano seconds. */ - movl (%ebp), %ecx - movl 4(%ebp), %edx - subl 24(%esp), %ecx - subl %eax, %edx -# endif - jns 112f - addl $1000000000, %edx - subl $1, %ecx -112: testl %ecx, %ecx - movl $-ETIMEDOUT, %esi - js 106f - - /* Store relative timeout. */ -121: movl %ecx, 24(%esp) - movl %edx, 28(%esp) - - movl cond_futex(%ebx), %edi - movl %edi, 20(%esp) - - /* Unlock. 
*/ - LOCK -# if cond_lock == 0 - subl $1, (%ebx) -# else - subl $1, cond_lock(%ebx) -# endif - jne 103f - -.LcleanupSTART2: -104: call __pthread_enable_asynccancel - movl %eax, (%esp) - - leal 24(%esp), %esi -# if FUTEX_PRIVATE_FLAG > 255 - xorl %ecx, %ecx -# endif - cmpl $-1, dep_mutex(%ebx) - sete %cl - subl $1, %ecx -# ifdef __ASSUME_PRIVATE_FUTEX - andl $FUTEX_PRIVATE_FLAG, %ecx -# else - andl %gs:PRIVATE_FUTEX, %ecx -# endif -# if FUTEX_WAIT != 0 - addl $FUTEX_WAIT, %ecx -# endif - movl 20(%esp), %edx - addl $cond_futex, %ebx -.Ladd_cond_futex2: - movl $SYS_futex, %eax - ENTER_KERNEL - subl $cond_futex, %ebx -.Lsub_cond_futex2: - movl %eax, %esi - -141: movl (%esp), %eax - call __pthread_disable_asynccancel -.LcleanupEND2: - - - /* Lock. */ - movl $1, %edx - xorl %eax, %eax - LOCK -# if cond_lock == 0 - cmpxchgl %edx, (%ebx) -# else - cmpxchgl %edx, cond_lock(%ebx) -# endif - jnz 105f - -106: movl broadcast_seq(%ebx), %eax - cmpl 12(%esp), %eax - jne 23b - - movl woken_seq(%ebx), %eax - movl woken_seq+4(%ebx), %ecx - - movl wakeup_seq(%ebx), %edi - movl wakeup_seq+4(%ebx), %edx - - cmpl 8(%esp), %edx - jne 107f - cmpl 4(%esp), %edi - je 115f - -107: cmpl %ecx, %edx - jne 9b - cmp %eax, %edi - jne 9b - -115: cmpl $-ETIMEDOUT, %esi - je 28b - - jmp 8b - - cfi_adjust_cfa_offset(-FRAME_SIZE) - /* Initial locking failed. */ -101: -# if cond_lock == 0 - movl %ebx, %edx -# else - leal cond_lock(%ebx), %edx -# endif -# if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -# endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -# if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -# endif - call __lll_lock_wait - jmp 102b - - cfi_adjust_cfa_offset(FRAME_SIZE) - - /* Unlock in loop requires wakeup. */ -103: -# if cond_lock == 0 - movl %ebx, %eax -# else - leal cond_lock(%ebx), %eax -# endif -# if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -# endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -# if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -# endif - call __lll_unlock_wake - jmp 104b - - /* Locking in loop failed. */ -105: -# if cond_lock == 0 - movl %ebx, %edx -# else - leal cond_lock(%ebx), %edx -# endif -# if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -# endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -# if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -# endif - call __lll_lock_wait - jmp 106b -#endif - - .size __pthread_cond_timedwait, .-__pthread_cond_timedwait -versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait, - GLIBC_2_3_2) - - - .type __condvar_tw_cleanup2, @function -__condvar_tw_cleanup2: - subl $cond_futex, %ebx - .size __condvar_tw_cleanup2, .-__condvar_tw_cleanup2 - .type __condvar_tw_cleanup, @function -__condvar_tw_cleanup: - movl %eax, %esi - - /* Get internal lock. */ - movl $1, %edx - xorl %eax, %eax - LOCK -#if cond_lock == 0 - cmpxchgl %edx, (%ebx) -#else - cmpxchgl %edx, cond_lock(%ebx) -#endif - jz 1f - -#if cond_lock == 0 - movl %ebx, %edx -#else - leal cond_lock(%ebx), %edx -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_lock_wait - -1: movl broadcast_seq(%ebx), %eax - cmpl 12(%esp), %eax - jne 3f - - /* We increment the wakeup_seq counter only if it is lower than - total_seq. 
If this is not the case the thread was woken and - then canceled. In this case we ignore the signal. */ - movl total_seq(%ebx), %eax - movl total_seq+4(%ebx), %edi - cmpl wakeup_seq+4(%ebx), %edi - jb 6f - ja 7f - cmpl wakeup_seq(%ebx), %eax - jbe 7f - -6: addl $1, wakeup_seq(%ebx) - adcl $0, wakeup_seq+4(%ebx) - addl $1, cond_futex(%ebx) - -7: addl $1, woken_seq(%ebx) - adcl $0, woken_seq+4(%ebx) - -3: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx) - - /* Wake up a thread which wants to destroy the condvar object. */ - xorl %edi, %edi - movl total_seq(%ebx), %eax - andl total_seq+4(%ebx), %eax - cmpl $0xffffffff, %eax - jne 4f - movl cond_nwaiters(%ebx), %eax - andl $~((1 << nwaiters_shift) - 1), %eax - jne 4f - - addl $cond_nwaiters, %ebx - movl $SYS_futex, %eax -#if FUTEX_PRIVATE_FLAG > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex-cond_nwaiters(%ebx) - sete %cl - subl $1, %ecx -#ifdef __ASSUME_PRIVATE_FUTEX - andl $FUTEX_PRIVATE_FLAG, %ecx -#else - andl %gs:PRIVATE_FUTEX, %ecx -#endif - addl $FUTEX_WAKE, %ecx - movl $1, %edx - ENTER_KERNEL - subl $cond_nwaiters, %ebx - movl $1, %edi - -4: LOCK -#if cond_lock == 0 - subl $1, (%ebx) -#else - subl $1, cond_lock(%ebx) -#endif - je 2f - -#if cond_lock == 0 - movl %ebx, %eax -#else - leal cond_lock(%ebx), %eax -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_unlock_wake - - /* Wake up all waiters to make sure no signal gets lost. */ -2: testl %edi, %edi - jnz 5f - addl $cond_futex, %ebx -#if FUTEX_PRIVATE_FLAG > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex-cond_futex(%ebx) - sete %cl - subl $1, %ecx -#ifdef __ASSUME_PRIVATE_FUTEX - andl $FUTEX_PRIVATE_FLAG, %ecx -#else - andl %gs:PRIVATE_FUTEX, %ecx -#endif - addl $FUTEX_WAKE, %ecx - movl $SYS_futex, %eax - movl $0x7fffffff, %edx - ENTER_KERNEL - - /* Lock the mutex only if we don't own it already. This only happens - in case of PI mutexes, if we got cancelled after a successful - return of the futex syscall and before disabling async - cancellation. */ -5: movl 24+FRAME_SIZE(%esp), %eax - movl MUTEX_KIND(%eax), %ebx - andl $(ROBUST_BIT|PI_BIT), %ebx - cmpl $PI_BIT, %ebx - jne 8f - - movl (%eax), %ebx - andl $TID_MASK, %ebx - cmpl %ebx, %gs:TID - jne 8f - /* We managed to get the lock. Fix it up before returning. 
*/ - call __pthread_mutex_cond_lock_adjust - jmp 9f - -8: call __pthread_mutex_cond_lock - -9: movl %esi, (%esp) -.LcallUR: - call _Unwind_Resume - hlt -.LENDCODE: - cfi_endproc - .size __condvar_tw_cleanup, .-__condvar_tw_cleanup - - - .section .gcc_except_table,"a",@progbits -.LexceptSTART: - .byte DW_EH_PE_omit # @LPStart format (omit) - .byte DW_EH_PE_omit # @TType format (omit) - .byte DW_EH_PE_sdata4 # call-site format - # DW_EH_PE_sdata4 - .uleb128 .Lcstend-.Lcstbegin -.Lcstbegin: - .long .LcleanupSTART-.LSTARTCODE - .long .Ladd_cond_futex_pi-.LcleanupSTART - .long __condvar_tw_cleanup-.LSTARTCODE - .uleb128 0 - .long .Ladd_cond_futex_pi-.LSTARTCODE - .long .Lsub_cond_futex_pi-.Ladd_cond_futex_pi - .long __condvar_tw_cleanup2-.LSTARTCODE - .uleb128 0 - .long .Lsub_cond_futex_pi-.LSTARTCODE - .long .Ladd_cond_futex-.Lsub_cond_futex_pi - .long __condvar_tw_cleanup-.LSTARTCODE - .uleb128 0 - .long .Ladd_cond_futex-.LSTARTCODE - .long .Lsub_cond_futex-.Ladd_cond_futex - .long __condvar_tw_cleanup2-.LSTARTCODE - .uleb128 0 - .long .Lsub_cond_futex-.LSTARTCODE - .long .LcleanupEND-.Lsub_cond_futex - .long __condvar_tw_cleanup-.LSTARTCODE - .uleb128 0 -#ifndef __ASSUME_FUTEX_CLOCK_REALTIME - .long .LcleanupSTART2-.LSTARTCODE - .long .Ladd_cond_futex2-.LcleanupSTART2 - .long __condvar_tw_cleanup-.LSTARTCODE - .uleb128 0 - .long .Ladd_cond_futex2-.LSTARTCODE - .long .Lsub_cond_futex2-.Ladd_cond_futex2 - .long __condvar_tw_cleanup2-.LSTARTCODE - .uleb128 0 - .long .Lsub_cond_futex2-.LSTARTCODE - .long .LcleanupEND2-.Lsub_cond_futex2 - .long __condvar_tw_cleanup-.LSTARTCODE - .uleb128 0 -#endif - .long .LcallUR-.LSTARTCODE - .long .LENDCODE-.LcallUR - .long 0 - .uleb128 0 -.Lcstend: - - -#ifdef SHARED - .hidden DW.ref.__gcc_personality_v0 - .weak DW.ref.__gcc_personality_v0 - .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits - .align 4 - .type DW.ref.__gcc_personality_v0, @object - .size DW.ref.__gcc_personality_v0, 4 -DW.ref.__gcc_personality_v0: - .long __gcc_personality_v0 -#endif diff --git a/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S b/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S deleted file mode 100644 index ec3538f561..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S +++ /dev/null @@ -1,641 +0,0 @@ -/* Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . 
*/ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - - - .text - -/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) */ - .globl __pthread_cond_wait - .type __pthread_cond_wait, @function - .align 16 -__pthread_cond_wait: -.LSTARTCODE: - cfi_startproc -#ifdef SHARED - cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect, - DW.ref.__gcc_personality_v0) - cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART) -#else - cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0) - cfi_lsda(DW_EH_PE_udata4, .LexceptSTART) -#endif - - pushl %ebp - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%ebp, 0) - pushl %edi - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%edi, 0) - pushl %esi - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%esi, 0) - pushl %ebx - cfi_adjust_cfa_offset(4) - cfi_rel_offset(%ebx, 0) - - xorl %esi, %esi - movl 20(%esp), %ebx - - LIBC_PROBE (cond_wait, 2, 24(%esp), %ebx) - - /* Get internal lock. */ - movl $1, %edx - xorl %eax, %eax - LOCK -#if cond_lock == 0 - cmpxchgl %edx, (%ebx) -#else - cmpxchgl %edx, cond_lock(%ebx) -#endif - jnz 1f - - /* Store the reference to the mutex. If there is already a - different value in there this is a bad user bug. */ -2: cmpl $-1, dep_mutex(%ebx) - movl 24(%esp), %eax - je 15f - movl %eax, dep_mutex(%ebx) - - /* Unlock the mutex. */ -15: xorl %edx, %edx - call __pthread_mutex_unlock_usercnt - - testl %eax, %eax - jne 12f - - addl $1, total_seq(%ebx) - adcl $0, total_seq+4(%ebx) - addl $1, cond_futex(%ebx) - addl $(1 << nwaiters_shift), cond_nwaiters(%ebx) - -#define FRAME_SIZE 20 - subl $FRAME_SIZE, %esp - cfi_adjust_cfa_offset(FRAME_SIZE) - cfi_remember_state - - /* Get and store current wakeup_seq value. */ - movl wakeup_seq(%ebx), %edi - movl wakeup_seq+4(%ebx), %edx - movl broadcast_seq(%ebx), %eax - movl %edi, 4(%esp) - movl %edx, 8(%esp) - movl %eax, 12(%esp) - - /* Reset the pi-requeued flag. */ -8: movl $0, 16(%esp) - movl cond_futex(%ebx), %ebp - - /* Unlock. */ - LOCK -#if cond_lock == 0 - subl $1, (%ebx) -#else - subl $1, cond_lock(%ebx) -#endif - jne 3f - -.LcleanupSTART: -4: call __pthread_enable_asynccancel - movl %eax, (%esp) - - xorl %ecx, %ecx - cmpl $-1, dep_mutex(%ebx) - sete %cl - je 18f - - movl dep_mutex(%ebx), %edi - /* Requeue to a non-robust PI mutex if the PI bit is set and - the robust bit is not set. */ - movl MUTEX_KIND(%edi), %eax - andl $(ROBUST_BIT|PI_BIT), %eax - cmpl $PI_BIT, %eax - jne 18f - - movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx - movl %ebp, %edx - xorl %esi, %esi - addl $cond_futex, %ebx -.Ladd_cond_futex_pi: - movl $SYS_futex, %eax - ENTER_KERNEL - subl $cond_futex, %ebx -.Lsub_cond_futex_pi: - /* Set the pi-requeued flag only if the kernel has returned 0. The - kernel does not hold the mutex on error. */ - cmpl $0, %eax - sete 16(%esp) - je 19f - - /* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns - successfully, it has already locked the mutex for us and the - pi_flag (16(%esp)) is set to denote that fact. However, if another - thread changed the futex value before we entered the wait, the - syscall may return an EAGAIN and the mutex is not locked. We go - ahead with a success anyway since later we look at the pi_flag to - decide if we got the mutex or not. The sequence numbers then make - sure that only one of the threads actually wake up. We retry using - normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal - and PI futexes don't mix. 
- - Note that we don't check for EAGAIN specifically; we assume that the - only other error the futex function could return is EAGAIN since - anything else would mean an error in our function. It is too - expensive to do that check for every call (which is quite common in - case of a large number of threads), so it has been skipped. */ - cmpl $-ENOSYS, %eax - jne 19f - xorl %ecx, %ecx - -18: subl $1, %ecx -#ifdef __ASSUME_PRIVATE_FUTEX - andl $FUTEX_PRIVATE_FLAG, %ecx -#else - andl %gs:PRIVATE_FUTEX, %ecx -#endif -#if FUTEX_WAIT != 0 - addl $FUTEX_WAIT, %ecx -#endif - movl %ebp, %edx - addl $cond_futex, %ebx -.Ladd_cond_futex: - movl $SYS_futex, %eax - ENTER_KERNEL - subl $cond_futex, %ebx -.Lsub_cond_futex: - -19: movl (%esp), %eax - call __pthread_disable_asynccancel -.LcleanupEND: - - /* Lock. */ - movl $1, %edx - xorl %eax, %eax - LOCK -#if cond_lock == 0 - cmpxchgl %edx, (%ebx) -#else - cmpxchgl %edx, cond_lock(%ebx) -#endif - jnz 5f - -6: movl broadcast_seq(%ebx), %eax - cmpl 12(%esp), %eax - jne 16f - - movl woken_seq(%ebx), %eax - movl woken_seq+4(%ebx), %ecx - - movl wakeup_seq(%ebx), %edi - movl wakeup_seq+4(%ebx), %edx - - cmpl 8(%esp), %edx - jne 7f - cmpl 4(%esp), %edi - je 22f - -7: cmpl %ecx, %edx - jne 9f - cmp %eax, %edi - je 22f - -9: addl $1, woken_seq(%ebx) - adcl $0, woken_seq+4(%ebx) - - /* Unlock */ -16: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx) - - /* Wake up a thread which wants to destroy the condvar object. */ - movl total_seq(%ebx), %eax - andl total_seq+4(%ebx), %eax - cmpl $0xffffffff, %eax - jne 17f - movl cond_nwaiters(%ebx), %eax - andl $~((1 << nwaiters_shift) - 1), %eax - jne 17f - - addl $cond_nwaiters, %ebx - movl $SYS_futex, %eax -#if FUTEX_PRIVATE_FLAG > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex-cond_nwaiters(%ebx) - sete %cl - subl $1, %ecx -#ifdef __ASSUME_PRIVATE_FUTEX - andl $FUTEX_PRIVATE_FLAG, %ecx -#else - andl %gs:PRIVATE_FUTEX, %ecx -#endif - addl $FUTEX_WAKE, %ecx - movl $1, %edx - ENTER_KERNEL - subl $cond_nwaiters, %ebx - -17: LOCK -#if cond_lock == 0 - subl $1, (%ebx) -#else - subl $1, cond_lock(%ebx) -#endif - jne 10f - - /* With requeue_pi, the mutex lock is held in the kernel. */ -11: movl 24+FRAME_SIZE(%esp), %eax - movl 16(%esp), %ecx - testl %ecx, %ecx - jnz 21f - - call __pthread_mutex_cond_lock -20: addl $FRAME_SIZE, %esp - cfi_adjust_cfa_offset(-FRAME_SIZE); - -14: popl %ebx - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebx) - popl %esi - cfi_adjust_cfa_offset(-4) - cfi_restore(%esi) - popl %edi - cfi_adjust_cfa_offset(-4) - cfi_restore(%edi) - popl %ebp - cfi_adjust_cfa_offset(-4) - cfi_restore(%ebp) - - /* We return the result of the mutex_lock operation. */ - ret - - cfi_restore_state - -21: call __pthread_mutex_cond_lock_adjust - xorl %eax, %eax - jmp 20b - - cfi_adjust_cfa_offset(-FRAME_SIZE); - - /* We need to go back to futex_wait. If we're using requeue_pi, then - release the mutex we had acquired and go back. */ -22: movl 16(%esp), %edx - test %edx, %edx - jz 8b - - /* Adjust the mutex values first and then unlock it. The unlock - should always succeed or else the kernel did not lock the mutex - correctly. */ - movl dep_mutex(%ebx), %eax - call __pthread_mutex_cond_lock_adjust - xorl %edx, %edx - call __pthread_mutex_unlock_usercnt - jmp 8b - - /* Initial locking failed. 
*/ -1: -#if cond_lock == 0 - movl %ebx, %edx -#else - leal cond_lock(%ebx), %edx -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_lock_wait - jmp 2b - - /* The initial unlocking of the mutex failed. */ -12: - LOCK -#if cond_lock == 0 - subl $1, (%ebx) -#else - subl $1, cond_lock(%ebx) -#endif - jne 14b - - movl %eax, %esi -#if cond_lock == 0 - movl %ebx, %eax -#else - leal cond_lock(%ebx), %eax -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_unlock_wake - - movl %esi, %eax - jmp 14b - - cfi_adjust_cfa_offset(FRAME_SIZE) - - /* Unlock in loop requires wakeup. */ -3: -#if cond_lock == 0 - movl %ebx, %eax -#else - leal cond_lock(%ebx), %eax -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_unlock_wake - jmp 4b - - /* Locking in loop failed. */ -5: -#if cond_lock == 0 - movl %ebx, %edx -#else - leal cond_lock(%ebx), %edx -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_lock_wait - jmp 6b - - /* Unlock after loop requires wakeup. */ -10: -#if cond_lock == 0 - movl %ebx, %eax -#else - leal cond_lock(%ebx), %eax -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_unlock_wake - jmp 11b - - .size __pthread_cond_wait, .-__pthread_cond_wait -versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait, - GLIBC_2_3_2) - - - .type __condvar_w_cleanup2, @function -__condvar_w_cleanup2: - subl $cond_futex, %ebx - .size __condvar_w_cleanup2, .-__condvar_w_cleanup2 -.LSbl4: - .type __condvar_w_cleanup, @function -__condvar_w_cleanup: - movl %eax, %esi - - /* Get internal lock. */ - movl $1, %edx - xorl %eax, %eax - LOCK -#if cond_lock == 0 - cmpxchgl %edx, (%ebx) -#else - cmpxchgl %edx, cond_lock(%ebx) -#endif - jz 1f - -#if cond_lock == 0 - movl %ebx, %edx -#else - leal cond_lock(%ebx), %edx -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_lock_wait - -1: movl broadcast_seq(%ebx), %eax - cmpl 12(%esp), %eax - jne 3f - - /* We increment the wakeup_seq counter only if it is lower than - total_seq. If this is not the case the thread was woken and - then canceled. In this case we ignore the signal. */ - movl total_seq(%ebx), %eax - movl total_seq+4(%ebx), %edi - cmpl wakeup_seq+4(%ebx), %edi - jb 6f - ja 7f - cmpl wakeup_seq(%ebx), %eax - jbe 7f - -6: addl $1, wakeup_seq(%ebx) - adcl $0, wakeup_seq+4(%ebx) - addl $1, cond_futex(%ebx) - -7: addl $1, woken_seq(%ebx) - adcl $0, woken_seq+4(%ebx) - -3: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx) - - /* Wake up a thread which wants to destroy the condvar object. 
*/ - xorl %edi, %edi - movl total_seq(%ebx), %eax - andl total_seq+4(%ebx), %eax - cmpl $0xffffffff, %eax - jne 4f - movl cond_nwaiters(%ebx), %eax - andl $~((1 << nwaiters_shift) - 1), %eax - jne 4f - - addl $cond_nwaiters, %ebx - movl $SYS_futex, %eax -#if FUTEX_PRIVATE_FLAG > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex-cond_nwaiters(%ebx) - sete %cl - subl $1, %ecx -#ifdef __ASSUME_PRIVATE_FUTEX - andl $FUTEX_PRIVATE_FLAG, %ecx -#else - andl %gs:PRIVATE_FUTEX, %ecx -#endif - addl $FUTEX_WAKE, %ecx - movl $1, %edx - ENTER_KERNEL - subl $cond_nwaiters, %ebx - movl $1, %edi - -4: LOCK -#if cond_lock == 0 - subl $1, (%ebx) -#else - subl $1, cond_lock(%ebx) -#endif - je 2f - -#if cond_lock == 0 - movl %ebx, %eax -#else - leal cond_lock(%ebx), %eax -#endif -#if (LLL_SHARED-LLL_PRIVATE) > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex(%ebx) - setne %cl - subl $1, %ecx - andl $(LLL_SHARED-LLL_PRIVATE), %ecx -#if LLL_PRIVATE != 0 - addl $LLL_PRIVATE, %ecx -#endif - call __lll_unlock_wake - - /* Wake up all waiters to make sure no signal gets lost. */ -2: testl %edi, %edi - jnz 5f - addl $cond_futex, %ebx -#if FUTEX_PRIVATE_FLAG > 255 - xorl %ecx, %ecx -#endif - cmpl $-1, dep_mutex-cond_futex(%ebx) - sete %cl - subl $1, %ecx -#ifdef __ASSUME_PRIVATE_FUTEX - andl $FUTEX_PRIVATE_FLAG, %ecx -#else - andl %gs:PRIVATE_FUTEX, %ecx -#endif - addl $FUTEX_WAKE, %ecx - movl $SYS_futex, %eax - movl $0x7fffffff, %edx - ENTER_KERNEL - - /* Lock the mutex only if we don't own it already. This only happens - in case of PI mutexes, if we got cancelled after a successful - return of the futex syscall and before disabling async - cancellation. */ -5: movl 24+FRAME_SIZE(%esp), %eax - movl MUTEX_KIND(%eax), %ebx - andl $(ROBUST_BIT|PI_BIT), %ebx - cmpl $PI_BIT, %ebx - jne 8f - - movl (%eax), %ebx - andl $TID_MASK, %ebx - cmpl %ebx, %gs:TID - jne 8f - /* We managed to get the lock. Fix it up before returning. 
*/ - call __pthread_mutex_cond_lock_adjust - jmp 9f - -8: call __pthread_mutex_cond_lock - -9: movl %esi, (%esp) -.LcallUR: - call _Unwind_Resume - hlt -.LENDCODE: - cfi_endproc - .size __condvar_w_cleanup, .-__condvar_w_cleanup - - - .section .gcc_except_table,"a",@progbits -.LexceptSTART: - .byte DW_EH_PE_omit # @LPStart format (omit) - .byte DW_EH_PE_omit # @TType format (omit) - .byte DW_EH_PE_sdata4 # call-site format - # DW_EH_PE_sdata4 - .uleb128 .Lcstend-.Lcstbegin -.Lcstbegin: - .long .LcleanupSTART-.LSTARTCODE - .long .Ladd_cond_futex_pi-.LcleanupSTART - .long __condvar_w_cleanup-.LSTARTCODE - .uleb128 0 - .long .Ladd_cond_futex_pi-.LSTARTCODE - .long .Lsub_cond_futex_pi-.Ladd_cond_futex_pi - .long __condvar_w_cleanup2-.LSTARTCODE - .uleb128 0 - .long .Lsub_cond_futex_pi-.LSTARTCODE - .long .Ladd_cond_futex-.Lsub_cond_futex_pi - .long __condvar_w_cleanup-.LSTARTCODE - .uleb128 0 - .long .Ladd_cond_futex-.LSTARTCODE - .long .Lsub_cond_futex-.Ladd_cond_futex - .long __condvar_w_cleanup2-.LSTARTCODE - .uleb128 0 - .long .Lsub_cond_futex-.LSTARTCODE - .long .LcleanupEND-.Lsub_cond_futex - .long __condvar_w_cleanup-.LSTARTCODE - .uleb128 0 - .long .LcallUR-.LSTARTCODE - .long .LENDCODE-.LcallUR - .long 0 - .uleb128 0 -.Lcstend: - -#ifdef SHARED - .hidden DW.ref.__gcc_personality_v0 - .weak DW.ref.__gcc_personality_v0 - .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits - .align 4 - .type DW.ref.__gcc_personality_v0, @object - .size DW.ref.__gcc_personality_v0, 4 -DW.ref.__gcc_personality_v0: - .long __gcc_personality_v0 -#endif diff --git a/sysdeps/unix/sysv/linux/i386/i586/libc-lowlevellock.S b/sysdeps/unix/sysv/linux/i386/i586/libc-lowlevellock.S deleted file mode 100644 index f567c1d6d5..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i586/libc-lowlevellock.S +++ /dev/null @@ -1 +0,0 @@ -#include "../i486/libc-lowlevellock.S" diff --git a/sysdeps/unix/sysv/linux/i386/i586/lll_timedlock_wait.c b/sysdeps/unix/sysv/linux/i386/i586/lll_timedlock_wait.c deleted file mode 100644 index fa4357bf5d..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i586/lll_timedlock_wait.c +++ /dev/null @@ -1 +0,0 @@ -#include "../i486/lll_timedlock_wait.c" diff --git a/sysdeps/unix/sysv/linux/i386/i586/lll_timedwait_tid.c b/sysdeps/unix/sysv/linux/i386/i586/lll_timedwait_tid.c deleted file mode 100644 index 1586104c08..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i586/lll_timedwait_tid.c +++ /dev/null @@ -1 +0,0 @@ -#include "../i486/lll_timedwait_tid.c" diff --git a/sysdeps/unix/sysv/linux/i386/i586/lowlevellock.S b/sysdeps/unix/sysv/linux/i386/i586/lowlevellock.S deleted file mode 100644 index 80ae109f5d..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i586/lowlevellock.S +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . 
*/ - -#include "../i486/lowlevellock.S" diff --git a/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S b/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S deleted file mode 100644 index caa6615f5a..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i586/lowlevelrobustlock.S +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include "../i486/lowlevelrobustlock.S" diff --git a/sysdeps/unix/sysv/linux/i386/i586/pthread_barrier_wait.S b/sysdeps/unix/sysv/linux/i386/i586/pthread_barrier_wait.S deleted file mode 100644 index d48f2bc375..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i586/pthread_barrier_wait.S +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include "../i486/pthread_barrier_wait.S" diff --git a/sysdeps/unix/sysv/linux/i386/i586/pthread_cond_broadcast.S b/sysdeps/unix/sysv/linux/i386/i586/pthread_cond_broadcast.S deleted file mode 100644 index 9a4006af40..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i586/pthread_cond_broadcast.S +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . 
*/ - -#include "../i486/pthread_cond_broadcast.S" diff --git a/sysdeps/unix/sysv/linux/i386/i586/pthread_cond_signal.S b/sysdeps/unix/sysv/linux/i386/i586/pthread_cond_signal.S deleted file mode 100644 index 59f93b6729..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i586/pthread_cond_signal.S +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include "../i486/pthread_cond_signal.S" diff --git a/sysdeps/unix/sysv/linux/i386/i586/pthread_cond_timedwait.S b/sysdeps/unix/sysv/linux/i386/i586/pthread_cond_timedwait.S deleted file mode 100644 index d96af08649..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i586/pthread_cond_timedwait.S +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include "../i486/pthread_cond_timedwait.S" diff --git a/sysdeps/unix/sysv/linux/i386/i586/pthread_cond_wait.S b/sysdeps/unix/sysv/linux/i386/i586/pthread_cond_wait.S deleted file mode 100644 index 9696972c74..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i586/pthread_cond_wait.S +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . 
*/ - -#include "../i486/pthread_cond_wait.S" diff --git a/sysdeps/unix/sysv/linux/i386/i686/libc-lowlevellock.S b/sysdeps/unix/sysv/linux/i386/i686/libc-lowlevellock.S deleted file mode 100644 index f567c1d6d5..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i686/libc-lowlevellock.S +++ /dev/null @@ -1 +0,0 @@ -#include "../i486/libc-lowlevellock.S" diff --git a/sysdeps/unix/sysv/linux/i386/i686/lll_timedlock_wait.c b/sysdeps/unix/sysv/linux/i386/i686/lll_timedlock_wait.c deleted file mode 100644 index fa4357bf5d..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i686/lll_timedlock_wait.c +++ /dev/null @@ -1 +0,0 @@ -#include "../i486/lll_timedlock_wait.c" diff --git a/sysdeps/unix/sysv/linux/i386/i686/lll_timedwait_tid.c b/sysdeps/unix/sysv/linux/i386/i686/lll_timedwait_tid.c deleted file mode 100644 index 1586104c08..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i686/lll_timedwait_tid.c +++ /dev/null @@ -1 +0,0 @@ -#include "../i486/lll_timedwait_tid.c" diff --git a/sysdeps/unix/sysv/linux/i386/i686/lowlevellock.S b/sysdeps/unix/sysv/linux/i386/i686/lowlevellock.S deleted file mode 100644 index 80ae109f5d..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i686/lowlevellock.S +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include "../i486/lowlevellock.S" diff --git a/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S b/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S deleted file mode 100644 index caa6615f5a..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i686/lowlevelrobustlock.S +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include "../i486/lowlevelrobustlock.S" diff --git a/sysdeps/unix/sysv/linux/i386/i686/pthread_barrier_wait.S b/sysdeps/unix/sysv/linux/i386/i686/pthread_barrier_wait.S deleted file mode 100644 index d48f2bc375..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i686/pthread_barrier_wait.S +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (C) 2002-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. 
- Contributed by Ulrich Drepper , 2002. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include "../i486/pthread_barrier_wait.S" diff --git a/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_broadcast.S b/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_broadcast.S deleted file mode 100644 index 9a4006af40..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_broadcast.S +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include "../i486/pthread_cond_broadcast.S" diff --git a/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_signal.S b/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_signal.S deleted file mode 100644 index 59f93b6729..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_signal.S +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include "../i486/pthread_cond_signal.S" diff --git a/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S b/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S index 0e8d7ff894..763144c6d6 100644 --- a/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S +++ b/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_timedwait.S @@ -17,4 +17,4 @@ . 
*/ #define HAVE_CMOV 1 -#include "../i486/pthread_cond_timedwait.S" +#include "../pthread_cond_timedwait.S" diff --git a/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_wait.S b/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_wait.S deleted file mode 100644 index 9696972c74..0000000000 --- a/sysdeps/unix/sysv/linux/i386/i686/pthread_cond_wait.S +++ /dev/null @@ -1,19 +0,0 @@ -/* Copyright (C) 2003-2015 Free Software Foundation, Inc. - This file is part of the GNU C Library. - Contributed by Ulrich Drepper , 2003. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - . */ - -#include "../i486/pthread_cond_wait.S" diff --git a/sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S b/sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S new file mode 100644 index 0000000000..111e9c88ed --- /dev/null +++ b/sysdeps/unix/sysv/linux/i386/libc-lowlevellock.S @@ -0,0 +1,19 @@ +/* Copyright (C) 2002-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include "lowlevellock.S" diff --git a/sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c b/sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c new file mode 100644 index 0000000000..f6875b8f89 --- /dev/null +++ b/sysdeps/unix/sysv/linux/i386/lll_timedlock_wait.c @@ -0,0 +1 @@ +/* __lll_timedlock_wait is in lowlevellock.S. */ diff --git a/sysdeps/unix/sysv/linux/i386/lll_timedwait_tid.c b/sysdeps/unix/sysv/linux/i386/lll_timedwait_tid.c new file mode 100644 index 0000000000..43900c6294 --- /dev/null +++ b/sysdeps/unix/sysv/linux/i386/lll_timedwait_tid.c @@ -0,0 +1 @@ +/* __lll_timedwait_tid is in lowlevellock.S. */ diff --git a/sysdeps/unix/sysv/linux/i386/lowlevellock.S b/sysdeps/unix/sysv/linux/i386/lowlevellock.S new file mode 100644 index 0000000000..94bb59b384 --- /dev/null +++ b/sysdeps/unix/sysv/linux/i386/lowlevellock.S @@ -0,0 +1,466 @@ +/* Copyright (C) 2002-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. 
+ + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include +#include + +#include + + .text + +#ifdef __ASSUME_PRIVATE_FUTEX +# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ + movl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg +# define LOAD_PRIVATE_FUTEX_WAKE(reg) \ + movl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg +# define LOAD_FUTEX_WAIT(reg) \ + xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg +# define LOAD_FUTEX_WAIT_ABS(reg) \ + xorl $(FUTEX_WAIT_BITSET | FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME), reg +# define LOAD_FUTEX_WAKE(reg) \ + xorl $(FUTEX_WAKE | FUTEX_PRIVATE_FLAG), reg +#else +# if FUTEX_WAIT == 0 +# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ + movl %gs:PRIVATE_FUTEX, reg +# else +# define LOAD_PRIVATE_FUTEX_WAIT(reg) \ + movl %gs:PRIVATE_FUTEX, reg ; \ + orl $FUTEX_WAIT, reg +# endif +# define LOAD_PRIVATE_FUTEX_WAKE(reg) \ + movl %gs:PRIVATE_FUTEX, reg ; \ + orl $FUTEX_WAKE, reg +# if FUTEX_WAIT == 0 +# define LOAD_FUTEX_WAIT(reg) \ + xorl $FUTEX_PRIVATE_FLAG, reg ; \ + andl %gs:PRIVATE_FUTEX, reg +# else +# define LOAD_FUTEX_WAIT(reg) \ + xorl $FUTEX_PRIVATE_FLAG, reg ; \ + andl %gs:PRIVATE_FUTEX, reg ; \ + orl $FUTEX_WAIT, reg +# endif +# define LOAD_FUTEX_WAIT_ABS(reg) \ + xorl $FUTEX_PRIVATE_FLAG, reg ; \ + andl %gs:PRIVATE_FUTEX, reg ; \ + orl $FUTEX_WAIT_BITSET | FUTEX_CLOCK_REALTIME, reg +# define LOAD_FUTEX_WAKE(reg) \ + xorl $FUTEX_PRIVATE_FLAG, reg ; \ + andl %gs:PRIVATE_FUTEX, reg ; \ + orl $FUTEX_WAKE, reg +#endif + + .globl __lll_lock_wait_private + .type __lll_lock_wait_private,@function + .hidden __lll_lock_wait_private + .align 16 +__lll_lock_wait_private: + cfi_startproc + pushl %edx + cfi_adjust_cfa_offset(4) + pushl %ebx + cfi_adjust_cfa_offset(4) + pushl %esi + cfi_adjust_cfa_offset(4) + cfi_offset(%edx, -8) + cfi_offset(%ebx, -12) + cfi_offset(%esi, -16) + + movl $2, %edx + movl %ecx, %ebx + xorl %esi, %esi /* No timeout. */ + LOAD_PRIVATE_FUTEX_WAIT (%ecx) + + cmpl %edx, %eax /* NB: %edx == 2 */ + jne 2f + +1: LIBC_PROBE (lll_lock_wait_private, 1, %ebx) + movl $SYS_futex, %eax + ENTER_KERNEL + +2: movl %edx, %eax + xchgl %eax, (%ebx) /* NB: lock is implied */ + + testl %eax, %eax + jnz 1b + + popl %esi + cfi_adjust_cfa_offset(-4) + cfi_restore(%esi) + popl %ebx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebx) + popl %edx + cfi_adjust_cfa_offset(-4) + cfi_restore(%edx) + ret + cfi_endproc + .size __lll_lock_wait_private,.-__lll_lock_wait_private + +#if !IS_IN (libc) + .globl __lll_lock_wait + .type __lll_lock_wait,@function + .hidden __lll_lock_wait + .align 16 +__lll_lock_wait: + cfi_startproc + pushl %edx + cfi_adjust_cfa_offset(4) + pushl %ebx + cfi_adjust_cfa_offset(4) + pushl %esi + cfi_adjust_cfa_offset(4) + cfi_offset(%edx, -8) + cfi_offset(%ebx, -12) + cfi_offset(%esi, -16) + + movl %edx, %ebx + movl $2, %edx + xorl %esi, %esi /* No timeout. 
*/ + LOAD_FUTEX_WAIT (%ecx) + + cmpl %edx, %eax /* NB: %edx == 2 */ + jne 2f + +1: movl $SYS_futex, %eax + ENTER_KERNEL + +2: movl %edx, %eax + xchgl %eax, (%ebx) /* NB: lock is implied */ + + testl %eax, %eax + jnz 1b + + popl %esi + cfi_adjust_cfa_offset(-4) + cfi_restore(%esi) + popl %ebx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebx) + popl %edx + cfi_adjust_cfa_offset(-4) + cfi_restore(%edx) + ret + cfi_endproc + .size __lll_lock_wait,.-__lll_lock_wait + + /* %ecx: futex + %esi: flags + %edx: timeout + %eax: futex value + */ + .globl __lll_timedlock_wait + .type __lll_timedlock_wait,@function + .hidden __lll_timedlock_wait + .align 16 +__lll_timedlock_wait: + cfi_startproc + pushl %ebp + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%ebp, 0) + pushl %ebx + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%ebx, 0) + +# ifndef __ASSUME_FUTEX_CLOCK_REALTIME +# ifdef PIC + LOAD_PIC_REG (bx) + cmpl $0, __have_futex_clock_realtime@GOTOFF(%ebx) +# else + cmpl $0, __have_futex_clock_realtime +# endif + je .Lreltmo +# endif + + cmpl $0, (%edx) + js 8f + + movl %ecx, %ebx + movl %esi, %ecx + movl %edx, %esi + movl $0xffffffff, %ebp + LOAD_FUTEX_WAIT_ABS (%ecx) + + movl $2, %edx + cmpl %edx, %eax + jne 2f + +1: movl $SYS_futex, %eax + movl $2, %edx + ENTER_KERNEL + +2: xchgl %edx, (%ebx) /* NB: lock is implied */ + + testl %edx, %edx + jz 3f + + cmpl $-ETIMEDOUT, %eax + je 4f + cmpl $-EINVAL, %eax + jne 1b +4: movl %eax, %edx + negl %edx + +3: movl %edx, %eax +7: popl %ebx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebx) + popl %ebp + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebp) + ret + +8: movl $ETIMEDOUT, %eax + jmp 7b + +# ifndef __ASSUME_FUTEX_CLOCK_REALTIME +.Lreltmo: + /* Check for a valid timeout value. */ + cmpl $1000000000, 4(%edx) + jae 3f + + pushl %esi + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%esi, 0) + pushl %edi + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%edi, 0) + + /* Stack frame for the timespec and timeval structs. */ + subl $8, %esp + cfi_adjust_cfa_offset(8) + + movl %ecx, %ebp + movl %edx, %edi + + movl $2, %edx + xchgl %edx, (%ebp) + + test %edx, %edx + je 6f + +1: + /* Get current time. */ + movl %esp, %ebx + xorl %ecx, %ecx + movl $__NR_gettimeofday, %eax + ENTER_KERNEL + + /* Compute relative timeout. */ + movl 4(%esp), %eax + movl $1000, %edx + mul %edx /* Milli seconds to nano seconds. */ + movl (%edi), %ecx + movl 4(%edi), %edx + subl (%esp), %ecx + subl %eax, %edx + jns 4f + addl $1000000000, %edx + subl $1, %ecx +4: testl %ecx, %ecx + js 2f /* Time is already up. */ + + /* Store relative timeout. */ + movl %ecx, (%esp) + movl %edx, 4(%esp) + + /* Futex call. 
*/ + movl %ebp, %ebx + movl $2, %edx + movl %esp, %esi + movl 16(%esp), %ecx + LOAD_FUTEX_WAIT (%ecx) + movl $SYS_futex, %eax + ENTER_KERNEL + + /* NB: %edx == 2 */ + xchgl %edx, (%ebp) + + testl %edx, %edx + je 6f + + cmpl $-ETIMEDOUT, %eax + jne 1b +2: movl $ETIMEDOUT, %edx + +6: addl $8, %esp + cfi_adjust_cfa_offset(-8) + popl %edi + cfi_adjust_cfa_offset(-4) + cfi_restore(%edi) + popl %esi + cfi_adjust_cfa_offset(-4) + cfi_restore(%esi) +7: popl %ebx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebx) + popl %ebp + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebp) + movl %edx, %eax + ret + +3: movl $EINVAL, %edx + jmp 7b +# endif + cfi_endproc + .size __lll_timedlock_wait,.-__lll_timedlock_wait +#endif + + .globl __lll_unlock_wake_private + .type __lll_unlock_wake_private,@function + .hidden __lll_unlock_wake_private + .align 16 +__lll_unlock_wake_private: + cfi_startproc + pushl %ebx + cfi_adjust_cfa_offset(4) + pushl %ecx + cfi_adjust_cfa_offset(4) + pushl %edx + cfi_adjust_cfa_offset(4) + cfi_offset(%ebx, -8) + cfi_offset(%ecx, -12) + cfi_offset(%edx, -16) + + movl %eax, %ebx + movl $0, (%eax) + LOAD_PRIVATE_FUTEX_WAKE (%ecx) + movl $1, %edx /* Wake one thread. */ + movl $SYS_futex, %eax + ENTER_KERNEL + + popl %edx + cfi_adjust_cfa_offset(-4) + cfi_restore(%edx) + popl %ecx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ecx) + popl %ebx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebx) + ret + cfi_endproc + .size __lll_unlock_wake_private,.-__lll_unlock_wake_private + +#if !IS_IN (libc) + .globl __lll_unlock_wake + .type __lll_unlock_wake,@function + .hidden __lll_unlock_wake + .align 16 +__lll_unlock_wake: + cfi_startproc + pushl %ebx + cfi_adjust_cfa_offset(4) + pushl %ecx + cfi_adjust_cfa_offset(4) + pushl %edx + cfi_adjust_cfa_offset(4) + cfi_offset(%ebx, -8) + cfi_offset(%ecx, -12) + cfi_offset(%edx, -16) + + movl %eax, %ebx + movl $0, (%eax) + LOAD_FUTEX_WAKE (%ecx) + movl $1, %edx /* Wake one thread. */ + movl $SYS_futex, %eax + ENTER_KERNEL + + popl %edx + cfi_adjust_cfa_offset(-4) + cfi_restore(%edx) + popl %ecx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ecx) + popl %ebx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebx) + ret + cfi_endproc + .size __lll_unlock_wake,.-__lll_unlock_wake + + .globl __lll_timedwait_tid + .type __lll_timedwait_tid,@function + .hidden __lll_timedwait_tid + .align 16 +__lll_timedwait_tid: + pushl %edi + pushl %esi + pushl %ebx + pushl %ebp + + movl %eax, %ebp + movl %edx, %edi + subl $8, %esp + + /* Get current time. */ +2: movl %esp, %ebx + xorl %ecx, %ecx + movl $__NR_gettimeofday, %eax + ENTER_KERNEL + + /* Compute relative timeout. */ + movl 4(%esp), %eax + movl $1000, %edx + mul %edx /* Milli seconds to nano seconds. */ + movl (%edi), %ecx + movl 4(%edi), %edx + subl (%esp), %ecx + subl %eax, %edx + jns 5f + addl $1000000000, %edx + subl $1, %ecx +5: testl %ecx, %ecx + js 6f /* Time is already up. */ + + movl %ecx, (%esp) /* Store relative timeout. */ + movl %edx, 4(%esp) + + movl (%ebp), %edx + testl %edx, %edx + jz 4f + + movl %esp, %esi + /* XXX The kernel so far uses global futex for the wakeup at + all times. 
*/ + xorl %ecx, %ecx /* movl $FUTEX_WAIT, %ecx */ + movl %ebp, %ebx + movl $SYS_futex, %eax + ENTER_KERNEL + + cmpl $0, (%ebx) + jne 1f +4: xorl %eax, %eax + +3: addl $8, %esp + popl %ebp + popl %ebx + popl %esi + popl %edi + ret + +1: cmpl $-ETIMEDOUT, %eax + jne 2b +6: movl $ETIMEDOUT, %eax + jmp 3b + .size __lll_timedwait_tid,.-__lll_timedwait_tid +#endif diff --git a/sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S b/sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S new file mode 100644 index 0000000000..aefe77cce9 --- /dev/null +++ b/sysdeps/unix/sysv/linux/i386/lowlevelrobustlock.S @@ -0,0 +1,232 @@ +/* Copyright (C) 2002-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include +#include +#include + + .text + +#define FUTEX_WAITERS 0x80000000 +#define FUTEX_OWNER_DIED 0x40000000 + +#ifdef __ASSUME_PRIVATE_FUTEX +# define LOAD_FUTEX_WAIT(reg) \ + xorl $(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg +#else +# if FUTEX_WAIT == 0 +# define LOAD_FUTEX_WAIT(reg) \ + xorl $FUTEX_PRIVATE_FLAG, reg ; \ + andl %gs:PRIVATE_FUTEX, reg +# else +# define LOAD_FUTEX_WAIT(reg) \ + xorl $FUTEX_PRIVATE_FLAG, reg ; \ + andl %gs:PRIVATE_FUTEX, reg ; \ + orl $FUTEX_WAIT, reg +# endif +#endif + + .globl __lll_robust_lock_wait + .type __lll_robust_lock_wait,@function + .hidden __lll_robust_lock_wait + .align 16 +__lll_robust_lock_wait: + cfi_startproc + pushl %edx + cfi_adjust_cfa_offset(4) + pushl %ebx + cfi_adjust_cfa_offset(4) + pushl %esi + cfi_adjust_cfa_offset(4) + cfi_offset(%edx, -8) + cfi_offset(%ebx, -12) + cfi_offset(%esi, -16) + + movl %edx, %ebx + xorl %esi, %esi /* No timeout. */ + LOAD_FUTEX_WAIT (%ecx) + +4: movl %eax, %edx + orl $FUTEX_WAITERS, %edx + + testl $FUTEX_OWNER_DIED, %eax + jnz 3f + + cmpl %edx, %eax /* NB: %edx == 2 */ + je 1f + + LOCK + cmpxchgl %edx, (%ebx) + jnz 2f + +1: movl $SYS_futex, %eax + ENTER_KERNEL + + movl (%ebx), %eax + +2: test %eax, %eax + jne 4b + + movl %gs:TID, %edx + orl $FUTEX_WAITERS, %edx + LOCK + cmpxchgl %edx, (%ebx) + jnz 4b + /* NB: %eax == 0 */ + +3: popl %esi + cfi_adjust_cfa_offset(-4) + cfi_restore(%esi) + popl %ebx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebx) + popl %edx + cfi_adjust_cfa_offset(-4) + cfi_restore(%edx) + ret + cfi_endproc + .size __lll_robust_lock_wait,.-__lll_robust_lock_wait + + + .globl __lll_robust_timedlock_wait + .type __lll_robust_timedlock_wait,@function + .hidden __lll_robust_timedlock_wait + .align 16 +__lll_robust_timedlock_wait: + cfi_startproc + /* Check for a valid timeout value. 
*/ + cmpl $1000000000, 4(%edx) + jae 3f + + pushl %edi + cfi_adjust_cfa_offset(4) + pushl %esi + cfi_adjust_cfa_offset(4) + pushl %ebx + cfi_adjust_cfa_offset(4) + pushl %ebp + cfi_adjust_cfa_offset(4) + cfi_offset(%edi, -8) + cfi_offset(%esi, -12) + cfi_offset(%ebx, -16) + cfi_offset(%ebp, -20) + + /* Stack frame for the timespec and timeval structs. */ + subl $12, %esp + cfi_adjust_cfa_offset(12) + + movl %ecx, %ebp + movl %edx, %edi + +1: movl %eax, 8(%esp) + + /* Get current time. */ + movl %esp, %ebx + xorl %ecx, %ecx + movl $__NR_gettimeofday, %eax + ENTER_KERNEL + + /* Compute relative timeout. */ + movl 4(%esp), %eax + movl $1000, %edx + mul %edx /* Milli seconds to nano seconds. */ + movl (%edi), %ecx + movl 4(%edi), %edx + subl (%esp), %ecx + subl %eax, %edx + jns 4f + addl $1000000000, %edx + subl $1, %ecx +4: testl %ecx, %ecx + js 8f /* Time is already up. */ + + /* Store relative timeout. */ + movl %ecx, (%esp) + movl %edx, 4(%esp) + + movl %ebp, %ebx + + movl 8(%esp), %edx + movl %edx, %eax + orl $FUTEX_WAITERS, %edx + + testl $FUTEX_OWNER_DIED, %eax + jnz 6f + + cmpl %eax, %edx + je 2f + + LOCK + cmpxchgl %edx, (%ebx) + movl $0, %ecx /* Must use mov to avoid changing cc. */ + jnz 5f + +2: + /* Futex call. */ + movl %esp, %esi + movl 20(%esp), %ecx + LOAD_FUTEX_WAIT (%ecx) + movl $SYS_futex, %eax + ENTER_KERNEL + movl %eax, %ecx + + movl (%ebx), %eax + +5: testl %eax, %eax + jne 7f + + movl %gs:TID, %edx + orl $FUTEX_WAITERS, %edx + LOCK + cmpxchgl %edx, (%ebx) + jnz 7f + +6: addl $12, %esp + cfi_adjust_cfa_offset(-12) + popl %ebp + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebp) + popl %ebx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebx) + popl %esi + cfi_adjust_cfa_offset(-4) + cfi_restore(%esi) + popl %edi + cfi_adjust_cfa_offset(-4) + cfi_restore(%edi) + ret + +3: movl $EINVAL, %eax + ret + + cfi_adjust_cfa_offset(28) + cfi_offset(%edi, -8) + cfi_offset(%esi, -12) + cfi_offset(%ebx, -16) + cfi_offset(%ebp, -20) + /* Check whether the time expired. */ +7: cmpl $-ETIMEDOUT, %ecx + jne 1b + +8: movl $ETIMEDOUT, %eax + jmp 6b + cfi_endproc + .size __lll_robust_timedlock_wait,.-__lll_robust_timedlock_wait diff --git a/sysdeps/unix/sysv/linux/i386/pthread_barrier_wait.S b/sysdeps/unix/sysv/linux/i386/pthread_barrier_wait.S new file mode 100644 index 0000000000..ad1a774fbd --- /dev/null +++ b/sysdeps/unix/sysv/linux/i386/pthread_barrier_wait.S @@ -0,0 +1,187 @@ +/* Copyright (C) 2002-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include + + .text + + .globl __pthread_barrier_wait + .type __pthread_barrier_wait,@function + .align 16 +__pthread_barrier_wait: + cfi_startproc + pushl %ebx + cfi_adjust_cfa_offset(4) + cfi_offset(%ebx, -8) + + movl 8(%esp), %ebx + + /* Get the mutex. 
*/ + movl $1, %edx + xorl %eax, %eax + LOCK + cmpxchgl %edx, MUTEX(%ebx) + jnz 1f + + /* One less waiter. If this was the last one needed wake + everybody. */ +2: subl $1, LEFT(%ebx) + je 3f + + /* There are more threads to come. */ + pushl %esi + cfi_adjust_cfa_offset(4) + cfi_offset(%esi, -12) + +#if CURR_EVENT == 0 + movl (%ebx), %edx +#else + movl CURR_EVENT(%ebx), %edx +#endif + + /* Release the mutex. */ + LOCK + subl $1, MUTEX(%ebx) + jne 6f + + /* Wait for the remaining threads. The call will return immediately + if the CURR_EVENT memory has meanwhile been changed. */ +7: +#if FUTEX_WAIT == 0 + movl PRIVATE(%ebx), %ecx +#else + movl $FUTEX_WAIT, %ecx + orl PRIVATE(%ebx), %ecx +#endif + xorl %esi, %esi +8: movl $SYS_futex, %eax + ENTER_KERNEL + + /* Don't return on spurious wakeups. The syscall does not change + any register except %eax so there is no need to reload any of + them. */ +#if CURR_EVENT == 0 + cmpl %edx, (%ebx) +#else + cmpl %edx, CURR_EVENT(%ebx) +#endif + je 8b + + /* Increment LEFT. If this brings the count back to the + initial count unlock the object. */ + movl $1, %edx + movl INIT_COUNT(%ebx), %ecx + LOCK + xaddl %edx, LEFT(%ebx) + subl $1, %ecx + cmpl %ecx, %edx + jne 10f + + /* Release the mutex. We cannot release the lock before + waking the waiting threads since otherwise a new thread might + arrive and gets waken up, too. */ + LOCK + subl $1, MUTEX(%ebx) + jne 9f + + /* Note: %esi is still zero. */ +10: movl %esi, %eax /* != PTHREAD_BARRIER_SERIAL_THREAD */ + + popl %esi + cfi_adjust_cfa_offset(-4) + cfi_restore(%esi) + popl %ebx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebx) + ret + + cfi_adjust_cfa_offset(4) + cfi_offset(%ebx, -8) + + /* The necessary number of threads arrived. */ +3: +#if CURR_EVENT == 0 + addl $1, (%ebx) +#else + addl $1, CURR_EVENT(%ebx) +#endif + + /* Wake up all waiters. The count is a signed number in the kernel + so 0x7fffffff is the highest value. */ + movl $0x7fffffff, %edx + movl $FUTEX_WAKE, %ecx + orl PRIVATE(%ebx), %ecx + movl $SYS_futex, %eax + ENTER_KERNEL + + /* Increment LEFT. If this brings the count back to the + initial count unlock the object. */ + movl $1, %edx + movl INIT_COUNT(%ebx), %ecx + LOCK + xaddl %edx, LEFT(%ebx) + subl $1, %ecx + cmpl %ecx, %edx + jne 5f + + /* Release the mutex. We cannot release the lock before + waking the waiting threads since otherwise a new thread might + arrive and gets waken up, too. 
*/ + LOCK + subl $1, MUTEX(%ebx) + jne 4f + +5: orl $-1, %eax /* == PTHREAD_BARRIER_SERIAL_THREAD */ + + popl %ebx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebx) + ret + + cfi_adjust_cfa_offset(4) + cfi_offset(%ebx, -8) +1: movl PRIVATE(%ebx), %ecx + leal MUTEX(%ebx), %edx + xorl $LLL_SHARED, %ecx + call __lll_lock_wait + jmp 2b + +4: movl PRIVATE(%ebx), %ecx + leal MUTEX(%ebx), %eax + xorl $LLL_SHARED, %ecx + call __lll_unlock_wake + jmp 5b + + cfi_adjust_cfa_offset(4) + cfi_offset(%esi, -12) +6: movl PRIVATE(%ebx), %ecx + leal MUTEX(%ebx), %eax + xorl $LLL_SHARED, %ecx + call __lll_unlock_wake + jmp 7b + +9: movl PRIVATE(%ebx), %ecx + leal MUTEX(%ebx), %eax + xorl $LLL_SHARED, %ecx + call __lll_unlock_wake + jmp 10b + cfi_endproc + .size __pthread_barrier_wait,.-__pthread_barrier_wait +weak_alias (__pthread_barrier_wait, pthread_barrier_wait) diff --git a/sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.S b/sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.S new file mode 100644 index 0000000000..5ddd5ac592 --- /dev/null +++ b/sysdeps/unix/sysv/linux/i386/pthread_cond_broadcast.S @@ -0,0 +1,241 @@ +/* Copyright (C) 2002-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include +#include +#include +#include +#include +#include + + .text + + /* int pthread_cond_broadcast (pthread_cond_t *cond) */ + .globl __pthread_cond_broadcast + .type __pthread_cond_broadcast, @function + .align 16 +__pthread_cond_broadcast: + cfi_startproc + pushl %ebx + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%ebx, 0) + pushl %esi + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%esi, 0) + pushl %edi + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%edi, 0) + pushl %ebp + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%ebp, 0) + cfi_remember_state + + movl 20(%esp), %ebx + + LIBC_PROBE (cond_broadcast, 1, %edx) + + /* Get internal lock. */ + movl $1, %edx + xorl %eax, %eax + LOCK +#if cond_lock == 0 + cmpxchgl %edx, (%ebx) +#else + cmpxchgl %edx, cond_lock(%ebx) +#endif + jnz 1f + +2: addl $cond_futex, %ebx + movl total_seq+4-cond_futex(%ebx), %eax + movl total_seq-cond_futex(%ebx), %ebp + cmpl wakeup_seq+4-cond_futex(%ebx), %eax + ja 3f + jb 4f + cmpl wakeup_seq-cond_futex(%ebx), %ebp + jna 4f + + /* Cause all currently waiting threads to recognize they are + woken up. */ +3: movl %ebp, wakeup_seq-cond_futex(%ebx) + movl %eax, wakeup_seq-cond_futex+4(%ebx) + movl %ebp, woken_seq-cond_futex(%ebx) + movl %eax, woken_seq-cond_futex+4(%ebx) + addl %ebp, %ebp + addl $1, broadcast_seq-cond_futex(%ebx) + movl %ebp, (%ebx) + + /* Get the address of the mutex used. */ + movl dep_mutex-cond_futex(%ebx), %edi + + /* Unlock. */ + LOCK + subl $1, cond_lock-cond_futex(%ebx) + jne 7f + + /* Don't use requeue for pshared condvars. */ +8: cmpl $-1, %edi + je 9f + + /* Do not use requeue for pshared condvars. 
*/ + testl $PS_BIT, MUTEX_KIND(%edi) + jne 9f + + /* Requeue to a non-robust PI mutex if the PI bit is set and + the robust bit is not set. */ + movl MUTEX_KIND(%edi), %eax + andl $(ROBUST_BIT|PI_BIT), %eax + cmpl $PI_BIT, %eax + je 81f + + /* Wake up all threads. */ +#ifdef __ASSUME_PRIVATE_FUTEX + movl $(FUTEX_CMP_REQUEUE|FUTEX_PRIVATE_FLAG), %ecx +#else + movl %gs:PRIVATE_FUTEX, %ecx + orl $FUTEX_CMP_REQUEUE, %ecx +#endif + movl $SYS_futex, %eax + movl $0x7fffffff, %esi + movl $1, %edx + /* Get the address of the futex involved. */ +# if MUTEX_FUTEX != 0 + addl $MUTEX_FUTEX, %edi +# endif +/* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for sysenter. + ENTER_KERNEL */ + int $0x80 + + /* For any kind of error, which mainly is EAGAIN, we try again + with WAKE. The general test also covers running on old + kernels. */ + cmpl $0xfffff001, %eax + jae 9f + +6: xorl %eax, %eax + popl %ebp + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebp) + popl %edi + cfi_adjust_cfa_offset(-4) + cfi_restore(%edi) + popl %esi + cfi_adjust_cfa_offset(-4) + cfi_restore(%esi) + popl %ebx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebx) + ret + + cfi_restore_state + +81: movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx + movl $SYS_futex, %eax + movl $0x7fffffff, %esi + movl $1, %edx + /* Get the address of the futex involved. */ +# if MUTEX_FUTEX != 0 + addl $MUTEX_FUTEX, %edi +# endif + int $0x80 + + /* For any kind of error, which mainly is EAGAIN, we try again + with WAKE. The general test also covers running on old + kernels. */ + cmpl $0xfffff001, %eax + jb 6b + jmp 9f + + /* Initial locking failed. */ +1: +#if cond_lock == 0 + movl %ebx, %edx +#else + leal cond_lock(%ebx), %edx +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_lock_wait + jmp 2b + + .align 16 + /* Unlock. */ +4: LOCK + subl $1, cond_lock-cond_futex(%ebx) + je 6b + + /* Unlock in loop requires wakeup. */ +5: leal cond_lock-cond_futex(%ebx), %eax +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex-cond_futex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_unlock_wake + jmp 6b + + /* Unlock in loop requires wakeup. */ +7: leal cond_lock-cond_futex(%ebx), %eax +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex-cond_futex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_unlock_wake + jmp 8b + +9: /* The futex requeue functionality is not available. 
*/ + movl $0x7fffffff, %edx +#if FUTEX_PRIVATE_FLAG > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex-cond_futex(%ebx) + sete %cl + subl $1, %ecx +#ifdef __ASSUME_PRIVATE_FUTEX + andl $FUTEX_PRIVATE_FLAG, %ecx +#else + andl %gs:PRIVATE_FUTEX, %ecx +#endif + addl $FUTEX_WAKE, %ecx + movl $SYS_futex, %eax + ENTER_KERNEL + jmp 6b + cfi_endproc + .size __pthread_cond_broadcast, .-__pthread_cond_broadcast +versioned_symbol (libpthread, __pthread_cond_broadcast, pthread_cond_broadcast, + GLIBC_2_3_2) diff --git a/sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S b/sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S new file mode 100644 index 0000000000..8f4d937ec1 --- /dev/null +++ b/sysdeps/unix/sysv/linux/i386/pthread_cond_signal.S @@ -0,0 +1,216 @@ +/* Copyright (C) 2002-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . */ + +#include +#include +#include +#include +#include +#include +#include +#include + + .text + + /* int pthread_cond_signal (pthread_cond_t *cond) */ + .globl __pthread_cond_signal + .type __pthread_cond_signal, @function + .align 16 +__pthread_cond_signal: + + cfi_startproc + pushl %ebx + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%ebx, 0) + pushl %edi + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%edi, 0) + cfi_remember_state + + movl 12(%esp), %edi + + LIBC_PROBE (cond_signal, 1, %edi) + + /* Get internal lock. */ + movl $1, %edx + xorl %eax, %eax + LOCK +#if cond_lock == 0 + cmpxchgl %edx, (%edi) +#else + cmpxchgl %edx, cond_lock(%edi) +#endif + jnz 1f + +2: leal cond_futex(%edi), %ebx + movl total_seq+4(%edi), %eax + movl total_seq(%edi), %ecx + cmpl wakeup_seq+4(%edi), %eax +#if cond_lock != 0 + /* Must use leal to preserve the flags. */ + leal cond_lock(%edi), %edi +#endif + ja 3f + jb 4f + cmpl wakeup_seq-cond_futex(%ebx), %ecx + jbe 4f + + /* Bump the wakeup number. */ +3: addl $1, wakeup_seq-cond_futex(%ebx) + adcl $0, wakeup_seq-cond_futex+4(%ebx) + addl $1, (%ebx) + + /* Wake up one thread. */ + pushl %esi + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%esi, 0) + pushl %ebp + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%ebp, 0) + +#if FUTEX_PRIVATE_FLAG > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex-cond_futex(%ebx) + sete %cl + je 8f + + movl dep_mutex-cond_futex(%ebx), %edx + /* Requeue to a non-robust PI mutex if the PI bit is set and + the robust bit is not set. */ + movl MUTEX_KIND(%edx), %eax + andl $(ROBUST_BIT|PI_BIT), %eax + cmpl $PI_BIT, %eax + je 9f + +8: subl $1, %ecx +#ifdef __ASSUME_PRIVATE_FUTEX + andl $FUTEX_PRIVATE_FLAG, %ecx +#else + andl %gs:PRIVATE_FUTEX, %ecx +#endif + addl $FUTEX_WAKE_OP, %ecx + movl $SYS_futex, %eax + movl $1, %edx + movl $1, %esi + movl $FUTEX_OP_CLEAR_WAKE_IF_GT_ONE, %ebp + /* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for + sysenter. 
+ ENTER_KERNEL */ + int $0x80 + popl %ebp + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebp) + popl %esi + cfi_adjust_cfa_offset(-4) + cfi_restore(%esi) + + /* For any kind of error, we try again with WAKE. + The general test also covers running on old kernels. */ + cmpl $-4095, %eax + jae 7f + +6: xorl %eax, %eax + popl %edi + cfi_adjust_cfa_offset(-4) + cfi_restore(%edi) + popl %ebx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebx) + ret + + cfi_restore_state + +9: movl $(FUTEX_CMP_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx + movl $SYS_futex, %eax + movl $1, %edx + xorl %esi, %esi + movl dep_mutex-cond_futex(%ebx), %edi + movl (%ebx), %ebp + /* FIXME: Until Ingo fixes 4G/4G vDSO, 6 arg syscalls are broken for + sysenter. + ENTER_KERNEL */ + int $0x80 + popl %ebp + popl %esi + + leal -cond_futex(%ebx), %edi + + /* For any kind of error, we try again with WAKE. + The general test also covers running on old kernels. */ + cmpl $-4095, %eax + jb 4f + +7: +#ifdef __ASSUME_PRIVATE_FUTEX + andl $FUTEX_PRIVATE_FLAG, %ecx +#else + andl %gs:PRIVATE_FUTEX, %ecx +#endif + orl $FUTEX_WAKE, %ecx + + movl $SYS_futex, %eax + /* %edx should be 1 already from $FUTEX_WAKE_OP syscall. + movl $1, %edx */ + ENTER_KERNEL + + /* Unlock. Note that at this point %edi always points to + cond_lock. */ +4: LOCK + subl $1, (%edi) + je 6b + + /* Unlock in loop requires wakeup. */ +5: movl %edi, %eax +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex-cond_futex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_unlock_wake + jmp 6b + + /* Initial locking failed. */ +1: +#if cond_lock == 0 + movl %edi, %edx +#else + leal cond_lock(%edi), %edx +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%edi) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_lock_wait + jmp 2b + + cfi_endproc + .size __pthread_cond_signal, .-__pthread_cond_signal +versioned_symbol (libpthread, __pthread_cond_signal, pthread_cond_signal, + GLIBC_2_3_2) diff --git a/sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.S b/sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.S new file mode 100644 index 0000000000..130c090c9c --- /dev/null +++ b/sysdeps/unix/sysv/linux/i386/pthread_cond_timedwait.S @@ -0,0 +1,973 @@ +/* Copyright (C) 2002-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include + + .text + +/* int pthread_cond_timedwait (pthread_cond_t *cond, pthread_mutex_t *mutex, + const struct timespec *abstime) */ + .globl __pthread_cond_timedwait + .type __pthread_cond_timedwait, @function + .align 16 +__pthread_cond_timedwait: +.LSTARTCODE: + cfi_startproc +#ifdef SHARED + cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect, + DW.ref.__gcc_personality_v0) + cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART) +#else + cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0) + cfi_lsda(DW_EH_PE_udata4, .LexceptSTART) +#endif + + pushl %ebp + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%ebp, 0) + pushl %edi + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%edi, 0) + pushl %esi + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%esi, 0) + pushl %ebx + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%ebx, 0) + + movl 20(%esp), %ebx + movl 28(%esp), %ebp + + LIBC_PROBE (cond_timedwait, 3, %ebx, 24(%esp), %ebp) + + cmpl $1000000000, 4(%ebp) + movl $EINVAL, %eax + jae 18f + + /* Stack frame: + + esp + 32 + +--------------------------+ + esp + 24 | timeout value | + +--------------------------+ + esp + 20 | futex pointer | + +--------------------------+ + esp + 16 | pi-requeued flag | + +--------------------------+ + esp + 12 | old broadcast_seq value | + +--------------------------+ + esp + 4 | old wake_seq value | + +--------------------------+ + esp + 0 | old cancellation mode | + +--------------------------+ + */ + +#ifndef __ASSUME_FUTEX_CLOCK_REALTIME +# ifdef PIC + LOAD_PIC_REG (cx) + cmpl $0, __have_futex_clock_realtime@GOTOFF(%ecx) +# else + cmpl $0, __have_futex_clock_realtime +# endif + je .Lreltmo +#endif + + /* Get internal lock. */ + movl $1, %edx + xorl %eax, %eax + LOCK +#if cond_lock == 0 + cmpxchgl %edx, (%ebx) +#else + cmpxchgl %edx, cond_lock(%ebx) +#endif + jnz 1f + + /* Store the reference to the mutex. If there is already a + different value in there this is a bad user bug. */ +2: cmpl $-1, dep_mutex(%ebx) + movl 24(%esp), %eax + je 17f + movl %eax, dep_mutex(%ebx) + + /* Unlock the mutex. */ +17: xorl %edx, %edx + call __pthread_mutex_unlock_usercnt + + testl %eax, %eax + jne 16f + + addl $1, total_seq(%ebx) + adcl $0, total_seq+4(%ebx) + addl $1, cond_futex(%ebx) + addl $(1 << nwaiters_shift), cond_nwaiters(%ebx) + +#ifdef __ASSUME_FUTEX_CLOCK_REALTIME +# define FRAME_SIZE 24 +#else +# define FRAME_SIZE 32 +#endif + subl $FRAME_SIZE, %esp + cfi_adjust_cfa_offset(FRAME_SIZE) + cfi_remember_state + + /* Get and store current wakeup_seq value. */ + movl wakeup_seq(%ebx), %edi + movl wakeup_seq+4(%ebx), %edx + movl broadcast_seq(%ebx), %eax + movl %edi, 4(%esp) + movl %edx, 8(%esp) + movl %eax, 12(%esp) + + /* Reset the pi-requeued flag. */ + movl $0, 16(%esp) + + cmpl $0, (%ebp) + movl $-ETIMEDOUT, %esi + js 6f + +8: movl cond_futex(%ebx), %edi + movl %edi, 20(%esp) + + /* Unlock. */ + LOCK +#if cond_lock == 0 + subl $1, (%ebx) +#else + subl $1, cond_lock(%ebx) +#endif + jne 3f + +.LcleanupSTART: +4: call __pthread_enable_asynccancel + movl %eax, (%esp) + + leal (%ebp), %esi +#if FUTEX_PRIVATE_FLAG > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + sete %cl + je 40f + + movl dep_mutex(%ebx), %edi + /* Requeue to a non-robust PI mutex if the PI bit is set and + the robust bit is not set. 
*/ + movl MUTEX_KIND(%edi), %eax + andl $(ROBUST_BIT|PI_BIT), %eax + cmpl $PI_BIT, %eax + jne 40f + + movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx + /* The following only works like this because we only support + two clocks, represented using a single bit. */ + testl $1, cond_nwaiters(%ebx) + /* XXX Need to implement using sete instead of a jump. */ + jne 42f + orl $FUTEX_CLOCK_REALTIME, %ecx + +42: movl 20(%esp), %edx + addl $cond_futex, %ebx +.Ladd_cond_futex_pi: + movl $SYS_futex, %eax + ENTER_KERNEL + subl $cond_futex, %ebx +.Lsub_cond_futex_pi: + movl %eax, %esi + /* Set the pi-requeued flag only if the kernel has returned 0. The + kernel does not hold the mutex on ETIMEDOUT or any other error. */ + cmpl $0, %eax + sete 16(%esp) + je 41f + + /* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns + successfully, it has already locked the mutex for us and the + pi_flag (16(%esp)) is set to denote that fact. However, if another + thread changed the futex value before we entered the wait, the + syscall may return an EAGAIN and the mutex is not locked. We go + ahead with a success anyway since later we look at the pi_flag to + decide if we got the mutex or not. The sequence numbers then make + sure that only one of the threads actually wake up. We retry using + normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal + and PI futexes don't mix. + + Note that we don't check for EAGAIN specifically; we assume that the + only other error the futex function could return is EAGAIN (barring + the ETIMEOUT of course, for the timeout case in futex) since + anything else would mean an error in our function. It is too + expensive to do that check for every call (which is quite common in + case of a large number of threads), so it has been skipped. */ + cmpl $-ENOSYS, %eax + jne 41f + xorl %ecx, %ecx + +40: subl $1, %ecx + movl $0, 16(%esp) +#ifdef __ASSUME_PRIVATE_FUTEX + andl $FUTEX_PRIVATE_FLAG, %ecx +#else + andl %gs:PRIVATE_FUTEX, %ecx +#endif + addl $FUTEX_WAIT_BITSET, %ecx + /* The following only works like this because we only support + two clocks, represented using a single bit. */ + testl $1, cond_nwaiters(%ebx) + jne 30f + orl $FUTEX_CLOCK_REALTIME, %ecx +30: + movl 20(%esp), %edx + movl $0xffffffff, %ebp + addl $cond_futex, %ebx +.Ladd_cond_futex: + movl $SYS_futex, %eax + ENTER_KERNEL + subl $cond_futex, %ebx +.Lsub_cond_futex: + movl 28+FRAME_SIZE(%esp), %ebp + movl %eax, %esi + +41: movl (%esp), %eax + call __pthread_disable_asynccancel +.LcleanupEND: + + /* Lock. */ + movl $1, %edx + xorl %eax, %eax + LOCK +#if cond_lock == 0 + cmpxchgl %edx, (%ebx) +#else + cmpxchgl %edx, cond_lock(%ebx) +#endif + jnz 5f + +6: movl broadcast_seq(%ebx), %eax + cmpl 12(%esp), %eax + jne 23f + + movl woken_seq(%ebx), %eax + movl woken_seq+4(%ebx), %ecx + + movl wakeup_seq(%ebx), %edi + movl wakeup_seq+4(%ebx), %edx + + cmpl 8(%esp), %edx + jne 7f + cmpl 4(%esp), %edi + je 15f + +7: cmpl %ecx, %edx + jne 9f + cmp %eax, %edi + jne 9f + +15: cmpl $-ETIMEDOUT, %esi + je 28f + + /* We need to go back to futex_wait. If we're using requeue_pi, then + release the mutex we had acquired and go back. */ + movl 16(%esp), %edx + test %edx, %edx + jz 8b + + /* Adjust the mutex values first and then unlock it. The unlock + should always succeed or else the kernel did not lock the mutex + correctly. 
*/ + movl dep_mutex(%ebx), %eax + call __pthread_mutex_cond_lock_adjust + xorl %edx, %edx + call __pthread_mutex_unlock_usercnt + jmp 8b + +28: addl $1, wakeup_seq(%ebx) + adcl $0, wakeup_seq+4(%ebx) + addl $1, cond_futex(%ebx) + movl $ETIMEDOUT, %esi + jmp 14f + +23: xorl %esi, %esi + jmp 24f + +9: xorl %esi, %esi +14: addl $1, woken_seq(%ebx) + adcl $0, woken_seq+4(%ebx) + +24: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx) + + /* Wake up a thread which wants to destroy the condvar object. */ + movl total_seq(%ebx), %eax + andl total_seq+4(%ebx), %eax + cmpl $0xffffffff, %eax + jne 25f + movl cond_nwaiters(%ebx), %eax + andl $~((1 << nwaiters_shift) - 1), %eax + jne 25f + + addl $cond_nwaiters, %ebx + movl $SYS_futex, %eax +#if FUTEX_PRIVATE_FLAG > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex-cond_nwaiters(%ebx) + sete %cl + subl $1, %ecx +#ifdef __ASSUME_PRIVATE_FUTEX + andl $FUTEX_PRIVATE_FLAG, %ecx +#else + andl %gs:PRIVATE_FUTEX, %ecx +#endif + addl $FUTEX_WAKE, %ecx + movl $1, %edx + ENTER_KERNEL + subl $cond_nwaiters, %ebx + +25: LOCK +#if cond_lock == 0 + subl $1, (%ebx) +#else + subl $1, cond_lock(%ebx) +#endif + jne 10f + +11: movl 24+FRAME_SIZE(%esp), %eax + /* With requeue_pi, the mutex lock is held in the kernel. */ + movl 16(%esp), %ecx + testl %ecx, %ecx + jnz 27f + + call __pthread_mutex_cond_lock +26: addl $FRAME_SIZE, %esp + cfi_adjust_cfa_offset(-FRAME_SIZE) + + /* We return the result of the mutex_lock operation if it failed. */ + testl %eax, %eax +#ifdef HAVE_CMOV + cmovel %esi, %eax +#else + jne 22f + movl %esi, %eax +22: +#endif + +18: popl %ebx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebx) + popl %esi + cfi_adjust_cfa_offset(-4) + cfi_restore(%esi) + popl %edi + cfi_adjust_cfa_offset(-4) + cfi_restore(%edi) + popl %ebp + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebp) + + ret + + cfi_restore_state + +27: call __pthread_mutex_cond_lock_adjust + xorl %eax, %eax + jmp 26b + + cfi_adjust_cfa_offset(-FRAME_SIZE); + /* Initial locking failed. */ +1: +#if cond_lock == 0 + movl %ebx, %edx +#else + leal cond_lock(%ebx), %edx +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_lock_wait + jmp 2b + + /* The initial unlocking of the mutex failed. */ +16: + LOCK +#if cond_lock == 0 + subl $1, (%ebx) +#else + subl $1, cond_lock(%ebx) +#endif + jne 18b + + movl %eax, %esi +#if cond_lock == 0 + movl %ebx, %eax +#else + leal cond_lock(%ebx), %eax +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_unlock_wake + + movl %esi, %eax + jmp 18b + + cfi_adjust_cfa_offset(FRAME_SIZE) + + /* Unlock in loop requires wakeup. */ +3: +#if cond_lock == 0 + movl %ebx, %eax +#else + leal cond_lock(%ebx), %eax +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_unlock_wake + jmp 4b + + /* Locking in loop failed. 
*/ +5: +#if cond_lock == 0 + movl %ebx, %edx +#else + leal cond_lock(%ebx), %edx +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_lock_wait + jmp 6b + + /* Unlock after loop requires wakeup. */ +10: +#if cond_lock == 0 + movl %ebx, %eax +#else + leal cond_lock(%ebx), %eax +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_unlock_wake + jmp 11b + +#ifndef __ASSUME_FUTEX_CLOCK_REALTIME + cfi_adjust_cfa_offset(-FRAME_SIZE) +.Lreltmo: + /* Get internal lock. */ + movl $1, %edx + xorl %eax, %eax + LOCK +# if cond_lock == 0 + cmpxchgl %edx, (%ebx) +# else + cmpxchgl %edx, cond_lock(%ebx) +# endif + jnz 101f + + /* Store the reference to the mutex. If there is already a + different value in there this is a bad user bug. */ +102: cmpl $-1, dep_mutex(%ebx) + movl 24(%esp), %eax + je 117f + movl %eax, dep_mutex(%ebx) + + /* Unlock the mutex. */ +117: xorl %edx, %edx + call __pthread_mutex_unlock_usercnt + + testl %eax, %eax + jne 16b + + addl $1, total_seq(%ebx) + adcl $0, total_seq+4(%ebx) + addl $1, cond_futex(%ebx) + addl $(1 << nwaiters_shift), cond_nwaiters(%ebx) + + subl $FRAME_SIZE, %esp + cfi_adjust_cfa_offset(FRAME_SIZE) + + /* Get and store current wakeup_seq value. */ + movl wakeup_seq(%ebx), %edi + movl wakeup_seq+4(%ebx), %edx + movl broadcast_seq(%ebx), %eax + movl %edi, 4(%esp) + movl %edx, 8(%esp) + movl %eax, 12(%esp) + + /* Reset the pi-requeued flag. */ + movl $0, 16(%esp) + + /* Get the current time. */ +108: movl %ebx, %edx +# ifdef __NR_clock_gettime + /* Get the clock number. */ + movl cond_nwaiters(%ebx), %ebx + andl $((1 << nwaiters_shift) - 1), %ebx + /* Only clocks 0 and 1 are allowed so far. Both are handled in the + kernel. */ + leal 24(%esp), %ecx + movl $__NR_clock_gettime, %eax + ENTER_KERNEL + movl %edx, %ebx + + /* Compute relative timeout. */ + movl (%ebp), %ecx + movl 4(%ebp), %edx + subl 24(%esp), %ecx + subl 28(%esp), %edx +# else + /* Get the current time. */ + leal 24(%esp), %ebx + xorl %ecx, %ecx + movl $__NR_gettimeofday, %eax + ENTER_KERNEL + movl %edx, %ebx + + /* Compute relative timeout. */ + movl 28(%esp), %eax + movl $1000, %edx + mul %edx /* Milli seconds to nano seconds. */ + movl (%ebp), %ecx + movl 4(%ebp), %edx + subl 24(%esp), %ecx + subl %eax, %edx +# endif + jns 112f + addl $1000000000, %edx + subl $1, %ecx +112: testl %ecx, %ecx + movl $-ETIMEDOUT, %esi + js 106f + + /* Store relative timeout. */ +121: movl %ecx, 24(%esp) + movl %edx, 28(%esp) + + movl cond_futex(%ebx), %edi + movl %edi, 20(%esp) + + /* Unlock. 
*/ + LOCK +# if cond_lock == 0 + subl $1, (%ebx) +# else + subl $1, cond_lock(%ebx) +# endif + jne 103f + +.LcleanupSTART2: +104: call __pthread_enable_asynccancel + movl %eax, (%esp) + + leal 24(%esp), %esi +# if FUTEX_PRIVATE_FLAG > 255 + xorl %ecx, %ecx +# endif + cmpl $-1, dep_mutex(%ebx) + sete %cl + subl $1, %ecx +# ifdef __ASSUME_PRIVATE_FUTEX + andl $FUTEX_PRIVATE_FLAG, %ecx +# else + andl %gs:PRIVATE_FUTEX, %ecx +# endif +# if FUTEX_WAIT != 0 + addl $FUTEX_WAIT, %ecx +# endif + movl 20(%esp), %edx + addl $cond_futex, %ebx +.Ladd_cond_futex2: + movl $SYS_futex, %eax + ENTER_KERNEL + subl $cond_futex, %ebx +.Lsub_cond_futex2: + movl %eax, %esi + +141: movl (%esp), %eax + call __pthread_disable_asynccancel +.LcleanupEND2: + + + /* Lock. */ + movl $1, %edx + xorl %eax, %eax + LOCK +# if cond_lock == 0 + cmpxchgl %edx, (%ebx) +# else + cmpxchgl %edx, cond_lock(%ebx) +# endif + jnz 105f + +106: movl broadcast_seq(%ebx), %eax + cmpl 12(%esp), %eax + jne 23b + + movl woken_seq(%ebx), %eax + movl woken_seq+4(%ebx), %ecx + + movl wakeup_seq(%ebx), %edi + movl wakeup_seq+4(%ebx), %edx + + cmpl 8(%esp), %edx + jne 107f + cmpl 4(%esp), %edi + je 115f + +107: cmpl %ecx, %edx + jne 9b + cmp %eax, %edi + jne 9b + +115: cmpl $-ETIMEDOUT, %esi + je 28b + + jmp 8b + + cfi_adjust_cfa_offset(-FRAME_SIZE) + /* Initial locking failed. */ +101: +# if cond_lock == 0 + movl %ebx, %edx +# else + leal cond_lock(%ebx), %edx +# endif +# if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +# endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +# if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +# endif + call __lll_lock_wait + jmp 102b + + cfi_adjust_cfa_offset(FRAME_SIZE) + + /* Unlock in loop requires wakeup. */ +103: +# if cond_lock == 0 + movl %ebx, %eax +# else + leal cond_lock(%ebx), %eax +# endif +# if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +# endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +# if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +# endif + call __lll_unlock_wake + jmp 104b + + /* Locking in loop failed. */ +105: +# if cond_lock == 0 + movl %ebx, %edx +# else + leal cond_lock(%ebx), %edx +# endif +# if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +# endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +# if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +# endif + call __lll_lock_wait + jmp 106b +#endif + + .size __pthread_cond_timedwait, .-__pthread_cond_timedwait +versioned_symbol (libpthread, __pthread_cond_timedwait, pthread_cond_timedwait, + GLIBC_2_3_2) + + + .type __condvar_tw_cleanup2, @function +__condvar_tw_cleanup2: + subl $cond_futex, %ebx + .size __condvar_tw_cleanup2, .-__condvar_tw_cleanup2 + .type __condvar_tw_cleanup, @function +__condvar_tw_cleanup: + movl %eax, %esi + + /* Get internal lock. */ + movl $1, %edx + xorl %eax, %eax + LOCK +#if cond_lock == 0 + cmpxchgl %edx, (%ebx) +#else + cmpxchgl %edx, cond_lock(%ebx) +#endif + jz 1f + +#if cond_lock == 0 + movl %ebx, %edx +#else + leal cond_lock(%ebx), %edx +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_lock_wait + +1: movl broadcast_seq(%ebx), %eax + cmpl 12(%esp), %eax + jne 3f + + /* We increment the wakeup_seq counter only if it is lower than + total_seq. 
If this is not the case the thread was woken and + then canceled. In this case we ignore the signal. */ + movl total_seq(%ebx), %eax + movl total_seq+4(%ebx), %edi + cmpl wakeup_seq+4(%ebx), %edi + jb 6f + ja 7f + cmpl wakeup_seq(%ebx), %eax + jbe 7f + +6: addl $1, wakeup_seq(%ebx) + adcl $0, wakeup_seq+4(%ebx) + addl $1, cond_futex(%ebx) + +7: addl $1, woken_seq(%ebx) + adcl $0, woken_seq+4(%ebx) + +3: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx) + + /* Wake up a thread which wants to destroy the condvar object. */ + xorl %edi, %edi + movl total_seq(%ebx), %eax + andl total_seq+4(%ebx), %eax + cmpl $0xffffffff, %eax + jne 4f + movl cond_nwaiters(%ebx), %eax + andl $~((1 << nwaiters_shift) - 1), %eax + jne 4f + + addl $cond_nwaiters, %ebx + movl $SYS_futex, %eax +#if FUTEX_PRIVATE_FLAG > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex-cond_nwaiters(%ebx) + sete %cl + subl $1, %ecx +#ifdef __ASSUME_PRIVATE_FUTEX + andl $FUTEX_PRIVATE_FLAG, %ecx +#else + andl %gs:PRIVATE_FUTEX, %ecx +#endif + addl $FUTEX_WAKE, %ecx + movl $1, %edx + ENTER_KERNEL + subl $cond_nwaiters, %ebx + movl $1, %edi + +4: LOCK +#if cond_lock == 0 + subl $1, (%ebx) +#else + subl $1, cond_lock(%ebx) +#endif + je 2f + +#if cond_lock == 0 + movl %ebx, %eax +#else + leal cond_lock(%ebx), %eax +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_unlock_wake + + /* Wake up all waiters to make sure no signal gets lost. */ +2: testl %edi, %edi + jnz 5f + addl $cond_futex, %ebx +#if FUTEX_PRIVATE_FLAG > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex-cond_futex(%ebx) + sete %cl + subl $1, %ecx +#ifdef __ASSUME_PRIVATE_FUTEX + andl $FUTEX_PRIVATE_FLAG, %ecx +#else + andl %gs:PRIVATE_FUTEX, %ecx +#endif + addl $FUTEX_WAKE, %ecx + movl $SYS_futex, %eax + movl $0x7fffffff, %edx + ENTER_KERNEL + + /* Lock the mutex only if we don't own it already. This only happens + in case of PI mutexes, if we got cancelled after a successful + return of the futex syscall and before disabling async + cancellation. */ +5: movl 24+FRAME_SIZE(%esp), %eax + movl MUTEX_KIND(%eax), %ebx + andl $(ROBUST_BIT|PI_BIT), %ebx + cmpl $PI_BIT, %ebx + jne 8f + + movl (%eax), %ebx + andl $TID_MASK, %ebx + cmpl %ebx, %gs:TID + jne 8f + /* We managed to get the lock. Fix it up before returning. 
*/ + call __pthread_mutex_cond_lock_adjust + jmp 9f + +8: call __pthread_mutex_cond_lock + +9: movl %esi, (%esp) +.LcallUR: + call _Unwind_Resume + hlt +.LENDCODE: + cfi_endproc + .size __condvar_tw_cleanup, .-__condvar_tw_cleanup + + + .section .gcc_except_table,"a",@progbits +.LexceptSTART: + .byte DW_EH_PE_omit # @LPStart format (omit) + .byte DW_EH_PE_omit # @TType format (omit) + .byte DW_EH_PE_sdata4 # call-site format + # DW_EH_PE_sdata4 + .uleb128 .Lcstend-.Lcstbegin +.Lcstbegin: + .long .LcleanupSTART-.LSTARTCODE + .long .Ladd_cond_futex_pi-.LcleanupSTART + .long __condvar_tw_cleanup-.LSTARTCODE + .uleb128 0 + .long .Ladd_cond_futex_pi-.LSTARTCODE + .long .Lsub_cond_futex_pi-.Ladd_cond_futex_pi + .long __condvar_tw_cleanup2-.LSTARTCODE + .uleb128 0 + .long .Lsub_cond_futex_pi-.LSTARTCODE + .long .Ladd_cond_futex-.Lsub_cond_futex_pi + .long __condvar_tw_cleanup-.LSTARTCODE + .uleb128 0 + .long .Ladd_cond_futex-.LSTARTCODE + .long .Lsub_cond_futex-.Ladd_cond_futex + .long __condvar_tw_cleanup2-.LSTARTCODE + .uleb128 0 + .long .Lsub_cond_futex-.LSTARTCODE + .long .LcleanupEND-.Lsub_cond_futex + .long __condvar_tw_cleanup-.LSTARTCODE + .uleb128 0 +#ifndef __ASSUME_FUTEX_CLOCK_REALTIME + .long .LcleanupSTART2-.LSTARTCODE + .long .Ladd_cond_futex2-.LcleanupSTART2 + .long __condvar_tw_cleanup-.LSTARTCODE + .uleb128 0 + .long .Ladd_cond_futex2-.LSTARTCODE + .long .Lsub_cond_futex2-.Ladd_cond_futex2 + .long __condvar_tw_cleanup2-.LSTARTCODE + .uleb128 0 + .long .Lsub_cond_futex2-.LSTARTCODE + .long .LcleanupEND2-.Lsub_cond_futex2 + .long __condvar_tw_cleanup-.LSTARTCODE + .uleb128 0 +#endif + .long .LcallUR-.LSTARTCODE + .long .LENDCODE-.LcallUR + .long 0 + .uleb128 0 +.Lcstend: + + +#ifdef SHARED + .hidden DW.ref.__gcc_personality_v0 + .weak DW.ref.__gcc_personality_v0 + .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits + .align 4 + .type DW.ref.__gcc_personality_v0, @object + .size DW.ref.__gcc_personality_v0, 4 +DW.ref.__gcc_personality_v0: + .long __gcc_personality_v0 +#endif diff --git a/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S b/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S new file mode 100644 index 0000000000..ec3538f561 --- /dev/null +++ b/sysdeps/unix/sysv/linux/i386/pthread_cond_wait.S @@ -0,0 +1,641 @@ +/* Copyright (C) 2002-2015 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper , 2002. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + . 
*/ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + .text + +/* int pthread_cond_wait (pthread_cond_t *cond, pthread_mutex_t *mutex) */ + .globl __pthread_cond_wait + .type __pthread_cond_wait, @function + .align 16 +__pthread_cond_wait: +.LSTARTCODE: + cfi_startproc +#ifdef SHARED + cfi_personality(DW_EH_PE_pcrel | DW_EH_PE_sdata4 | DW_EH_PE_indirect, + DW.ref.__gcc_personality_v0) + cfi_lsda(DW_EH_PE_pcrel | DW_EH_PE_sdata4, .LexceptSTART) +#else + cfi_personality(DW_EH_PE_udata4, __gcc_personality_v0) + cfi_lsda(DW_EH_PE_udata4, .LexceptSTART) +#endif + + pushl %ebp + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%ebp, 0) + pushl %edi + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%edi, 0) + pushl %esi + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%esi, 0) + pushl %ebx + cfi_adjust_cfa_offset(4) + cfi_rel_offset(%ebx, 0) + + xorl %esi, %esi + movl 20(%esp), %ebx + + LIBC_PROBE (cond_wait, 2, 24(%esp), %ebx) + + /* Get internal lock. */ + movl $1, %edx + xorl %eax, %eax + LOCK +#if cond_lock == 0 + cmpxchgl %edx, (%ebx) +#else + cmpxchgl %edx, cond_lock(%ebx) +#endif + jnz 1f + + /* Store the reference to the mutex. If there is already a + different value in there this is a bad user bug. */ +2: cmpl $-1, dep_mutex(%ebx) + movl 24(%esp), %eax + je 15f + movl %eax, dep_mutex(%ebx) + + /* Unlock the mutex. */ +15: xorl %edx, %edx + call __pthread_mutex_unlock_usercnt + + testl %eax, %eax + jne 12f + + addl $1, total_seq(%ebx) + adcl $0, total_seq+4(%ebx) + addl $1, cond_futex(%ebx) + addl $(1 << nwaiters_shift), cond_nwaiters(%ebx) + +#define FRAME_SIZE 20 + subl $FRAME_SIZE, %esp + cfi_adjust_cfa_offset(FRAME_SIZE) + cfi_remember_state + + /* Get and store current wakeup_seq value. */ + movl wakeup_seq(%ebx), %edi + movl wakeup_seq+4(%ebx), %edx + movl broadcast_seq(%ebx), %eax + movl %edi, 4(%esp) + movl %edx, 8(%esp) + movl %eax, 12(%esp) + + /* Reset the pi-requeued flag. */ +8: movl $0, 16(%esp) + movl cond_futex(%ebx), %ebp + + /* Unlock. */ + LOCK +#if cond_lock == 0 + subl $1, (%ebx) +#else + subl $1, cond_lock(%ebx) +#endif + jne 3f + +.LcleanupSTART: +4: call __pthread_enable_asynccancel + movl %eax, (%esp) + + xorl %ecx, %ecx + cmpl $-1, dep_mutex(%ebx) + sete %cl + je 18f + + movl dep_mutex(%ebx), %edi + /* Requeue to a non-robust PI mutex if the PI bit is set and + the robust bit is not set. */ + movl MUTEX_KIND(%edi), %eax + andl $(ROBUST_BIT|PI_BIT), %eax + cmpl $PI_BIT, %eax + jne 18f + + movl $(FUTEX_WAIT_REQUEUE_PI|FUTEX_PRIVATE_FLAG), %ecx + movl %ebp, %edx + xorl %esi, %esi + addl $cond_futex, %ebx +.Ladd_cond_futex_pi: + movl $SYS_futex, %eax + ENTER_KERNEL + subl $cond_futex, %ebx +.Lsub_cond_futex_pi: + /* Set the pi-requeued flag only if the kernel has returned 0. The + kernel does not hold the mutex on error. */ + cmpl $0, %eax + sete 16(%esp) + je 19f + + /* When a futex syscall with FUTEX_WAIT_REQUEUE_PI returns + successfully, it has already locked the mutex for us and the + pi_flag (16(%esp)) is set to denote that fact. However, if another + thread changed the futex value before we entered the wait, the + syscall may return an EAGAIN and the mutex is not locked. We go + ahead with a success anyway since later we look at the pi_flag to + decide if we got the mutex or not. The sequence numbers then make + sure that only one of the threads actually wake up. We retry using + normal FUTEX_WAIT only if the kernel returned ENOSYS, since normal + and PI futexes don't mix. 
+ + Note that we don't check for EAGAIN specifically; we assume that the + only other error the futex function could return is EAGAIN since + anything else would mean an error in our function. It is too + expensive to do that check for every call (which is quite common in + case of a large number of threads), so it has been skipped. */ + cmpl $-ENOSYS, %eax + jne 19f + xorl %ecx, %ecx + +18: subl $1, %ecx +#ifdef __ASSUME_PRIVATE_FUTEX + andl $FUTEX_PRIVATE_FLAG, %ecx +#else + andl %gs:PRIVATE_FUTEX, %ecx +#endif +#if FUTEX_WAIT != 0 + addl $FUTEX_WAIT, %ecx +#endif + movl %ebp, %edx + addl $cond_futex, %ebx +.Ladd_cond_futex: + movl $SYS_futex, %eax + ENTER_KERNEL + subl $cond_futex, %ebx +.Lsub_cond_futex: + +19: movl (%esp), %eax + call __pthread_disable_asynccancel +.LcleanupEND: + + /* Lock. */ + movl $1, %edx + xorl %eax, %eax + LOCK +#if cond_lock == 0 + cmpxchgl %edx, (%ebx) +#else + cmpxchgl %edx, cond_lock(%ebx) +#endif + jnz 5f + +6: movl broadcast_seq(%ebx), %eax + cmpl 12(%esp), %eax + jne 16f + + movl woken_seq(%ebx), %eax + movl woken_seq+4(%ebx), %ecx + + movl wakeup_seq(%ebx), %edi + movl wakeup_seq+4(%ebx), %edx + + cmpl 8(%esp), %edx + jne 7f + cmpl 4(%esp), %edi + je 22f + +7: cmpl %ecx, %edx + jne 9f + cmp %eax, %edi + je 22f + +9: addl $1, woken_seq(%ebx) + adcl $0, woken_seq+4(%ebx) + + /* Unlock */ +16: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx) + + /* Wake up a thread which wants to destroy the condvar object. */ + movl total_seq(%ebx), %eax + andl total_seq+4(%ebx), %eax + cmpl $0xffffffff, %eax + jne 17f + movl cond_nwaiters(%ebx), %eax + andl $~((1 << nwaiters_shift) - 1), %eax + jne 17f + + addl $cond_nwaiters, %ebx + movl $SYS_futex, %eax +#if FUTEX_PRIVATE_FLAG > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex-cond_nwaiters(%ebx) + sete %cl + subl $1, %ecx +#ifdef __ASSUME_PRIVATE_FUTEX + andl $FUTEX_PRIVATE_FLAG, %ecx +#else + andl %gs:PRIVATE_FUTEX, %ecx +#endif + addl $FUTEX_WAKE, %ecx + movl $1, %edx + ENTER_KERNEL + subl $cond_nwaiters, %ebx + +17: LOCK +#if cond_lock == 0 + subl $1, (%ebx) +#else + subl $1, cond_lock(%ebx) +#endif + jne 10f + + /* With requeue_pi, the mutex lock is held in the kernel. */ +11: movl 24+FRAME_SIZE(%esp), %eax + movl 16(%esp), %ecx + testl %ecx, %ecx + jnz 21f + + call __pthread_mutex_cond_lock +20: addl $FRAME_SIZE, %esp + cfi_adjust_cfa_offset(-FRAME_SIZE); + +14: popl %ebx + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebx) + popl %esi + cfi_adjust_cfa_offset(-4) + cfi_restore(%esi) + popl %edi + cfi_adjust_cfa_offset(-4) + cfi_restore(%edi) + popl %ebp + cfi_adjust_cfa_offset(-4) + cfi_restore(%ebp) + + /* We return the result of the mutex_lock operation. */ + ret + + cfi_restore_state + +21: call __pthread_mutex_cond_lock_adjust + xorl %eax, %eax + jmp 20b + + cfi_adjust_cfa_offset(-FRAME_SIZE); + + /* We need to go back to futex_wait. If we're using requeue_pi, then + release the mutex we had acquired and go back. */ +22: movl 16(%esp), %edx + test %edx, %edx + jz 8b + + /* Adjust the mutex values first and then unlock it. The unlock + should always succeed or else the kernel did not lock the mutex + correctly. */ + movl dep_mutex(%ebx), %eax + call __pthread_mutex_cond_lock_adjust + xorl %edx, %edx + call __pthread_mutex_unlock_usercnt + jmp 8b + + /* Initial locking failed. 
*/ +1: +#if cond_lock == 0 + movl %ebx, %edx +#else + leal cond_lock(%ebx), %edx +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_lock_wait + jmp 2b + + /* The initial unlocking of the mutex failed. */ +12: + LOCK +#if cond_lock == 0 + subl $1, (%ebx) +#else + subl $1, cond_lock(%ebx) +#endif + jne 14b + + movl %eax, %esi +#if cond_lock == 0 + movl %ebx, %eax +#else + leal cond_lock(%ebx), %eax +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_unlock_wake + + movl %esi, %eax + jmp 14b + + cfi_adjust_cfa_offset(FRAME_SIZE) + + /* Unlock in loop requires wakeup. */ +3: +#if cond_lock == 0 + movl %ebx, %eax +#else + leal cond_lock(%ebx), %eax +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_unlock_wake + jmp 4b + + /* Locking in loop failed. */ +5: +#if cond_lock == 0 + movl %ebx, %edx +#else + leal cond_lock(%ebx), %edx +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_lock_wait + jmp 6b + + /* Unlock after loop requires wakeup. */ +10: +#if cond_lock == 0 + movl %ebx, %eax +#else + leal cond_lock(%ebx), %eax +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_unlock_wake + jmp 11b + + .size __pthread_cond_wait, .-__pthread_cond_wait +versioned_symbol (libpthread, __pthread_cond_wait, pthread_cond_wait, + GLIBC_2_3_2) + + + .type __condvar_w_cleanup2, @function +__condvar_w_cleanup2: + subl $cond_futex, %ebx + .size __condvar_w_cleanup2, .-__condvar_w_cleanup2 +.LSbl4: + .type __condvar_w_cleanup, @function +__condvar_w_cleanup: + movl %eax, %esi + + /* Get internal lock. */ + movl $1, %edx + xorl %eax, %eax + LOCK +#if cond_lock == 0 + cmpxchgl %edx, (%ebx) +#else + cmpxchgl %edx, cond_lock(%ebx) +#endif + jz 1f + +#if cond_lock == 0 + movl %ebx, %edx +#else + leal cond_lock(%ebx), %edx +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_lock_wait + +1: movl broadcast_seq(%ebx), %eax + cmpl 12(%esp), %eax + jne 3f + + /* We increment the wakeup_seq counter only if it is lower than + total_seq. If this is not the case the thread was woken and + then canceled. In this case we ignore the signal. */ + movl total_seq(%ebx), %eax + movl total_seq+4(%ebx), %edi + cmpl wakeup_seq+4(%ebx), %edi + jb 6f + ja 7f + cmpl wakeup_seq(%ebx), %eax + jbe 7f + +6: addl $1, wakeup_seq(%ebx) + adcl $0, wakeup_seq+4(%ebx) + addl $1, cond_futex(%ebx) + +7: addl $1, woken_seq(%ebx) + adcl $0, woken_seq+4(%ebx) + +3: subl $(1 << nwaiters_shift), cond_nwaiters(%ebx) + + /* Wake up a thread which wants to destroy the condvar object. 
*/ + xorl %edi, %edi + movl total_seq(%ebx), %eax + andl total_seq+4(%ebx), %eax + cmpl $0xffffffff, %eax + jne 4f + movl cond_nwaiters(%ebx), %eax + andl $~((1 << nwaiters_shift) - 1), %eax + jne 4f + + addl $cond_nwaiters, %ebx + movl $SYS_futex, %eax +#if FUTEX_PRIVATE_FLAG > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex-cond_nwaiters(%ebx) + sete %cl + subl $1, %ecx +#ifdef __ASSUME_PRIVATE_FUTEX + andl $FUTEX_PRIVATE_FLAG, %ecx +#else + andl %gs:PRIVATE_FUTEX, %ecx +#endif + addl $FUTEX_WAKE, %ecx + movl $1, %edx + ENTER_KERNEL + subl $cond_nwaiters, %ebx + movl $1, %edi + +4: LOCK +#if cond_lock == 0 + subl $1, (%ebx) +#else + subl $1, cond_lock(%ebx) +#endif + je 2f + +#if cond_lock == 0 + movl %ebx, %eax +#else + leal cond_lock(%ebx), %eax +#endif +#if (LLL_SHARED-LLL_PRIVATE) > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex(%ebx) + setne %cl + subl $1, %ecx + andl $(LLL_SHARED-LLL_PRIVATE), %ecx +#if LLL_PRIVATE != 0 + addl $LLL_PRIVATE, %ecx +#endif + call __lll_unlock_wake + + /* Wake up all waiters to make sure no signal gets lost. */ +2: testl %edi, %edi + jnz 5f + addl $cond_futex, %ebx +#if FUTEX_PRIVATE_FLAG > 255 + xorl %ecx, %ecx +#endif + cmpl $-1, dep_mutex-cond_futex(%ebx) + sete %cl + subl $1, %ecx +#ifdef __ASSUME_PRIVATE_FUTEX + andl $FUTEX_PRIVATE_FLAG, %ecx +#else + andl %gs:PRIVATE_FUTEX, %ecx +#endif + addl $FUTEX_WAKE, %ecx + movl $SYS_futex, %eax + movl $0x7fffffff, %edx + ENTER_KERNEL + + /* Lock the mutex only if we don't own it already. This only happens + in case of PI mutexes, if we got cancelled after a successful + return of the futex syscall and before disabling async + cancellation. */ +5: movl 24+FRAME_SIZE(%esp), %eax + movl MUTEX_KIND(%eax), %ebx + andl $(ROBUST_BIT|PI_BIT), %ebx + cmpl $PI_BIT, %ebx + jne 8f + + movl (%eax), %ebx + andl $TID_MASK, %ebx + cmpl %ebx, %gs:TID + jne 8f + /* We managed to get the lock. Fix it up before returning. */ + call __pthread_mutex_cond_lock_adjust + jmp 9f + +8: call __pthread_mutex_cond_lock + +9: movl %esi, (%esp) +.LcallUR: + call _Unwind_Resume + hlt +.LENDCODE: + cfi_endproc + .size __condvar_w_cleanup, .-__condvar_w_cleanup + + + .section .gcc_except_table,"a",@progbits +.LexceptSTART: + .byte DW_EH_PE_omit # @LPStart format (omit) + .byte DW_EH_PE_omit # @TType format (omit) + .byte DW_EH_PE_sdata4 # call-site format + # DW_EH_PE_sdata4 + .uleb128 .Lcstend-.Lcstbegin +.Lcstbegin: + .long .LcleanupSTART-.LSTARTCODE + .long .Ladd_cond_futex_pi-.LcleanupSTART + .long __condvar_w_cleanup-.LSTARTCODE + .uleb128 0 + .long .Ladd_cond_futex_pi-.LSTARTCODE + .long .Lsub_cond_futex_pi-.Ladd_cond_futex_pi + .long __condvar_w_cleanup2-.LSTARTCODE + .uleb128 0 + .long .Lsub_cond_futex_pi-.LSTARTCODE + .long .Ladd_cond_futex-.Lsub_cond_futex_pi + .long __condvar_w_cleanup-.LSTARTCODE + .uleb128 0 + .long .Ladd_cond_futex-.LSTARTCODE + .long .Lsub_cond_futex-.Ladd_cond_futex + .long __condvar_w_cleanup2-.LSTARTCODE + .uleb128 0 + .long .Lsub_cond_futex-.LSTARTCODE + .long .LcleanupEND-.Lsub_cond_futex + .long __condvar_w_cleanup-.LSTARTCODE + .uleb128 0 + .long .LcallUR-.LSTARTCODE + .long .LENDCODE-.LcallUR + .long 0 + .uleb128 0 +.Lcstend: + +#ifdef SHARED + .hidden DW.ref.__gcc_personality_v0 + .weak DW.ref.__gcc_personality_v0 + .section .gnu.linkonce.d.DW.ref.__gcc_personality_v0,"aw",@progbits + .align 4 + .type DW.ref.__gcc_personality_v0, @object + .size DW.ref.__gcc_personality_v0, 4 +DW.ref.__gcc_personality_v0: + .long __gcc_personality_v0 +#endif -- cgit v1.2.3-70-g09d2