author     Carlos O'Donell <carlos@systemhalted.org>  2006-04-19 05:35:34 +0000
committer  Carlos O'Donell <carlos@systemhalted.org>  2006-04-19 05:35:34 +0000
commit     5d3b4a7723a58d00e26c3a144b67d97ec25e241a
tree       3da58c70fe69fb9156d95fa0a18cfc7a525c4ab6 /sysdeps/hppa
parent     44f4e767265e025e254f40ff02cd3793048d362d
2006-04-19  Carlos O'Donell  <carlos@systemhalted.org>

	* sysdeps/hppa/linuxthreads/pspinlock.c: New file.
	* sysdeps/hppa/linuxthreads/pt-machine.h: Likewise.
	* sysdeps/hppa/linuxthreads/tls.h: Likewise.
	* sysdeps/unix/sysv/linux/hppa/linuxthreads/aio_cancel.c: Likewise.
	* sysdeps/unix/sysv/linux/hppa/linuxthreads/malloc-machine.h: Likewise.
	* sysdeps/unix/sysv/linux/hppa/linuxthreads/pt-initfini.c: Likewise.
	* sysdeps/unix/sysv/linux/hppa/linuxthreads/sysdep-cancel.h: Likewise.
	* sysdeps/unix/sysv/linux/hppa/linuxthreads/bits/initspin.h: Likewise.
	* sysdeps/unix/sysv/linux/hppa/linuxthreads/bits/pthreadtypes.h: Likewise.
Diffstat (limited to 'sysdeps/hppa')
-rw-r--r--  sysdeps/hppa/linuxthreads/pspinlock.c   |  82
-rw-r--r--  sysdeps/hppa/linuxthreads/pt-machine.h  | 134
-rw-r--r--  sysdeps/hppa/linuxthreads/tls.h         | 163
3 files changed, 379 insertions(+), 0 deletions(-)
diff --git a/sysdeps/hppa/linuxthreads/pspinlock.c b/sysdeps/hppa/linuxthreads/pspinlock.c
new file mode 100644
index 0000000000..e5a5545227
--- /dev/null
+++ b/sysdeps/hppa/linuxthreads/pspinlock.c
@@ -0,0 +1,82 @@
+/* POSIX spinlock implementation. hppa version.
+ Copyright (C) 2000 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#include <errno.h>
+#include <pthread.h>
+#include "internals.h"
+
+int
+__pthread_spin_lock (pthread_spinlock_t *lock)
+{
+ volatile unsigned int *addr = __ldcw_align (lock);
+
+ while (__ldcw (addr) == 0)
+ while (*addr == 0) ;
+
+ return 0;
+}
+weak_alias (__pthread_spin_lock, pthread_spin_lock)
+
+
+int
+__pthread_spin_trylock (pthread_spinlock_t *lock)
+{
+ volatile unsigned int *a = __ldcw_align (lock);
+
+ return __ldcw (a) ? 0 : EBUSY;
+}
+weak_alias (__pthread_spin_trylock, pthread_spin_trylock)
+
+
+int
+__pthread_spin_unlock (pthread_spinlock_t *lock)
+{
+ volatile unsigned int *a = __ldcw_align (lock);
+ int tmp = 1;
+ /* This should act as a memory barrier to newer compilers.  */
+ __asm__ __volatile__ ("stw,ma %1,0(%0)"
+ : : "r" (a), "r" (tmp) : "memory");
+ return 0;
+}
+weak_alias (__pthread_spin_unlock, pthread_spin_unlock)
+
+
+int
+__pthread_spin_init (pthread_spinlock_t *lock, int pshared)
+{
+ /* We can ignore the `pshared' parameter. Since we are busy-waiting,
+ all processes which can access the memory location `lock' points
+ to can use the spinlock. */
+ volatile unsigned int *a = __ldcw_align (lock);
+ int tmp = 1;
+ /* This should act as a memory barrier to newer compilers.  */
+ __asm__ __volatile__ ("stw,ma %1,0(%0)"
+ : : "r" (a), "r" (tmp) : "memory");
+ return 0;
+}
+weak_alias (__pthread_spin_init, pthread_spin_init)
+
+
+int
+__pthread_spin_destroy (pthread_spinlock_t *lock)
+{
+ /* Nothing to do. */
+ return 0;
+}
+weak_alias (__pthread_spin_destroy, pthread_spin_destroy)
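
For context, the functions above implement the standard POSIX spinlock interface, so they are exercised through the usual pthread_spin_* calls. The program below is only an illustrative caller-side sketch and is not part of this commit; it assumes a hosted build linked with -lpthread.

/* Illustrative only: two threads serialize increments through the
   spinlock interface implemented by pspinlock.c above.  */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t lock;
static long counter;

static void *
worker (void *arg)
{
  int i;
  for (i = 0; i < 100000; i++)
    {
      pthread_spin_lock (&lock);
      counter++;
      pthread_spin_unlock (&lock);
    }
  return NULL;
}

int
main (void)
{
  pthread_t t1, t2;
  pthread_spin_init (&lock, PTHREAD_PROCESS_PRIVATE);
  pthread_create (&t1, NULL, worker, NULL);
  pthread_create (&t2, NULL, worker, NULL);
  pthread_join (t1, NULL);
  pthread_join (t2, NULL);
  pthread_spin_destroy (&lock);
  printf ("counter = %ld\n", counter);
  return 0;
}

Because __pthread_spin_trylock returns EBUSY when the ldcw word has already been cleared by another thread, a caller can also poll with pthread_spin_trylock instead of blocking in pthread_spin_lock.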
diff --git a/sysdeps/hppa/linuxthreads/pt-machine.h b/sysdeps/hppa/linuxthreads/pt-machine.h
new file mode 100644
index 0000000000..f35523f0f3
--- /dev/null
+++ b/sysdeps/hppa/linuxthreads/pt-machine.h
@@ -0,0 +1,134 @@
+/* Machine-dependent pthreads configuration and inline functions.
+ hppa version.
+ Copyright (C) 2000, 2002, 2003 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Richard Henderson <rth@tamu.edu>.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2.1 of the
+ License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; see the file COPYING.LIB. If not,
+ write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ Boston, MA 02111-1307, USA. */
+
+#ifndef _PT_MACHINE_H
+#define _PT_MACHINE_H 1
+
+#include <sys/types.h>
+#include <bits/initspin.h>
+
+#ifndef PT_EI
+# define PT_EI extern inline __attribute__ ((always_inline))
+#endif
+
+extern inline long int testandset (__atomic_lock_t *spinlock);
+extern inline int __compare_and_swap (long int *p, long int oldval, long int newval);
+extern inline int lock_held (__atomic_lock_t *spinlock);
+extern inline int __load_and_clear (__atomic_lock_t *spinlock);
+
+/* Get some notion of the current stack. Need not be exactly the top
+ of the stack, just something somewhere in the current frame. */
+#define CURRENT_STACK_FRAME stack_pointer
+register char * stack_pointer __asm__ ("%r30");
+
+/* Get/Set thread-specific pointer. We have to call into the kernel to
+ * modify it, but we can read it in user mode. */
+#ifndef THREAD_SELF
+#define THREAD_SELF __get_cr27()
+#endif
+
+#ifndef SET_THREAD_SELF
+#define SET_THREAD_SELF(descr) __set_cr27(descr)
+#endif
+/* Use this to determine type */
+struct _pthread_descr_struct *__thread_self;
+
+static inline struct _pthread_descr_struct * __get_cr27(void)
+{
+ long cr27;
+ asm ("mfctl %%cr27, %0" : "=r" (cr27) : );
+ return (struct _pthread_descr_struct *) cr27;
+}
+
+#ifndef INIT_THREAD_SELF
+#define INIT_THREAD_SELF(descr, nr) __set_cr27(descr)
+#endif
+
+static inline void __set_cr27(struct _pthread_descr_struct * cr27)
+{
+ asm ( "ble 0xe0(%%sr2, %%r0)\n\t"
+ "copy %0, %%r26"
+ : : "r" (cr27) : "r26" );
+}
+
+/* We want the OS to assign stack addresses. */
+#define FLOATING_STACKS 1
+#define ARCH_STACK_MAX_SIZE 8*1024*1024
+
+/* The hppa only has one atomic read and modify memory operation,
+ load and clear, so hppa spinlocks must use zero to signify that
+ someone is holding the lock. The address used for the ldcw
+ semaphore must be 16-byte aligned. */
+#define __ldcw(a) \
+({ \
+ unsigned int __ret; \
+ __asm__ __volatile__("ldcw 0(%1),%0" \
+ : "=r" (__ret) : "r" (a) : "memory"); \
+ __ret; \
+})
+
+/* Strongly ordered lock reset */
+#define __lock_reset(lock_addr, tmp) \
+({ \
+ __asm__ __volatile__ ("stw,ma %1,0(%0)" \
+ : : "r" (lock_addr), "r" (tmp) : "memory"); \
+})
+
+/* Because malloc only guarantees 8-byte alignment for malloc'd data,
+ and GCC only guarantees 8-byte alignment for stack locals, we can't
+ be assured of 16-byte alignment for atomic lock data even if we
+ specify "__attribute__ ((aligned(16)))" in the type declaration. So,
+ we use a struct containing an array of four ints for the atomic lock
+ type and dynamically select the 16-byte aligned int from the array
+ for the semaphore. */
+#define __PA_LDCW_ALIGNMENT 16
+#define __ldcw_align(a) ({ \
+ volatile unsigned int __ret = (unsigned int) a; \
+ if ((__ret & ~(__PA_LDCW_ALIGNMENT - 1)) < (unsigned int) a) \
+ __ret = (__ret & ~(__PA_LDCW_ALIGNMENT - 1)) + __PA_LDCW_ALIGNMENT; \
+ (unsigned int *) __ret; \
+})
+
+/* Spinlock implementation; required. */
+PT_EI int
+__load_and_clear (__atomic_lock_t *spinlock)
+{
+ volatile unsigned int *a = __ldcw_align (spinlock);
+
+ return __ldcw (a);
+}
+
+/* Emulate testandset */
+PT_EI long int
+testandset (__atomic_lock_t *spinlock)
+{
+ return (__load_and_clear(spinlock) == 0);
+}
+
+PT_EI int
+lock_held (__atomic_lock_t *spinlock)
+{
+ volatile unsigned int *a = __ldcw_align (spinlock);
+
+ return *a == 0;
+}
+
+#endif /* pt-machine.h */
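
Two details of pt-machine.h are worth seeing in isolation. Because ldcw clears the word it loads, a lock value of zero means "held" and nonzero means "free", which is why __load_and_clear returning 0 signals a failed acquisition. And __ldcw_align rounds the lock address up to the next 16-byte boundary inside the four-int lock object so the word handed to ldcw always meets the hardware alignment rule. The following self-contained sketch shows that round-up arithmetic; align16 is a hypothetical stand-in, not part of the patch.

/* Illustrative only: the same round-up-to-16 computation that
   __ldcw_align performs, applied to a plain address value.  */
#include <stdint.h>
#include <stdio.h>

#define LDCW_ALIGNMENT 16

static uintptr_t
align16 (uintptr_t addr)        /* hypothetical stand-in for __ldcw_align */
{
  uintptr_t down = addr & ~(uintptr_t) (LDCW_ALIGNMENT - 1);
  /* If addr was not already aligned, step up to the next boundary.  */
  return down < addr ? down + LDCW_ALIGNMENT : down;
}

int
main (void)
{
  printf ("%#lx -> %#lx\n", 0x1008UL, (unsigned long) align16 (0x1008));
  printf ("%#lx -> %#lx\n", 0x1010UL, (unsigned long) align16 (0x1010));
  return 0;
}

Since an array of four ints spans 16 bytes, it always contains at least one 16-byte-aligned word, so the address found this way stays inside the lock object even when malloc or the stack only guarantees 8-byte alignment.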
diff --git a/sysdeps/hppa/linuxthreads/tls.h b/sysdeps/hppa/linuxthreads/tls.h
new file mode 100644
index 0000000000..3d33a18923
--- /dev/null
+++ b/sysdeps/hppa/linuxthreads/tls.h
@@ -0,0 +1,163 @@
+/* Definition for thread-local data handling. linuxthreads/hppa version.
+ Copyright (C) 2005 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ The GNU C Library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with the GNU C Library; if not, write to the Free
+ Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+ 02111-1307 USA. */
+
+#ifndef _TLS_H
+#define _TLS_H
+
+#ifndef __ASSEMBLER__
+# include <pt-machine.h>
+# include <stdbool.h>
+# include <stddef.h>
+
+/* Type for the dtv. */
+typedef union dtv
+{
+ size_t counter;
+ struct
+ {
+ void *val;
+ bool is_static;
+ } pointer;
+} dtv_t;
+
+#else /* __ASSEMBLER__ */
+# include <tcb-offsets.h>
+#endif /* __ASSEMBLER__ */
+
+
+#if defined HAVE_TLS_SUPPORT
+
+/* Signal that TLS support is available. */
+# define USE_TLS 1
+
+# ifndef __ASSEMBLER__
+
+typedef struct
+{
+ dtv_t *dtv;
+ void *private;
+} tcbhead_t;
+
+/* Include some syscall information for other headers */
+# include <sysdep.h>
+
+/* This is the size of the initial TCB. */
+# define TLS_INIT_TCB_SIZE sizeof (tcbhead_t)
+
+/* Alignment requirements for the initial TCB. */
+# define TLS_INIT_TCB_ALIGN __alignof__ (tcbhead_t)
+
+/* This is the size of the TCB. */
+# define TLS_TCB_SIZE sizeof (tcbhead_t)
+
+/* This is the size we need before TCB. */
+# define TLS_PRE_TCB_SIZE sizeof (struct _pthread_descr_struct)
+
+/* Alignment requirements for the TCB. */
+# define TLS_TCB_ALIGN __alignof__ (struct _pthread_descr_struct)
+
+/* The TLS blocks start right after the TCB. */
+# define TLS_DTV_AT_TP 1
+
+/* Return the thread descriptor for the current thread. */
+# undef THREAD_SELF
+# define THREAD_SELF \
+ ({ struct _pthread_descr_struct *__self; \
+ __self = __get_cr27(); \
+ __self - 1; \
+ })
+
+# undef INIT_THREAD_SELF
+# define INIT_THREAD_SELF(descr, nr) \
+ ({ struct _pthread_descr_struct *__self = (void *)descr; \
+ __set_cr27(__self + 1); \
+ 0; \
+ })
+
+/* Access to data in the thread descriptor is easy. */
+#define THREAD_GETMEM(descr, member) \
+ ((void) sizeof (descr), THREAD_SELF->member)
+#define THREAD_GETMEM_NC(descr, member) \
+ ((void) sizeof (descr), THREAD_SELF->member)
+#define THREAD_SETMEM(descr, member, value) \
+ ((void) sizeof (descr), THREAD_SELF->member = (value))
+#define THREAD_SETMEM_NC(descr, member, value) \
+ ((void) sizeof (descr), THREAD_SELF->member = (value))
+
+/* Install the dtv pointer. The pointer passed is to the element with
+ index -1 which contains the length. */
+# define INSTALL_DTV(tcbp, dtvp) \
+ ((tcbhead_t *) (tcbp))->dtv = dtvp + 1
+
+/* Install new dtv for current thread. */
+# define INSTALL_NEW_DTV(dtv) \
+ ({ tcbhead_t *__tcbp = (tcbhead_t *)__get_cr27(); \
+ __tcbp->dtv = dtv; \
+ })
+
+/* Return dtv of given thread descriptor. */
+# define GET_DTV(tcbp) \
+ (((tcbhead_t *) (tcbp))->dtv)
+
+/* Code to initially initialize the thread pointer. This might need
+ special attention since 'errno' is not yet available and if the
+ operation can cause a failure 'errno' must not be touched. */
+# define TLS_INIT_TP(tcbp, secondcall) \
+ ({ __set_cr27(tcbp); 0; })
+
+/* Return the address of the dtv for the current thread. */
+# define THREAD_DTV() \
+ ({ tcbhead_t *__tcbp = (tcbhead_t *)__get_cr27(); \
+ __tcbp->dtv; \
+ })
+
+# define TLS_MULTIPLE_THREADS_IN_TCB 1
+
+/* Get the thread descriptor definition. This must be after the
+ definition of THREAD_SELF for TLS. */
+# include <linuxthreads/descr.h>
+
+# endif /* __ASSEMBLER__ */
+
+#else
+
+# ifndef __ASSEMBLER__
+
+typedef struct
+{
+ void *tcb;
+ dtv_t *dtv;
+ void *self;
+ int multiple_threads;
+} tcbhead_t;
+
+/* Get the thread descriptor definition. */
+# include <linuxthreads/descr.h>
+
+# define NONTLS_INIT_TP \
+ do { \
+ static const tcbhead_t nontls_init_tp = { .multiple_threads = 0 }; \
+ INIT_THREAD_SELF(&nontls_init_tp, 0); \
+ } while (0)
+
+# endif /* __ASSEMBLER__ */
+
+#endif /* HAVE_TLS_SUPPORT */
+
+#endif /* tls.h */
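
For the TLS case, the pointer arithmetic in THREAD_SELF and INIT_THREAD_SELF assumes the linuxthreads descriptor is allocated directly in front of the tcbhead_t, with cr27 holding the TCB address; subtracting one descriptor-sized object from cr27 therefore recovers the descriptor. A standalone sketch of that layout, using a hypothetical descr_t in place of the internal struct _pthread_descr_struct:

/* Illustrative only: models the descriptor-before-TCB layout that the
   THREAD_SELF and INIT_THREAD_SELF macros above rely on.  */
#include <stdio.h>
#include <stdlib.h>

typedef struct { int dummy[32]; } descr_t;            /* hypothetical descriptor */
typedef struct { void *dtv; void *private; } tcb_t;   /* mirrors tcbhead_t above */

int
main (void)
{
  /* One block holds the descriptor followed immediately by the TCB.  */
  descr_t *descr = malloc (sizeof (descr_t) + sizeof (tcb_t));
  if (descr == NULL)
    return 1;

  /* INIT_THREAD_SELF stores descr + 1, i.e. the TCB address, in cr27.  */
  tcb_t *tcb = (tcb_t *) (descr + 1);

  /* THREAD_SELF reads cr27 back and steps one descriptor downward.  */
  descr_t *self = (descr_t *) tcb - 1;

  printf ("descr=%p  tcb=%p  THREAD_SELF=%p\n",
          (void *) descr, (void *) tcb, (void *) self);
  free (descr);
  return 0;
}

The same offset convention is why TLS_PRE_TCB_SIZE is sizeof (struct _pthread_descr_struct): the descriptor occupies exactly the space between the start of the allocation and the thread pointer.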