Diffstat (limited to 'sysdeps/unix/sysv')
-rw-r--r--  sysdeps/unix/sysv/linux/alpha/ioperm.c |   7
-rw-r--r--  sysdeps/unix/sysv/linux/alpha/sysdep.h | 354
2 files changed, 348 insertions, 13 deletions
diff --git a/sysdeps/unix/sysv/linux/alpha/ioperm.c b/sysdeps/unix/sysv/linux/alpha/ioperm.c
index 086c782b9f..cf775674b4 100644
--- a/sysdeps/unix/sysv/linux/alpha/ioperm.c
+++ b/sysdeps/unix/sysv/linux/alpha/ioperm.c
@@ -196,12 +196,7 @@ stl_mb(unsigned int val, unsigned long addr)
 static inline void
 __sethae(unsigned long value)
 {
-  register unsigned long r16 __asm__("$16") = value;
-  register unsigned long r0 __asm__("$0") = __NR_sethae;
-  __asm__ __volatile__ ("callsys"
-			: "=r"(r0)
-			: "0"(r0), "r" (r16)
-			: inline_syscall_clobbers, "$19");
+  INLINE_SYSCALL_CALL (sethae, value);
 }
 
 extern long __pciconfig_iobase(enum __pciconfig_iobase_which __which,
diff --git a/sysdeps/unix/sysv/linux/alpha/sysdep.h b/sysdeps/unix/sysv/linux/alpha/sysdep.h
index f8c9e589ec..218e207e50 100644
--- a/sysdeps/unix/sysv/linux/alpha/sysdep.h
+++ b/sysdeps/unix/sysv/linux/alpha/sysdep.h
@@ -19,14 +19,10 @@
 #ifndef _LINUX_ALPHA_SYSDEP_H
 #define _LINUX_ALPHA_SYSDEP_H 1
 
-#ifdef __ASSEMBLER__
-#include <asm/pal.h>
-#include <alpha/regdef.h>
-#endif
-
 /* There is some commonality.  */
 #include <sysdeps/unix/sysv/linux/sysdep.h>
-#include <sysdeps/unix/alpha/sysdep.h>
+#include <sysdeps/unix/sysdep.h>
+#include <dl-sysdep.h>
 
 /* Defines RTLD_PRIVATE_ERRNO.  */
 #include <tls.h>
@@ -39,4 +35,348 @@
 
 #define SINGLE_THREAD_BY_GLOBAL 1
 
-#endif /* _LINUX_ALPHA_SYSDEP_H */
+#ifdef __ASSEMBLER__
+#include <asm/pal.h>
+#include <alpha/regdef.h>
+
+#define __LABEL(x) x##:
+
+#define LEAF(name, framesize) \
+  .globl name; \
+  .align 4; \
+  .ent name, 0; \
+  __LABEL(name) \
+  .frame sp, framesize, ra
+
+#define ENTRY(name) \
+  .globl name; \
+  .align 4; \
+  .ent name, 0; \
+  __LABEL(name) \
+  .frame sp, 0, ra
+
+/* Mark the end of function SYM.  */
+#undef END
+#define END(sym) .end sym
+
+#ifdef PROF
+# define PSEUDO_PROF \
+  .set noat; \
+  lda AT, _mcount; \
+  jsr AT, (AT), _mcount; \
+  .set at
+#else
+# define PSEUDO_PROF
+#endif
+
+#ifdef PROF
+# define PSEUDO_PROLOGUE \
+  .frame sp, 0, ra; \
+  ldgp gp,0(pv); \
+  PSEUDO_PROF; \
+  .prologue 1
+#elif defined PIC
+# define PSEUDO_PROLOGUE \
+  .frame sp, 0, ra; \
+  .prologue 0
+#else
+# define PSEUDO_PROLOGUE \
+  .frame sp, 0, ra; \
+  ldgp gp,0(pv); \
+  .prologue 1
+#endif /* PROF */
+
+#ifdef PROF
+# define USEPV_PROF std
+#else
+# define USEPV_PROF no
+#endif
+
+#if RTLD_PRIVATE_ERRNO
+# define SYSCALL_ERROR_LABEL $syscall_error
+# define SYSCALL_ERROR_HANDLER \
+$syscall_error: \
+  stl v0, rtld_errno(gp) !gprel; \
+  lda v0, -1; \
+  ret
+# define SYSCALL_ERROR_FALLTHRU
+#elif defined(PIC)
+# define SYSCALL_ERROR_LABEL __syscall_error !samegp
+# define SYSCALL_ERROR_HANDLER
+# define SYSCALL_ERROR_FALLTHRU br SYSCALL_ERROR_LABEL
+#else
+# define SYSCALL_ERROR_LABEL $syscall_error
+# define SYSCALL_ERROR_HANDLER \
+$syscall_error: \
+  jmp $31, __syscall_error
+# define SYSCALL_ERROR_FALLTHRU
+#endif /* RTLD_PRIVATE_ERRNO */
+
+/* Overridden by specific syscalls.  */
+#undef PSEUDO_PREPARE_ARGS
+#define PSEUDO_PREPARE_ARGS /* Nothing.  */
+
+#define PSEUDO(name, syscall_name, args) \
+  .globl name; \
+  .align 4; \
+  .ent name,0; \
+__LABEL(name) \
+  PSEUDO_PROLOGUE; \
+  PSEUDO_PREPARE_ARGS \
+  lda v0, SYS_ify(syscall_name); \
+  call_pal PAL_callsys; \
+  bne a3, SYSCALL_ERROR_LABEL
+
+#undef PSEUDO_END
+#define PSEUDO_END(sym) \
+  SYSCALL_ERROR_HANDLER; \
+  END(sym)
+
+#define PSEUDO_NOERRNO(name, syscall_name, args) \
+  .globl name; \
+  .align 4; \
+  .ent name,0; \
+__LABEL(name) \
+  PSEUDO_PROLOGUE; \
+  PSEUDO_PREPARE_ARGS \
+  lda v0, SYS_ify(syscall_name); \
+  call_pal PAL_callsys;
+
+#undef PSEUDO_END_NOERRNO
+#define PSEUDO_END_NOERRNO(sym) END(sym)
+
+#define ret_NOERRNO ret
+
+#define PSEUDO_ERRVAL(name, syscall_name, args) \
+  .globl name; \
+  .align 4; \
+  .ent name,0; \
+__LABEL(name) \
+  PSEUDO_PROLOGUE; \
+  PSEUDO_PREPARE_ARGS \
+  lda v0, SYS_ify(syscall_name); \
+  call_pal PAL_callsys;
+
+#undef PSEUDO_END_ERRVAL
+#define PSEUDO_END_ERRVAL(sym) END(sym)
+
+#define ret_ERRVAL ret
+
+#define r0 v0
+#define r1 a4
+
+#define MOVE(x,y) mov x,y
+
+#else /* !ASSEMBLER */
+
+/* In order to get __set_errno() definition in INLINE_SYSCALL.  */
+#include <errno.h>
+
+#undef INLINE_SYSCALL
+#define INLINE_SYSCALL(name, nr, args...) \
+({ \
+  INTERNAL_SYSCALL_DECL (_sc_err); \
+  long int _sc_ret = INTERNAL_SYSCALL (name, _sc_err, nr, args); \
+  if (INTERNAL_SYSCALL_ERROR_P (_sc_ret, _sc_err)) \
+    { \
+      __set_errno (INTERNAL_SYSCALL_ERRNO (_sc_ret, _sc_err)); \
+      _sc_ret = -1L; \
+    } \
+  _sc_ret; \
+})
+
+#define INTERNAL_SYSCALL(name, err_out, nr, args...) \
+  internal_syscall##nr(__NR_##name, args)
+
+#define INTERNAL_SYSCALL_NCS(name, err_out, nr, args...) \
+  internal_syscall##nr(name, args)
+
+#define INTERNAL_SYSCALL_DECL(err) do { } while (0)
+
+/* The normal Alpha calling convention sign-extends 32-bit quantities
+   no matter what the "real" sign of the 32-bit type.  We want to
+   preserve that when filling in values for the kernel.  */
+#define syscall_promote(arg) \
+  (sizeof (arg) == 4 ? (long int)(int)(long int)(arg) : (long int)(arg))
+
+/* Make sure and "use" the variable that we're not returning,
+   in order to suppress unused variable warnings.  */
+#define INTERNAL_SYSCALL_ERROR_P(val, err) \
+  ((unsigned long) (val) > -4096UL)
+#define INTERNAL_SYSCALL_ERRNO(val, err) (-(val))
+
+#define internal_syscall_clobbers \
+  "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", \
+  "$22", "$23", "$24", "$25", "$27", "$28", "memory"
+
+/* It is moderately important optimization-wise to limit the lifetime
+   of the hard-register variables as much as possible.  Thus we copy
+   in/out as close to the asm as possible.  */
+
+#define internal_syscall0(name, args...) \
+({ \
+  register long int _sc_19 __asm__("$19"); \
+  register long int _sc_0 = name; \
+  __asm__ __volatile__ \
+    ("callsys # %0 %1 <= %2" \
+     : "+v"(_sc_0), "=r"(_sc_19) \
+     : : internal_syscall_clobbers, \
+         "$16", "$17", "$18", "$20", "$21"); \
+  _sc_19 != 0 ? -_sc_0 : _sc_0; \
+})
+
+#define internal_syscall1(name,arg1) \
+({ \
+  register long int _tmp_16 = syscall_promote (arg1); \
+  register long int _sc_0 = name; \
+  register long int _sc_16 __asm__("$16") = _tmp_16; \
+  register long int _sc_19 __asm__("$19"); \
+  __asm__ __volatile__ \
+    ("callsys # %0 %1 <= %2 %3" \
+     : "+v"(_sc_0), "=r"(_sc_19), "+r"(_sc_16) \
+     : : internal_syscall_clobbers, \
+         "$17", "$18", "$20", "$21"); \
+  _sc_19 != 0 ? -_sc_0 : _sc_0; \
+})
+
+#define internal_syscall2(name,arg1,arg2) \
+({ \
+  register long int _tmp_16 = syscall_promote (arg1); \
+  register long int _tmp_17 = syscall_promote (arg2); \
+  register long int _sc_0 = name; \
+  register long int _sc_16 __asm__("$16") = _tmp_16; \
+  register long int _sc_17 __asm__("$17") = _tmp_17; \
+  register long int _sc_19 __asm__("$19"); \
+  __asm__ __volatile__ \
+    ("callsys # %0 %1 <= %2 %3 %4" \
+     : "+v"(_sc_0), "=r"(_sc_19), \
+       "+r"(_sc_16), "+r"(_sc_17) \
+     : : internal_syscall_clobbers, \
+         "$18", "$20", "$21"); \
+  _sc_19 != 0 ? -_sc_0 : _sc_0; \
+})
+
+#define internal_syscall3(name,arg1,arg2,arg3) \
+({ \
+  register long int _tmp_16 = syscall_promote (arg1); \
+  register long int _tmp_17 = syscall_promote (arg2); \
+  register long int _tmp_18 = syscall_promote (arg3); \
+  register long int _sc_0 = name; \
+  register long int _sc_16 __asm__("$16") = _tmp_16; \
+  register long int _sc_17 __asm__("$17") = _tmp_17; \
+  register long int _sc_18 __asm__("$18") = _tmp_18; \
+  register long int _sc_19 __asm__("$19"); \
+  __asm__ __volatile__ \
+    ("callsys # %0 %1 <= %2 %3 %4 %5" \
+     : "+v"(_sc_0), "=r"(_sc_19), "+r"(_sc_16), \
+       "+r"(_sc_17), "+r"(_sc_18) \
+     : : internal_syscall_clobbers, "$20", "$21"); \
+  _sc_19 != 0 ? -_sc_0 : _sc_0; \
+})
+
+#define internal_syscall4(name,arg1,arg2,arg3,arg4) \
+({ \
+  register long int _tmp_16 = syscall_promote (arg1); \
+  register long int _tmp_17 = syscall_promote (arg2); \
+  register long int _tmp_18 = syscall_promote (arg3); \
+  register long int _tmp_19 = syscall_promote (arg4); \
+  register long int _sc_0 = name; \
+  register long int _sc_16 __asm__("$16") = _tmp_16; \
+  register long int _sc_17 __asm__("$17") = _tmp_17; \
+  register long int _sc_18 __asm__("$18") = _tmp_18; \
+  register long int _sc_19 __asm__("$19") = _tmp_19; \
+  __asm__ __volatile__ \
+    ("callsys # %0 %1 <= %2 %3 %4 %5 %6" \
+     : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16), \
+       "+r"(_sc_17), "+r"(_sc_18) \
+     : : internal_syscall_clobbers, "$20", "$21"); \
+  _sc_19 != 0 ? -_sc_0 : _sc_0; \
+})
+
+#define internal_syscall5(name,arg1,arg2,arg3,arg4,arg5) \
+({ \
+  register long int _tmp_16 = syscall_promote (arg1); \
+  register long int _tmp_17 = syscall_promote (arg2); \
+  register long int _tmp_18 = syscall_promote (arg3); \
+  register long int _tmp_19 = syscall_promote (arg4); \
+  register long int _tmp_20 = syscall_promote (arg5); \
+  register long int _sc_0 = name; \
+  register long int _sc_16 __asm__("$16") = _tmp_16; \
+  register long int _sc_17 __asm__("$17") = _tmp_17; \
+  register long int _sc_18 __asm__("$18") = _tmp_18; \
+  register long int _sc_19 __asm__("$19") = _tmp_19; \
+  register long int _sc_20 __asm__("$20") = _tmp_20; \
+  __asm__ __volatile__ \
+    ("callsys # %0 %1 <= %2 %3 %4 %5 %6 %7" \
+     : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16), \
+       "+r"(_sc_17), "+r"(_sc_18), "+r"(_sc_20) \
+     : : internal_syscall_clobbers, "$21"); \
+  _sc_19 != 0 ? -_sc_0 : _sc_0; \
+})
+
+#define internal_syscall6(name,arg1,arg2,arg3,arg4,arg5,arg6) \
+({ \
+  register long int _tmp_16 = syscall_promote (arg1); \
+  register long int _tmp_17 = syscall_promote (arg2); \
+  register long int _tmp_18 = syscall_promote (arg3); \
+  register long int _tmp_19 = syscall_promote (arg4); \
+  register long int _tmp_20 = syscall_promote (arg5); \
+  register long int _tmp_21 = syscall_promote (arg6); \
+  register long int _sc_0 = name; \
+  register long int _sc_16 __asm__("$16") = _tmp_16; \
+  register long int _sc_17 __asm__("$17") = _tmp_17; \
+  register long int _sc_18 __asm__("$18") = _tmp_18; \
+  register long int _sc_19 __asm__("$19") = _tmp_19; \
+  register long int _sc_20 __asm__("$20") = _tmp_20; \
+  register long int _sc_21 __asm__("$21") = _tmp_21; \
+  __asm__ __volatile__ \
+    ("callsys # %0 %1 <= %2 %3 %4 %5 %6 %7 %8" \
+     : "+v"(_sc_0), "+r"(_sc_19), "+r"(_sc_16), \
+       "+r"(_sc_17), "+r"(_sc_18), "+r"(_sc_20), \
+       "+r"(_sc_21) \
+     : : internal_syscall_clobbers); \
+  _sc_19 != 0 ? -_sc_0 : _sc_0; \
+})
+#endif /* ASSEMBLER */
+
+/* Pointer mangling support.  Note that tls access is slow enough that
+   we don't deoptimize things by placing the pointer check value there.  */
+
+#ifdef __ASSEMBLER__
+# if IS_IN (rtld)
+#  define PTR_MANGLE(dst, src, tmp) \
+  ldah tmp, __pointer_chk_guard_local($29) !gprelhigh; \
+  ldq tmp, __pointer_chk_guard_local(tmp) !gprellow; \
+  xor src, tmp, dst
+#  define PTR_MANGLE2(dst, src, tmp) \
+  xor src, tmp, dst
+# elif defined SHARED
+#  define PTR_MANGLE(dst, src, tmp) \
+  ldq tmp, __pointer_chk_guard; \
+  xor src, tmp, dst
+# else
+#  define PTR_MANGLE(dst, src, tmp) \
+  ldq tmp, __pointer_chk_guard_local; \
+  xor src, tmp, dst
+# endif
+# define PTR_MANGLE2(dst, src, tmp) \
+  xor src, tmp, dst
+# define PTR_DEMANGLE(dst, tmp)  PTR_MANGLE(dst, dst, tmp)
+# define PTR_DEMANGLE2(dst, tmp) PTR_MANGLE2(dst, dst, tmp)
+#else
+# include <stdint.h>
+# if (IS_IN (rtld) \
+      || (!defined SHARED && (IS_IN (libc) \
+                              || IS_IN (libpthread))))
extern uintptr_t __pointer_chk_guard_local attribute_relro attribute_hidden;
+#  define PTR_MANGLE(var) \
+  (var) = (__typeof (var)) ((uintptr_t) (var) ^ __pointer_chk_guard_local)
+# else
extern uintptr_t __pointer_chk_guard attribute_relro;
+#  define PTR_MANGLE(var) \
+  (var) = (__typeof(var)) ((uintptr_t) (var) ^ __pointer_chk_guard)
+# endif
+# define PTR_DEMANGLE(var)  PTR_MANGLE(var)
+#endif /* ASSEMBLER */
+
+#endif /* _LINUX_ALPHA_SYSDEP_H */
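
Note: the C-side helpers added above are small enough to exercise in isolation. The sketch below copies syscall_promote and the error-decoding pair (INTERNAL_SYSCALL_ERROR_P / INTERNAL_SYSCALL_ERRNO) out of the patch and runs them on an ordinary LP64 host; the demo_* names and the wrapper function are illustrative stand-ins, not part of glibc.

    #include <errno.h>
    #include <stdio.h>

    /* Copies of helpers from the patch, renamed with a demo_ prefix so the
       example is self-contained; the real macros live in sysdep.h above.  */
    #define demo_syscall_promote(arg) \
      (sizeof (arg) == 4 ? (long int)(int)(long int)(arg) : (long int)(arg))
    #define demo_syscall_error_p(val)  ((unsigned long) (val) > -4096UL)
    #define demo_syscall_errno(val)    (-(val))

    /* Rough equivalent of what INLINE_SYSCALL does with a raw kernel return
       value: results in [-4095, -1] encode a negated errno, anything else
       is a success value.  */
    static long demo_wrap (long raw)
    {
      if (demo_syscall_error_p (raw))
        {
          errno = demo_syscall_errno (raw);
          return -1;
        }
      return raw;
    }

    int main (void)
    {
      /* 32-bit arguments are sign-extended regardless of their declared
         sign, matching the Alpha calling convention (assumes LP64).  */
      unsigned int u32 = 0xffffffffu;
      unsigned long u64 = 0xffffffffUL;
      printf ("promote(u32) = %ld\n", demo_syscall_promote (u32)); /* -1 */
      printf ("promote(u64) = %ld\n", demo_syscall_promote (u64)); /* 4294967295 */

      /* Error decoding: raw -ENOENT becomes errno = ENOENT, return -1.  */
      printf ("wrap(42) = %ld\n", demo_wrap (42));
      long r = demo_wrap (-ENOENT);
      printf ("wrap(-ENOENT) = %ld, errno = %d\n", r, errno);
      return 0;
    }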
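
For the pointer-mangling block, here is a minimal host-runnable sketch of the C variant of PTR_MANGLE / PTR_DEMANGLE. The guard value below is a hypothetical stand-in for glibc's __pointer_chk_guard, which is really initialized by the dynamic loader; the point is only that mangling and demangling are the same XOR, so a round trip restores the original pointer while the stored value is obscured.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for glibc's internal guard variable.  */
    static uintptr_t demo_pointer_chk_guard = (uintptr_t) 0x5a5a5a5a5a5a5a5aULL;

    /* Same shape as the C-side PTR_MANGLE/PTR_DEMANGLE in the patch,
       but XORing against the local demo guard.  */
    #define DEMO_PTR_MANGLE(var) \
      (var) = (__typeof (var)) ((uintptr_t) (var) ^ demo_pointer_chk_guard)
    #define DEMO_PTR_DEMANGLE(var) DEMO_PTR_MANGLE (var)

    int main (void)
    {
      char secret[] = "saved return address, say";
      char *p = secret;
      char *saved = p;

      DEMO_PTR_MANGLE (p);            /* what would be stored in memory */
      printf ("mangled:  %p\n", (void *) p);
      DEMO_PTR_DEMANGLE (p);          /* the same XOR restores the pointer */
      printf ("restored: %p (matches original: %d)\n", (void *) p, p == saved);
      return 0;
    }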