author | Zack Weinberg <zackw@panix.com> | 2017-06-08 15:39:03 -0400
committer | Zack Weinberg <zackw@panix.com> | 2017-06-08 15:39:03 -0400
commit | 5046dbb4a7eba5eccfd258f92f4735c9ffc8d069 (patch)
tree | 4470480d904b65cf14ca524f96f79eca818c3eaf /REORG.TODO/sysdeps/arm
parent | 199fc19d3aaaf57944ef036e15904febe877fc93 (diff)
Prepare for radical source tree reorganization. (branch: zack/build-layout-experiment)
All top-level files and directories are moved into a temporary storage
directory, REORG.TODO, except for files that will certainly still
exist in their current form at top level when we're done (COPYING,
COPYING.LIB, LICENSES, NEWS, README), all old ChangeLog files (which
are moved to the new directory OldChangeLogs, instead), and the
generated file INSTALL (which is just deleted; in the new order, there
will be no generated files checked into version control).
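
For illustration only (derived from the description above, not part of the original commit message), the repository top level after this commit would look roughly like:

    COPYING  COPYING.LIB  LICENSES  NEWS  README
    OldChangeLogs/   (the old ChangeLog files)
    REORG.TODO/      (temporary home for everything else)

with the generated INSTALL file no longer checked in.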
Diffstat (limited to 'REORG.TODO/sysdeps/arm')
149 files changed, 12421 insertions, 0 deletions
diff --git a/REORG.TODO/sysdeps/arm/Implies b/REORG.TODO/sysdeps/arm/Implies
new file mode 100644
index 0000000000..780c4e2467
--- /dev/null
+++ b/REORG.TODO/sysdeps/arm/Implies
@@ -0,0 +1,3 @@
+wordsize-32
+ieee754/flt-32
+ieee754/dbl-64
diff --git a/REORG.TODO/sysdeps/arm/Makefile b/REORG.TODO/sysdeps/arm/Makefile
new file mode 100644
index 0000000000..2849aeda42
--- /dev/null
+++ b/REORG.TODO/sysdeps/arm/Makefile
@@ -0,0 +1,79 @@
+gnulib-arch = $(elf-objpfx)libgcc-stubs.a
+static-gnulib-arch = $(elf-objpfx)libgcc-stubs.a
+
+# All supported build tools support TLS descriptors, but the OS may not.
+ifndef have-arm-tls-desc
+have-arm-tls-desc = yes
+endif
+
+ifeq ($(subdir),elf)
+sysdep-dl-routines += tlsdesc dl-tlsdesc
+sysdep_routines += aeabi_unwind_cpp_pr1 find_exidx
+sysdep-rtld-routines += aeabi_unwind_cpp_pr1
+shared-only-routines += aeabi_unwind_cpp_pr1
+
+$(objpfx)libgcc-stubs.a: $(objpfx)aeabi_unwind_cpp_pr1.os
+	$(build-extra-lib)
+
+lib-noranlib: $(objpfx)libgcc-stubs.a
+
+ifeq ($(build-shared),yes)
+ifeq ($(have-arm-tls-desc),yes)
+tests += tst-armtlsdescloc tst-armtlsdescextnow tst-armtlsdescextlazy
+modules-names += tst-armtlsdesclocmod
+modules-names += tst-armtlsdescextlazymod tst-armtlsdescextnowmod
+CPPFLAGS-tst-armtlsdescextnowmod.c += -Dstatic=
+CPPFLAGS-tst-armtlsdescextlazymod.c += -Dstatic=
+CFLAGS-tst-armtlsdesclocmod.c += -mtls-dialect=gnu2
+CFLAGS-tst-armtlsdescextnowmod.c += -mtls-dialect=gnu2
+CFLAGS-tst-armtlsdescextlazymod.c += -mtls-dialect=gnu2
+LDFLAGS-tst-armtlsdescextnowmod.so += -Wl,-z,now
+tst-armtlsdescloc-ENV = LD_BIND_NOW=1
+tst-armtlsdescextnow-ENV = LD_BIND_NOW=1
+tst-armtlsdescextlazy-ENV = LD_BIND_NOW=1
+$(objpfx)tst-armtlsdescloc: $(objpfx)tst-armtlsdesclocmod.so
+$(objpfx)tst-armtlsdescextnow: $(objpfx)tst-armtlsdescextnowmod.so
+$(objpfx)tst-armtlsdescextlazy: $(objpfx)tst-armtlsdescextlazymod.so
+endif
+endif
+endif
+
+ifeq ($(subdir),csu)
+# get offset to rtld_global._dl_hwcap
+gen-as-const-headers += rtld-global-offsets.sym tlsdesc.sym
+aeabi_constants = aeabi_lcsts aeabi_sighandlers aeabi_math
+aeabi_routines = aeabi_assert aeabi_localeconv aeabi_errno_addr \
+  aeabi_mb_cur_max aeabi_atexit aeabi_memclr aeabi_memcpy \
+  aeabi_memmove aeabi_memset \
+  aeabi_read_tp libc-aeabi_read_tp
+
+sysdep_routines += $(aeabi_constants) $(aeabi_routines)
+static-only-routines += $(aeabi_constants) aeabi_read_tp
+shared-only-routines += libc-aeabi_read_tp
+
+# In order for unwinding to fail when it falls out of main, we need a
+# cantunwind marker. There's one in start.S. To make sure we reach it, add
+# unwind tables for __libc_start_main.
+CFLAGS-libc-start.c += -fexceptions + +sysdep_routines += arm-unwind-resume +shared-only-routines += arm-unwind-resume +endif + +ifeq ($(subdir),gmon) +sysdep_routines += arm-mcount +endif + +ifeq ($(subdir),debug) +CFLAGS-backtrace.c += -funwind-tables +endif + +ifeq ($(subdir),rt) +librt-sysdep_routines += rt-aeabi_unwind_cpp_pr1 rt-arm-unwind-resume +librt-shared-only-routines += rt-aeabi_unwind_cpp_pr1 rt-arm-unwind-resume +endif + +ifeq ($(subdir),nptl) +libpthread-sysdep_routines += pt-arm-unwind-resume +libpthread-shared-only-routines += pt-arm-unwind-resume +endif diff --git a/REORG.TODO/sysdeps/arm/Versions b/REORG.TODO/sysdeps/arm/Versions new file mode 100644 index 0000000000..f26a1a7606 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/Versions @@ -0,0 +1,24 @@ +libc { + GLIBC_2.4 { + # ARM EABI compatibility routines + __aeabi_assert; + __aeabi_atexit; + __aeabi_errno_addr; + __aeabi_localeconv; + __aeabi_MB_CUR_MAX; + __aeabi_memclr; __aeabi_memclr4; __aeabi_memclr8; + __aeabi_memcpy; __aeabi_memcpy4; __aeabi_memcpy8; + __aeabi_memmove; __aeabi_memmove4; __aeabi_memmove8; + __aeabi_memset; __aeabi_memset4; __aeabi_memset8; + + # Helper routines + __gnu_Unwind_Find_exidx; + } + GLIBC_2.8 { + __gnu_mcount_nc; + } + GLIBC_2.19 { + # This set has to exist in some Versions file so we can use 2.19 in + # SHLIB_COMPAT. Since it didn't exist anywhere else, we add it here. + } +} diff --git a/REORG.TODO/sysdeps/arm/__longjmp.S b/REORG.TODO/sysdeps/arm/__longjmp.S new file mode 100644 index 0000000000..5202c728bc --- /dev/null +++ b/REORG.TODO/sysdeps/arm/__longjmp.S @@ -0,0 +1,121 @@ +/* longjmp for ARM. + Copyright (C) 1997-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> +#include <stap-probe.h> +#include <bits/setjmp.h> +#include <rtld-global-offsets.h> +#include <arm-features.h> + +/* __longjmp(jmpbuf, val) */ + +ENTRY (__longjmp) + mov ip, r0 + +#ifdef CHECK_SP + ldr r4, [ip] /* jmpbuf's sp */ + cfi_undefined (r4) +#ifdef PTR_DEMANGLE + PTR_DEMANGLE (r4, r4, a3, a4) +#endif + CHECK_SP (r4) +#endif + +#ifdef PTR_DEMANGLE + ldr a4, [ip], #4 + PTR_DEMANGLE (a4, a4, a3, r4) + cfi_undefined (r4) + ldr r4, [ip], #4 + PTR_DEMANGLE2 (r4, r4, a3) +#else + ldr a4, [ip], #4 + ldr r4, [ip], #4 + cfi_undefined (r4) +#endif + /* longjmp probe expects longjmp first argument (4@r0), second + argument (-4@r1), and target address (4@r4), respectively. 
*/ + LIBC_PROBE (longjmp, 3, 4@r0, -4@r1, 4@r4) + mov sp, a4 + mov lr, r4 + ldmia ip!, JMP_BUF_REGLIST + cfi_restore (v1) + cfi_restore (v2) + cfi_restore (v3) + cfi_restore (v4) + cfi_restore (v5) + cfi_restore (v6) + cfi_restore (sl) + cfi_restore (fp) + cfi_restore (sp) + cfi_restore (lr) + +#if !defined ARM_ASSUME_NO_IWMMXT || defined __SOFTFP__ +# define NEED_HWCAP 1 +#endif + +#ifdef NEED_HWCAP +# if IS_IN (rtld) + LDST_PCREL (ldr, a4, a3, \ + C_SYMBOL_NAME(_rtld_local_ro) \ + + RTLD_GLOBAL_RO_DL_HWCAP_OFFSET) +# else +# ifdef SHARED + LDR_GLOBAL (a4, a3, C_SYMBOL_NAME(_rtld_global_ro), \ + RTLD_GLOBAL_RO_DL_HWCAP_OFFSET) +# else + LDR_GLOBAL (a4, a3, C_SYMBOL_NAME(_dl_hwcap), 0) +# endif +# endif +#endif + +#ifdef __SOFTFP__ + tst a4, #HWCAP_ARM_VFP + beq .Lno_vfp +#endif + + /* Restore the VFP registers. */ + /* Following instruction is vldmia ip!, {d8-d15}. */ + ldc p11, cr8, [r12], #64 +.Lno_vfp: + +#ifndef ARM_ASSUME_NO_IWMMXT + tst a4, #HWCAP_ARM_IWMMXT + beq .Lno_iwmmxt + + /* Restore the call-preserved iWMMXt registers. */ + /* Following instructions are wldrd wr10, [ip], #8 (etc.) */ + ldcl p1, cr10, [r12], #8 + ldcl p1, cr11, [r12], #8 + ldcl p1, cr12, [r12], #8 + ldcl p1, cr13, [r12], #8 + ldcl p1, cr14, [r12], #8 + ldcl p1, cr15, [r12], #8 +.Lno_iwmmxt: +#endif + + /* longjmp_target probe expects longjmp first argument (4@r0), second + argument (-4@r1), and target address (4@r14), respectively. */ + LIBC_PROBE (longjmp_target, 3, 4@r0, -4@r1, 4@r14) + + movs r0, r1 /* get the return value in place */ + it eq + moveq r0, #1 /* can't let setjmp() return zero! */ + + DO_RET(lr) + +END (__longjmp) diff --git a/REORG.TODO/sysdeps/arm/abi-note.S b/REORG.TODO/sysdeps/arm/abi-note.S new file mode 100644 index 0000000000..07bd4c4619 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/abi-note.S @@ -0,0 +1,8 @@ +/* Tag_ABI_align8_preserved: This code preserves 8-byte + alignment in any callee. */ + .eabi_attribute 25, 1 +/* Tag_ABI_align8_needed: This code may require 8-byte alignment from + the caller. */ + .eabi_attribute 24, 1 + +#include <csu/abi-note.S> diff --git a/REORG.TODO/sysdeps/arm/add_n.S b/REORG.TODO/sysdeps/arm/add_n.S new file mode 100644 index 0000000000..811a769959 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/add_n.S @@ -0,0 +1,90 @@ +/* mpn_add_n -- add (or subtract) bignums. + Copyright (C) 2013-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. 
*/ + +#include <sysdep.h> +#include <arm-features.h> + + .syntax unified + .text + +#ifdef USE_AS_SUB_N +# define INITC cmp r0, r0 +# define OPC sbcs +# define RETC sbc r0, r0, r0; neg r0, r0 +# define FUNC __mpn_sub_n +#else +# define INITC cmn r0, #0 +# define OPC adcs +# define RETC mov r0, #0; adc r0, r0, r0 +# define FUNC __mpn_add_n +#endif + +/* mp_limb_t mpn_add_n(res_ptr, src1_ptr, src2_ptr, size) */ + +ENTRY (FUNC) + push { r4, r5, r6, r7, r8, r10, lr } + cfi_adjust_cfa_offset (28) + cfi_rel_offset (r4, 0) + cfi_rel_offset (r5, 4) + cfi_rel_offset (r6, 8) + cfi_rel_offset (r7, 12) + cfi_rel_offset (r8, 16) + cfi_rel_offset (r10, 20) + cfi_rel_offset (lr, 24) + + INITC /* initialize carry flag */ + tst r3, #1 /* count & 1 == 1? */ + add lr, r1, r3, lsl #2 /* compute end src1 */ + beq 1f + + ldr r4, [r1], #4 /* do one to make count even */ + ldr r5, [r2], #4 + OPC r4, r4, r5 + teq r1, lr /* end of count? (preserve carry) */ + str r4, [r0], #4 + beq 9f +1: + tst r3, #2 /* count & 2 == 2? */ + beq 2f + ldm r1!, { r4, r5 } /* do two to make count 0 mod 4 */ + ldm r2!, { r6, r7 } + OPC r4, r4, r6 + OPC r5, r5, r7 + teq r1, lr /* end of count? */ + stm r0!, { r4, r5 } + beq 9f +2: + ldm r1!, { r3, r5, r7, r10 } /* do four each loop */ + ldm r2!, { r4, r6, r8, ip } + OPC r3, r3, r4 + OPC r5, r5, r6 + OPC r7, r7, r8 + OPC r10, r10, ip + teq r1, lr + stm r0!, { r3, r5, r7, r10 } + bne 2b + +9: + RETC /* copy carry out */ +#ifndef ARM_ALWAYS_BX + pop { r4, r5, r6, r7, r8, r10, pc } +#else + pop { r4, r5, r6, r7, r8, r10, lr } + bx lr +#endif +END (FUNC) diff --git a/REORG.TODO/sysdeps/arm/addmul_1.S b/REORG.TODO/sysdeps/arm/addmul_1.S new file mode 100644 index 0000000000..c4e54f8723 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/addmul_1.S @@ -0,0 +1,67 @@ +/* mpn_addmul_1 -- multiply and accumulate bignums. + Copyright (C) 2013-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> + + .syntax unified + .text + +@ cycles/limb +@ StrongArm ? +@ Cortex-A8 ? +@ Cortex-A9 ? 
+@ Cortex-A15 4 + +/* mp_limb_t mpn_addmul_1(res_ptr, src1_ptr, size, s2_limb) */ + +ENTRY (__mpn_addmul_1) + push { r4, r5, r6, r7 } + cfi_adjust_cfa_offset (16) + cfi_rel_offset (r4, 0) + cfi_rel_offset (r5, 4) + cfi_rel_offset (r6, 8) + cfi_rel_offset (r7, 12) + + ldr r6, [r1], #4 + ldr r5, [r0] + mov r4, #0 /* init carry in */ + b 1f +0: + ldr r6, [r1], #4 /* load next ul */ + adds r7, r4, r5 /* (out, c) = cl + lpl */ + ldr r5, [r0, #4] /* load next rl */ + adc r4, ip, #0 /* cl = hpl + c */ + str r7, [r0], #4 +1: + mov ip, #0 /* zero-extend rl */ + umlal r5, ip, r6, r3 /* (hpl, lpl) = ul * vl + rl */ + subs r2, r2, #1 + bne 0b + + adds r4, r4, r5 /* (out, c) = cl + llpl */ + str r4, [r0] + adc r0, ip, #0 /* return hpl + c */ + + pop { r4, r5, r6, r7 } + cfi_adjust_cfa_offset (-16) + cfi_restore (r4) + cfi_restore (r5) + cfi_restore (r6) + cfi_restore (r7) + DO_RET (lr) +END (__mpn_addmul_1) diff --git a/REORG.TODO/sysdeps/arm/aeabi_assert.c b/REORG.TODO/sysdeps/arm/aeabi_assert.c new file mode 100644 index 0000000000..1cb710da8f --- /dev/null +++ b/REORG.TODO/sysdeps/arm/aeabi_assert.c @@ -0,0 +1,26 @@ +/* Copyright (C) 2004-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <assert.h> +#include <stdlib.h> + +void +__aeabi_assert (const char *assertion, const char *file, + unsigned int line) +{ + __assert_fail (assertion, file, line, NULL); +} diff --git a/REORG.TODO/sysdeps/arm/aeabi_atexit.c b/REORG.TODO/sysdeps/arm/aeabi_atexit.c new file mode 100644 index 0000000000..28fcee2d66 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/aeabi_atexit.c @@ -0,0 +1,27 @@ +/* Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <stdlib.h> + +/* Register a function to be called by exit or when a shared library + is unloaded. This routine is like __cxa_atexit, but uses the + calling sequence required by the ARM EABI. 
*/ +int +__aeabi_atexit (void *arg, void (*func) (void *), void *d) +{ + return __cxa_atexit (func, arg, d); +} diff --git a/REORG.TODO/sysdeps/arm/aeabi_errno_addr.c b/REORG.TODO/sysdeps/arm/aeabi_errno_addr.c new file mode 100644 index 0000000000..6d3a1cd233 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/aeabi_errno_addr.c @@ -0,0 +1,24 @@ +/* Copyright (C) 2004-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <errno.h> + +volatile int * +__aeabi_errno_addr (void) +{ + return &errno; +} diff --git a/REORG.TODO/sysdeps/arm/aeabi_lcsts.c b/REORG.TODO/sysdeps/arm/aeabi_lcsts.c new file mode 100644 index 0000000000..971ed26cdc --- /dev/null +++ b/REORG.TODO/sysdeps/arm/aeabi_lcsts.c @@ -0,0 +1,98 @@ +/* Link-time constants for ARM EABI. + Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + In addition to the permissions in the GNU Lesser General Public + License, the Free Software Foundation gives you unlimited + permission to link the compiled version of this file with other + programs, and to distribute those programs without any restriction + coming from the use of this file. (The GNU Lesser General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into another program.) + + Note that people who make modified versions of this file are not + obligated to grant this special exception for their modified + versions; it is their choice whether to do so. The GNU Lesser + General Public License gives permission to release a modified + version without this exception; this exception also makes it + possible to release a modified version which carries forward this + exception. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* The ARM EABI requires that we provide ISO compile-time constants as + link-time constants. Some portable applications may reference these. 
*/ + +#include <errno.h> +#include <limits.h> +#include <locale.h> +#include <setjmp.h> +#include <signal.h> +#include <stdio.h> +#include <time.h> + +#define eabi_constant2(X,Y) const int __aeabi_##X attribute_hidden = Y +#define eabi_constant(X) const int __aeabi_##X attribute_hidden = X + +eabi_constant (EDOM); +eabi_constant (ERANGE); +eabi_constant (EILSEQ); + +eabi_constant (MB_LEN_MAX); + +eabi_constant (LC_COLLATE); +eabi_constant (LC_CTYPE); +eabi_constant (LC_MONETARY); +eabi_constant (LC_NUMERIC); +eabi_constant (LC_TIME); +eabi_constant (LC_ALL); + +/* The value of __aeabi_JMP_BUF_SIZE is the number of doublewords in a + jmp_buf. */ +eabi_constant2 (JMP_BUF_SIZE, sizeof (jmp_buf) / 8); + +eabi_constant (SIGABRT); +eabi_constant (SIGFPE); +eabi_constant (SIGILL); +eabi_constant (SIGINT); +eabi_constant (SIGSEGV); +eabi_constant (SIGTERM); + +eabi_constant2 (IOFBF, _IOFBF); +eabi_constant2 (IOLBF, _IOLBF); +eabi_constant2 (IONBF, _IONBF); +eabi_constant (BUFSIZ); +eabi_constant (FOPEN_MAX); +eabi_constant (TMP_MAX); +eabi_constant (FILENAME_MAX); +eabi_constant (L_tmpnam); + +FILE *__aeabi_stdin attribute_hidden; +FILE *__aeabi_stdout attribute_hidden; +FILE *__aeabi_stderr attribute_hidden; + +static void __attribute__ ((used)) +setup_aeabi_stdio (void) +{ + __aeabi_stdin = stdin; + __aeabi_stdout = stdout; + __aeabi_stderr = stderr; +} + +static void (*fp) (void) __attribute__ ((used, section (".preinit_array"))) + = setup_aeabi_stdio; + +eabi_constant (CLOCKS_PER_SEC); diff --git a/REORG.TODO/sysdeps/arm/aeabi_localeconv.c b/REORG.TODO/sysdeps/arm/aeabi_localeconv.c new file mode 100644 index 0000000000..4c410ab35c --- /dev/null +++ b/REORG.TODO/sysdeps/arm/aeabi_localeconv.c @@ -0,0 +1,24 @@ +/* Copyright (C) 2004-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <locale.h> + +struct lconv * +__aeabi_localeconv (void) +{ + return __localeconv (); +} diff --git a/REORG.TODO/sysdeps/arm/aeabi_math.c b/REORG.TODO/sysdeps/arm/aeabi_math.c new file mode 100644 index 0000000000..f516672057 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/aeabi_math.c @@ -0,0 +1,41 @@ +/* Copyright (C) 2004-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + In addition to the permissions in the GNU Lesser General Public + License, the Free Software Foundation gives you unlimited + permission to link the compiled version of this file with other + programs, and to distribute those programs without any restriction + coming from the use of this file. 
(The GNU Lesser General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into another program.) + + Note that people who make modified versions of this file are not + obligated to grant this special exception for their modified + versions; it is their choice whether to do so. The GNU Lesser + General Public License gives permission to release a modified + version without this exception; this exception also makes it + possible to release a modified version which carries forward this + exception. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <math.h> + +const double __aeabi_HUGE_VAL attribute_hidden = HUGE_VAL; +const long double __aeabi_HUGE_VALL attribute_hidden = HUGE_VALL; +const float __aeabi_HUGE_VALF attribute_hidden = HUGE_VALF; +const float __aeabi_INFINITY attribute_hidden = INFINITY; +const float __aeabi_NAN attribute_hidden = NAN; diff --git a/REORG.TODO/sysdeps/arm/aeabi_mb_cur_max.c b/REORG.TODO/sysdeps/arm/aeabi_mb_cur_max.c new file mode 100644 index 0000000000..dab1d6d974 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/aeabi_mb_cur_max.c @@ -0,0 +1,27 @@ +/* Copyright (C) 2004-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <langinfo.h> +#include <locale.h> +#include <stdlib.h> +#include <locale/localeinfo.h> + +int +__aeabi_MB_CUR_MAX (void) +{ + return MB_CUR_MAX; +} diff --git a/REORG.TODO/sysdeps/arm/aeabi_memclr.c b/REORG.TODO/sysdeps/arm/aeabi_memclr.c new file mode 100644 index 0000000000..6687e49c9e --- /dev/null +++ b/REORG.TODO/sysdeps/arm/aeabi_memclr.c @@ -0,0 +1,30 @@ +/* Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <string.h> + +/* Clear memory. 
Can't alias to bzero because it's not defined in the + same translation unit. */ +void +__aeabi_memclr (void *dest, size_t n) +{ + __bzero (dest, n); +} + +/* Versions of the above which may assume memory alignment. */ +strong_alias (__aeabi_memclr, __aeabi_memclr4) +strong_alias (__aeabi_memclr, __aeabi_memclr8) diff --git a/REORG.TODO/sysdeps/arm/aeabi_memcpy.c b/REORG.TODO/sysdeps/arm/aeabi_memcpy.c new file mode 100644 index 0000000000..0820084f00 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/aeabi_memcpy.c @@ -0,0 +1,31 @@ +/* Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <string.h> + +/* Copy memory like memcpy, but no return value required. Can't alias + to memcpy because it's not defined in the same translation + unit. */ +void +__aeabi_memcpy (void *dest, const void *src, size_t n) +{ + memcpy (dest, src, n); +} + +/* Versions of the above which may assume memory alignment. */ +strong_alias (__aeabi_memcpy, __aeabi_memcpy4) +strong_alias (__aeabi_memcpy, __aeabi_memcpy8) diff --git a/REORG.TODO/sysdeps/arm/aeabi_memmove.c b/REORG.TODO/sysdeps/arm/aeabi_memmove.c new file mode 100644 index 0000000000..4166f84612 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/aeabi_memmove.c @@ -0,0 +1,31 @@ +/* Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <string.h> + +/* Copy memory like memmove, but no return value required. Can't + alias to memmove because it's not defined in the same translation + unit. */ +void +__aeabi_memmove (void *dest, const void *src, size_t n) +{ + memmove (dest, src, n); +} + +/* Versions of the above which may assume memory alignment. */ +strong_alias (__aeabi_memmove, __aeabi_memmove4) +strong_alias (__aeabi_memmove, __aeabi_memmove8) diff --git a/REORG.TODO/sysdeps/arm/aeabi_memset.c b/REORG.TODO/sysdeps/arm/aeabi_memset.c new file mode 100644 index 0000000000..84977c2da0 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/aeabi_memset.c @@ -0,0 +1,30 @@ +/* Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <string.h> + +/* Set memory like memset, but different argument order and no return + value required. */ +void +__aeabi_memset (void *dest, size_t n, int c) +{ + memset (dest, c, n); +} + +/* Versions of the above which may assume memory alignment. */ +strong_alias (__aeabi_memset, __aeabi_memset4) +strong_alias (__aeabi_memset, __aeabi_memset8) diff --git a/REORG.TODO/sysdeps/arm/aeabi_sighandlers.S b/REORG.TODO/sysdeps/arm/aeabi_sighandlers.S new file mode 100644 index 0000000000..895edbd5a1 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/aeabi_sighandlers.S @@ -0,0 +1,53 @@ +/* Link-time constants for ARM EABI - signal handlers. + Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + In addition to the permissions in the GNU Lesser General Public + License, the Free Software Foundation gives you unlimited + permission to link the compiled version of this file with other + programs, and to distribute those programs without any restriction + coming from the use of this file. (The GNU Lesser General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into another program.) + + Note that people who make modified versions of this file are not + obligated to grant this special exception for their modified + versions; it is their choice whether to do so. The GNU Lesser + General Public License gives permission to release a modified + version without this exception; this exception also makes it + possible to release a modified version which carries forward this + exception. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* The ARM EABI defines these as "functions". 
*/ + +#include <sysdep.h> + + .global __aeabi_SIG_DFL + .hidden __aeabi_SIG_DFL + .type __aeabi_SIG_DFL, %function + .set __aeabi_SIG_DFL, 0 + + .global __aeabi_SIG_IGN + .hidden __aeabi_SIG_IGN + .type __aeabi_SIG_IGN, %function + .set __aeabi_SIG_IGN, 1 + + .global __aeabi_SIG_ERR + .hidden __aeabi_SIG_ERR + .type __aeabi_SIG_ERR, %function + .set __aeabi_SIG_ERR, -1 diff --git a/REORG.TODO/sysdeps/arm/aeabi_unwind_cpp_pr1.c b/REORG.TODO/sysdeps/arm/aeabi_unwind_cpp_pr1.c new file mode 100644 index 0000000000..dcef8aa480 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/aeabi_unwind_cpp_pr1.c @@ -0,0 +1,51 @@ +/* Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Because some objects in ld.so and libc.so are built with + -fexceptions, we end up with references to this personality + routine. However, these libraries are not linked against + libgcc_eh.a, so we need a dummy definition. This routine will + never actually be called. */ + +#include <stdlib.h> + +attribute_hidden +void +__aeabi_unwind_cpp_pr0 (void) +{ +#if !IS_IN (rtld) + abort (); +#endif +} + +attribute_hidden +void +__aeabi_unwind_cpp_pr1 (void) +{ +#if !IS_IN (rtld) + abort (); +#endif +} + +attribute_hidden +void +__aeabi_unwind_cpp_pr2 (void) +{ +#if !IS_IN (rtld) + abort (); +#endif +} diff --git a/REORG.TODO/sysdeps/arm/arm-features.h b/REORG.TODO/sysdeps/arm/arm-features.h new file mode 100644 index 0000000000..400af0f75a --- /dev/null +++ b/REORG.TODO/sysdeps/arm/arm-features.h @@ -0,0 +1,59 @@ +/* Macros to test for CPU features on ARM. Generic ARM version. + Copyright (C) 2012-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _ARM_ARM_FEATURES_H +#define _ARM_ARM_FEATURES_H 1 + +/* An OS-specific arm-features.h file should define ARM_HAVE_VFP to + an appropriate expression for testing at runtime whether the VFP + hardware is present. We'll then redefine it to a constant if we + know at compile time that we can assume VFP. */ + +#ifndef __SOFTFP__ +/* The compiler is generating VFP instructions, so we're already + assuming the hardware exists. 
*/ +# undef ARM_HAVE_VFP +# define ARM_HAVE_VFP 1 +#endif + +/* An OS-specific arm-features.h file may define ARM_ASSUME_NO_IWMMXT + to indicate at compile time that iWMMXt hardware is never present + at runtime (or that we never care about its state) and so need not + be checked for. */ + +/* A more-specific arm-features.h file may define ARM_ALWAYS_BX to indicate + that instructions using pc as a destination register must never be used, + so a "bx" (or "blx") instruction is always required. */ + +/* The log2 of the minimum alignment required for an address that + is the target of a computed branch (i.e. a "bx" instruction). + A more-specific arm-features.h file may define this to set a more + stringent requirement. + + Using this only makes sense for code in ARM mode (where instructions + always have a fixed size of four bytes), or for Thumb-mode code that is + specifically aligning all the related branch targets to match (since + Thumb instructions might be either two or four bytes). */ +#ifndef ARM_BX_ALIGN_LOG2 +# define ARM_BX_ALIGN_LOG2 2 +#endif + +/* An OS-specific arm-features.h file may define ARM_NO_INDEX_REGISTER to + indicate that the two-register addressing modes must never be used. */ + +#endif /* arm-features.h */ diff --git a/REORG.TODO/sysdeps/arm/arm-mcount.S b/REORG.TODO/sysdeps/arm/arm-mcount.S new file mode 100644 index 0000000000..df2601b4a6 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/arm-mcount.S @@ -0,0 +1,128 @@ +/* Implementation of profiling support. ARM EABI version. + Copyright (C) 2008-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Don't call mcount when calling mcount... */ +#undef PROF + +#include <sysdep.h> + +#undef mcount + +#ifdef __thumb2__ + .thumb +#endif + .syntax unified + + +/* Use an assembly stub with a special ABI. The calling lr has been + pushed to the stack (which will be misaligned). We should preserve + all registers except ip and pop a word off the stack. + + NOTE: This assumes mcount_internal does not clobber any non-core + (coprocessor) registers. Currently this is true, but may require + additional attention in the future. 
+ + The calling sequence looks something like: +func: + push {lr} + bl __gnu_mcount_nc + <function body> +*/ + +ENTRY(__gnu_mcount_nc) + push {r0, r1, r2, r3, lr} + cfi_adjust_cfa_offset (20) + cfi_rel_offset (r0, 0) + cfi_rel_offset (r1, 4) + cfi_rel_offset (r2, 8) + cfi_rel_offset (r3, 12) + cfi_rel_offset (lr, 16) + bic r1, lr, #1 + ldr r0, [sp, #20] + bl __mcount_internal + pop {r0, r1, r2, r3, ip, lr} + cfi_adjust_cfa_offset (-24) + cfi_restore (r0) + cfi_restore (r1) + cfi_restore (r2) + cfi_restore (r3) + cfi_register (lr, ip) + bx ip +END(__gnu_mcount_nc) + + +#include <gcc-compat.h> +#include <shlib-compat.h> + +/* The new __gnu_mcount_nc entry point was introduced in 4.4, so the + static library needs the old one only to support older compilers. + Even in a configuration that only cares about newer compilers, the + shared library might need it only for strict ABI compatibility. */ + +#if GCC_COMPAT (4, 3) || SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_19) + +/* Provide old mcount for backwards compatibility. This requires + code be compiled with APCS frame pointers. */ + +ENTRY(__mcount_arm_compat) + push {r0, r1, r2, r3, fp, lr} + cfi_adjust_cfa_offset (24) + cfi_rel_offset (r0, 0) + cfi_rel_offset (r1, 4) + cfi_rel_offset (r2, 8) + cfi_rel_offset (r3, 12) + cfi_rel_offset (fp, 16) + cfi_rel_offset (lr, 20) + movs r0, fp + ittt ne + ldrne r0, [r0, #-4] + movsne r1, lr + blne __mcount_internal +# if defined (__ARM_ARCH_4T__) && defined (__THUMB_INTERWORK__) + pop {r0, r1, r2, r3, fp, lr} + cfi_adjust_cfa_offset (-24) + cfi_restore (r0) + cfi_restore (r1) + cfi_restore (r2) + cfi_restore (r3) + cfi_restore (fp) + cfi_restore (lr) + bx lr +# else + pop {r0, r1, r2, r3, fp, pc} +# endif +END(__mcount_arm_compat) + +#endif + +#if GCC_COMPAT (4, 3) + +strong_alias (__mcount_arm_compat, _mcount) + +/* The canonical name for the function is `_mcount' in both C and asm, + but some old asm code might assume it's `mcount'. */ +weak_alias (_mcount, mcount) + +#elif SHLIB_COMPAT (libc, GLIBC_2_0, GLIBC_2_19) + +compat_symbol (libc, __mcount_arm_compat, _mcount, GLIBC_2_0) + +strong_alias (__mcount_arm_compat, __mcount_arm_compat_1) +compat_symbol (libc, __mcount_arm_compat_1, mcount, GLIBC_2_0) + +#endif diff --git a/REORG.TODO/sysdeps/arm/arm-unwind-resume.S b/REORG.TODO/sysdeps/arm/arm-unwind-resume.S new file mode 100644 index 0000000000..990ff960e3 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/arm-unwind-resume.S @@ -0,0 +1,46 @@ +/* _Unwind_Resume wrapper for ARM EABI. + Copyright (C) 2015-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public License as + published by the Free Software Foundation; either version 2.1 of the + License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> + +/* This is just implementing exactly what the C version does. + We do it in assembly just to ensure that we get an unmolested tail + call to the libgcc function, which is necessary for the ARM unwinder. 
*/ + +ENTRY (_Unwind_Resume) + LDR_HIDDEN (ip, ip, __libgcc_s_resume, 0) + cmp ip, #0 + beq 1f +0: PTR_DEMANGLE (ip, ip, r2, r3) + bx ip + + /* We need to save and restore LR (for our own return address) + and R0 (for the argument to _Unwind_Resume) around the call. */ +1: push {r0, lr} + cfi_adjust_cfa_offset (8) + cfi_rel_offset (r0, 0) + cfi_rel_offset (lr, 4) + bl __libgcc_s_init + pop {r0, lr} + cfi_adjust_cfa_offset (-8) + cfi_restore (r0) + cfi_restore (lr) + + LDR_HIDDEN (ip, ip, __libgcc_s_resume, 0) + b 0b +END (_Unwind_Resume) diff --git a/REORG.TODO/sysdeps/arm/armv6/rawmemchr.S b/REORG.TODO/sysdeps/arm/armv6/rawmemchr.S new file mode 100644 index 0000000000..2511f105bd --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv6/rawmemchr.S @@ -0,0 +1,105 @@ +/* rawmemchr -- find a byte within an unsized memory block. + Copyright (C) 2013-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> + + .syntax unified + .text + +ENTRY (__rawmemchr) + @ r0 = start of string + @ r1 = character to match + @ returns a pointer to the match, which must be present. + ldrb r2, [r0] @ load first byte asap + + @ To cater to long strings, we want to search through a few + @ characters until we reach an aligned pointer. To cater to + @ small strings, we don't want to start doing word operations + @ immediately. The compromise is a maximum of 16 bytes less + @ whatever is required to end with an aligned pointer. + @ r3 = number of characters to search in alignment loop + and r3, r0, #7 + uxtb r1, r1 + rsb r3, r3, #15 @ 16 - 1 peeled loop iteration + cmp r2, r1 + it eq + bxeq lr + + @ Loop until we find ... +1: ldrb r2, [r0, #1]! + subs r3, r3, #1 @ ... the alignment point + it ne + cmpne r2, r1 @ ... or C + bne 1b + + @ Disambiguate the exit possibilites above + cmp r2, r1 @ Found C + it eq + bxeq lr + add r0, r0, #1 + + @ So now we're aligned. + ldrd r2, r3, [r0], #8 + orr r1, r1, r1, lsl #8 @ Replicate C to all bytes +#ifdef ARCH_HAS_T2 + movw ip, #0x0101 + pld [r0, #64] + movt ip, #0x0101 +#else + ldr ip, =0x01010101 + pld [r0, #64] +#endif + orr r1, r1, r1, lsl #16 + + @ Loop searching for C, 8 bytes at a time. + @ Subtracting (unsigned saturating) from 1 means result of 1 for + @ any byte that was originally zero and 0 otherwise. Therefore + @ we consider the lsb of each byte the "found" bit. +2: eor r2, r2, r1 @ Convert C bytes to 0 + eor r3, r3, r1 + uqsub8 r2, ip, r2 @ Find C + uqsub8 r3, ip, r3 + pld [r0, #128] + orrs r3, r3, r2 @ Test both words for found + it eq + ldrdeq r2, r3, [r0], #8 + beq 2b + + @ Found something. Disambiguate between first and second words. + @ Adjust r0 to point to the word containing the match. + @ Adjust r2 to the found bits for the word containing the match. 
+ cmp r2, #0 + sub r0, r0, #4 + ite eq + moveq r2, r3 + subne r0, r0, #4 + + @ Find the bit-offset of the match within the word. Note that the + @ bit result from clz will be 7 higher than "true", but we'll + @ immediately discard those bits converting to a byte offset. +#ifdef __ARMEL__ + rev r2, r2 @ For LE, count from the little end +#endif + clz r2, r2 + add r0, r0, r2, lsr #3 @ Adjust the pointer to the found byte + bx lr + +END (__rawmemchr) + +weak_alias (__rawmemchr, rawmemchr) +libc_hidden_def (__rawmemchr) diff --git a/REORG.TODO/sysdeps/arm/armv6/stpcpy.S b/REORG.TODO/sysdeps/arm/armv6/stpcpy.S new file mode 100644 index 0000000000..21a4f385be --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv6/stpcpy.S @@ -0,0 +1 @@ +/* Defined in strcpy.S. */ diff --git a/REORG.TODO/sysdeps/arm/armv6/strchr.S b/REORG.TODO/sysdeps/arm/armv6/strchr.S new file mode 100644 index 0000000000..bfd0a6b237 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv6/strchr.S @@ -0,0 +1,143 @@ +/* strchr -- find the first instance of C in a nul-terminated string. + Copyright (C) 2013-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> + + .syntax unified + .text + +ENTRY (strchr) + @ r0 = start of string + @ r1 = character to match + @ returns NULL for no match, or a pointer to the match + ldrb r2, [r0] @ load the first byte asap + uxtb r1, r1 + + @ To cater to long strings, we want to search through a few + @ characters until we reach an aligned pointer. To cater to + @ small strings, we don't want to start doing word operations + @ immediately. The compromise is a maximum of 16 bytes less + @ whatever is required to end with an aligned pointer. + @ r3 = number of characters to search in alignment loop + and r3, r0, #7 + rsb r3, r3, #15 @ 16 - 1 peeled loop iteration + cmp r2, r1 @ Found C? + it ne + cmpne r2, #0 @ Found EOS? + beq 99f + + @ Loop until we find ... +1: ldrb r2, [r0, #1]! + subs r3, r3, #1 @ ... the aligment point + it ne + cmpne r2, r1 @ ... or the character + it ne + cmpne r2, #0 @ ... or EOS + bne 1b + + @ Disambiguate the exit possibilites above + cmp r2, r1 @ Found the character + it ne + cmpne r2, #0 @ Found EOS + beq 99f + add r0, r0, #1 + + @ So now we're aligned. Now we actually need a stack frame. + push { r4, r5, r6, r7 } + cfi_adjust_cfa_offset (16) + cfi_rel_offset (r4, 0) + cfi_rel_offset (r5, 4) + cfi_rel_offset (r6, 8) + cfi_rel_offset (r7, 12) + + ldrd r2, r3, [r0], #8 + orr r1, r1, r1, lsl #8 @ Replicate C to all bytes +#ifdef ARCH_HAS_T2 + movw ip, #0x0101 + pld [r0, #64] + movt ip, #0x0101 +#else + ldr ip, =0x01010101 + pld [r0, #64] +#endif + orr r1, r1, r1, lsl #16 + + @ Loop searching for EOS or C, 8 bytes at a time. +2: + @ Subtracting (unsigned saturating) from 1 means result of 1 for + @ any byte that was originally zero and 0 otherwise. 
Therefore + @ we consider the lsb of each byte the "found" bit. + uqsub8 r4, ip, r2 @ Find EOS + eor r6, r2, r1 @ Convert C bytes to 0 + uqsub8 r5, ip, r3 + eor r7, r3, r1 + uqsub8 r6, ip, r6 @ Find C + pld [r0, #128] @ Prefetch 2 lines ahead + uqsub8 r7, ip, r7 + orr r4, r4, r6 @ Combine found for EOS and C + orr r5, r5, r7 + orrs r6, r4, r5 @ Combine the two words + it eq + ldrdeq r2, r3, [r0], #8 + beq 2b + + @ Found something. Disambiguate between first and second words. + @ Adjust r0 to point to the word containing the match. + @ Adjust r2 to the contents of the word containing the match. + @ Adjust r4 to the found bits for the word containing the match. + cmp r4, #0 + sub r0, r0, #4 + itte eq + moveq r4, r5 + moveq r2, r3 + subne r0, r0, #4 + + @ Find the bit-offset of the match within the word. +#if defined(__ARMEL__) + @ For LE, swap the found word so clz searches from the little end. + rev r4, r4 +#else + @ For BE, byte swap the word to make it easier to extract the byte. + rev r2, r2 +#endif + @ We're counting 0x01 (not 0x80), so the bit offset is 7 too high. + clz r3, r4 + sub r3, r3, #7 + lsr r2, r2, r3 @ Shift down found byte + uxtb r1, r1 @ Undo replication of C + uxtb r2, r2 @ Extract found byte + add r0, r0, r3, lsr #3 @ Adjust the pointer to the found byte + + pop { r4, r5, r6, r7 } + cfi_adjust_cfa_offset (-16) + cfi_restore (r4) + cfi_restore (r5) + cfi_restore (r6) + cfi_restore (r7) + + @ Disambiguate between EOS and C. +99: + cmp r2, r1 + it ne + movne r0, #0 @ Found EOS, return NULL + bx lr + +END (strchr) + +weak_alias (strchr, index) +libc_hidden_builtin_def (strchr) diff --git a/REORG.TODO/sysdeps/arm/armv6/strcpy.S b/REORG.TODO/sysdeps/arm/armv6/strcpy.S new file mode 100644 index 0000000000..1b98dbce30 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv6/strcpy.S @@ -0,0 +1,218 @@ +/* strcpy -- copy a nul-terminated string. + Copyright (C) 2013-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> + +/* Endian independent macros for shifting bytes within registers. */ +#ifdef __ARMEB__ +#define lsh_gt lsr +#define lsh_ls lsl +#else +#define lsh_gt lsl +#define lsh_ls lsr +#endif + + .syntax unified + .text + +ENTRY (__stpcpy) + @ Signal stpcpy with NULL in IP. + mov ip, #0 + b 0f +END (__stpcpy) + +weak_alias (__stpcpy, stpcpy) +libc_hidden_def (__stpcpy) +libc_hidden_builtin_def (stpcpy) + +ENTRY (strcpy) + @ Signal strcpy with DEST in IP. + mov ip, r0 +0: + pld [r0, #0] + pld [r1, #0] + + @ To cater to long strings, we want 8 byte alignment in the source. + @ To cater to small strings, we don't want to start that right away. + @ Loop up to 16 times, less whatever it takes to reach alignment. + and r3, r1, #7 + rsb r3, r3, #16 + + @ Loop until we find ... +1: ldrb r2, [r1], #1 + subs r3, r3, #1 @ ... 
the alignment point + strb r2, [r0], #1 + it ne + cmpne r2, #0 @ ... or EOS + bne 1b + + @ Disambiguate the exit possibilites above + cmp r2, #0 @ Found EOS + beq .Lreturn + + @ Load the next two words asap + ldrd r2, r3, [r1], #8 + pld [r0, #64] + pld [r1, #64] + + @ For longer strings, we actaully need a stack frame. + push { r4, r5, r6, r7 } + cfi_adjust_cfa_offset (16) + cfi_rel_offset (r4, 0) + cfi_rel_offset (r5, 4) + cfi_rel_offset (r6, 8) + cfi_rel_offset (r7, 12) + + @ Subtracting (unsigned saturating) from 1 for any byte means result + @ of 1 for any byte that was originally zero and 0 otherwise. + @ Therefore we consider the lsb of each byte the "found" bit. +#ifdef ARCH_HAS_T2 + movw r7, #0x0101 + tst r0, #3 @ Test alignment of DEST + movt r7, #0x0101 +#else + ldr r7, =0x01010101 + tst r0, #3 +#endif + bne .Lunaligned + + @ So now source (r1) is aligned to 8, and dest (r0) is aligned to 4. + @ Loop, reading 8 bytes at a time, searching for EOS. + .balign 16 +2: uqsub8 r4, r7, r2 @ Find EOS + uqsub8 r5, r7, r3 + pld [r1, #128] + cmp r4, #0 @ EOS in first word? + pld [r0, #128] + bne 3f + str r2, [r0], #4 + cmp r5, #0 @ EOS in second word? + bne 4f + str r3, [r0], #4 + ldrd r2, r3, [r1], #8 + b 2b + +3: sub r1, r1, #4 @ backup to first word +4: sub r1, r1, #4 @ backup to second word + + @ ... then finish up any tail a byte at a time. + @ Note that we generally back up and re-read source bytes, + @ but we'll not re-write dest bytes. +.Lbyte_loop: + ldrb r2, [r1], #1 + cmp r2, #0 + strb r2, [r0], #1 + bne .Lbyte_loop + + pop { r4, r5, r6, r7 } + cfi_remember_state + cfi_adjust_cfa_offset (-16) + cfi_restore (r4) + cfi_restore (r5) + cfi_restore (r6) + cfi_restore (r7) + +.Lreturn: + cmp ip, #0 @ Was this strcpy or stpcpy? + ite eq + subeq r0, r0, #1 @ stpcpy: undo post-inc from store + movne r0, ip @ strcpy: return original dest + bx lr + +.Lunaligned: + cfi_restore_state + @ Here, source is aligned to 8, but the destination is not word + @ aligned. Therefore we have to shift the data in order to be + @ able to perform aligned word stores. + + @ Find out which misalignment we're dealing with. + tst r0, #1 + beq .Lunaligned2 + tst r0, #2 + bne .Lunaligned3 + @ Fallthru to .Lunaligned1. + +.macro unaligned_copy unalign + @ Prologue to unaligned loop. Seed shifted non-zero bytes. + uqsub8 r4, r7, r2 @ Find EOS + uqsub8 r5, r7, r3 + cmp r4, #0 @ EOS in first word? + it ne + subne r1, r1, #8 + bne .Lbyte_loop +#ifdef __ARMEB__ + rev r2, r2 @ Byte stores below need LE data +#endif + @ Store a few bytes from the first word. + @ At the same time we align r0 and shift out bytes from r2. +.rept 4-\unalign + strb r2, [r0], #1 + lsr r2, r2, #8 +.endr +#ifdef __ARMEB__ + rev r2, r2 @ Undo previous rev +#endif + @ Rotated unaligned copy loop. The tail of the prologue is + @ shared with the loop itself. + .balign 8 +1: cmp r5, #0 @ EOS in second word? + bne 4f + @ Combine first and second words + orr r2, r2, r3, lsh_gt #(\unalign*8) + @ Save leftover bytes from the two words + lsh_ls r6, r3, #((4-\unalign)*8) + str r2, [r0], #4 + @ The "real" start of the unaligned copy loop. + ldrd r2, r3, [r1], #8 @ Load 8 more bytes + uqsub8 r4, r7, r2 @ Find EOS + pld [r1, #128] + uqsub8 r5, r7, r3 + pld [r0, #128] + cmp r4, #0 @ EOS in first word? + bne 3f + @ Combine the leftover and the first word + orr r6, r6, r2, lsh_gt #(\unalign*8) + @ Discard used bytes from the first word. 
+ lsh_ls r2, r2, #((4-\unalign)*8) + str r6, [r0], #4 + b 1b + @ Found EOS in one of the words; adjust backward +3: sub r1, r1, #4 + mov r2, r6 +4: sub r1, r1, #4 + @ And store the remaining bytes from the leftover +#ifdef __ARMEB__ + rev r2, r2 +#endif +.rept \unalign + strb r2, [r0], #1 + lsr r2, r2, #8 +.endr + b .Lbyte_loop +.endm + +.Lunaligned1: + unaligned_copy 1 +.Lunaligned2: + unaligned_copy 2 +.Lunaligned3: + unaligned_copy 3 + +END (strcpy) + +libc_hidden_builtin_def (strcpy) diff --git a/REORG.TODO/sysdeps/arm/armv6/strlen.S b/REORG.TODO/sysdeps/arm/armv6/strlen.S new file mode 100644 index 0000000000..64a971f2de --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv6/strlen.S @@ -0,0 +1,99 @@ +/* strlen -- find the length of a nul-terminated string. + Copyright (C) 2013-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> + + .syntax unified + .text + +ENTRY (strlen) + @ r0 = start of string + ldrb r2, [r0] @ load the first byte asap + + @ To cater to long strings, we want to search through a few + @ characters until we reach an aligned pointer. To cater to + @ small strings, we don't want to start doing word operations + @ immediately. The compromise is a maximum of 16 bytes less + @ whatever is required to end with an aligned pointer. + @ r3 = number of characters to search in alignment loop + and r3, r0, #7 + mov r1, r0 @ Save the input pointer + rsb r3, r3, #15 @ 16 - 1 peeled loop iteration + cmp r2, #0 + beq 99f + + @ Loop until we find ... +1: ldrb r2, [r0, #1]! + subs r3, r3, #1 @ ... the aligment point + it ne + cmpne r2, #0 @ ... or EOS + bne 1b + + @ Disambiguate the exit possibilites above + cmp r2, #0 @ Found EOS + beq 99f + add r0, r0, #1 + + @ So now we're aligned. + ldrd r2, r3, [r0], #8 +#ifdef ARCH_HAS_T2 + movw ip, #0x0101 + pld [r0, #64] + movt ip, #0x0101 +#else + ldr ip, =0x01010101 + pld [r0, #64] +#endif + + @ Loop searching for EOS, 8 bytes at a time. + @ Subtracting (unsigned saturating) from 1 for any byte means that + @ we get 1 for any byte that was originally zero and 0 otherwise. + @ Therefore we consider the lsb of each byte the "found" bit. + .balign 16 +2: uqsub8 r2, ip, r2 @ Find EOS + uqsub8 r3, ip, r3 + pld [r0, #128] @ Prefetch 2 lines ahead + orrs r3, r3, r2 @ Combine the two words + it eq + ldrdeq r2, r3, [r0], #8 + beq 2b + + @ Found something. Disambiguate between first and second words. + @ Adjust r0 to point to the word containing the match. + @ Adjust r2 to the found bits for the word containing the match. + cmp r2, #0 + sub r0, r0, #4 + ite eq + moveq r2, r3 + subne r0, r0, #4 + + @ Find the bit-offset of the match within the word. Note that the + @ bit result from clz will be 7 higher than "true", but we'll + @ immediately discard those bits converting to a byte offset. 
+#ifdef __ARMEL__ + rev r2, r2 @ For LE, count from the little end +#endif + clz r2, r2 + add r0, r0, r2, lsr #3 @ Adjust the pointer to the found byte +99: + sub r0, r0, r1 @ Subtract input to compute length + bx lr + +END (strlen) + +libc_hidden_builtin_def (strlen) diff --git a/REORG.TODO/sysdeps/arm/armv6/strrchr.S b/REORG.TODO/sysdeps/arm/armv6/strrchr.S new file mode 100644 index 0000000000..e6eea01816 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv6/strrchr.S @@ -0,0 +1,129 @@ +/* strrchr -- find the last occurence of C in a nul-terminated string + Copyright (C) 2013-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> + + .syntax unified + .text + +ENTRY (strrchr) + @ r0 = start of string + @ r1 = character to match + @ returns NULL for no match, or a pointer to the match + + mov r3, r0 + mov r0, #0 + uxtb r1, r1 + + @ Loop a few times until we're aligned. + tst r3, #7 + beq 2f +1: ldrb r2, [r3], #1 + cmp r2, r1 @ Find the character + it eq + subeq r0, r3, #1 + cmp r2, #0 @ Find EOS + it eq + bxeq lr + tst r3, #7 @ Find the aligment point + bne 1b + + @ So now we're aligned. Now we actually need a stack frame. +2: push { r4, r5, r6, r7 } + cfi_adjust_cfa_offset (16) + cfi_rel_offset (r4, 0) + cfi_rel_offset (r5, 4) + cfi_rel_offset (r6, 8) + cfi_rel_offset (r7, 12) + + orr r1, r1, r1, lsl #8 @ Replicate C to all bytes +#ifdef ARCH_HAS_T2 + movw ip, #0x0101 + movt ip, #0x0101 +#else + ldr ip, =0x01010101 +#endif + orr r1, r1, r1, lsl #16 + mov r2, #0 @ No found bits yet + + @ Loop searching for EOS and C, 8 bytes at a time. + @ Any time we find a match in a word, we copy the address of + @ the word to r0, and the found bits to r2. +3: ldrd r4, r5, [r3], #8 + @ Subtracting (unsigned saturating) from 1 means result of 1 for + @ any byte that was originally zero and 0 otherwise. Therefore + @ we consider the lsb of each byte the "found" bit. + uqsub8 r6, ip, r4 @ Find EOS + uqsub8 r7, ip, r5 + eor r4, r4, r1 @ Convert C bytes to 0 + eor r5, r5, r1 + uqsub8 r4, ip, r4 @ Find C + uqsub8 r5, ip, r5 + cmp r6, #0 @ Found EOS, first word + bne 4f + cmp r4, #0 @ Handle C, first word + itt ne + subne r0, r3, #8 + movne r2, r4 + cmp r7, #0 @ Found EOS, second word + bne 5f + cmp r5, #0 @ Handle C, second word + itt ne + subne r0, r3, #4 + movne r2, r5 + b 3b + + @ Found EOS in second word; fold to first word. +5: add r3, r3, #4 @ Dec pointer to 2nd word, with below + mov r4, r5 @ Overwrite first word C found + mov r6, r7 @ Overwrite first word EOS found + + @ Found EOS. Zap found C after EOS. +4: sub r3, r3, #8 @ Decrement pointer to first word +#ifdef __ARMEB__ + @ Byte swap to be congruent with LE, which is easier from here on. + rev r6, r6 @ Byte swap found EOS, + rev r4, r4 @ ... this found C + rev r2, r2 @ ... 
prev found C +#endif + sub r7, r6, #1 @ Toggle EOS lsb and below + eor r6, r6, r7 @ All bits below and including lsb + ands r4, r4, r6 @ Zap C above EOS + itt ne + movne r2, r4 @ Copy to result, if still non-zero + movne r0, r3 + + pop { r4, r5, r6, r7 } + cfi_adjust_cfa_offset (-16) + cfi_restore (r4) + cfi_restore (r5) + cfi_restore (r6) + cfi_restore (r7) + + @ Adjust the result pointer if we found a word containing C. + cmp r2, #0 + clz r2, r2 @ Find the bit offset of the last C + itt ne + rsbne r2, r2, #32 @ Convert to a count from the right + addne r0, r0, r2, lsr #3 @ Convert to byte offset and add. + bx lr + +END (strrchr) + +weak_alias (strrchr, rindex) +libc_hidden_builtin_def (strrchr) diff --git a/REORG.TODO/sysdeps/arm/armv6t2/Implies b/REORG.TODO/sysdeps/arm/armv6t2/Implies new file mode 100644 index 0000000000..20a87fc8a5 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv6t2/Implies @@ -0,0 +1,2 @@ +# We can do everything that 6 can +arm/armv6 diff --git a/REORG.TODO/sysdeps/arm/armv6t2/ffs.S b/REORG.TODO/sysdeps/arm/armv6t2/ffs.S new file mode 100644 index 0000000000..ed13cd97ba --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv6t2/ffs.S @@ -0,0 +1,36 @@ +/* ffs -- find first set bit in an int, from least significant end. + Copyright (C) 2013-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> + + .syntax unified + .text + +ENTRY (__ffs) + cmp r0, #0 + rbit r0, r0 + itt ne + clzne r0, r0 + addne r0, r0, #1 + bx lr +END (__ffs) + +weak_alias (__ffs, ffs) +weak_alias (__ffs, ffsl) +libc_hidden_def (__ffs) +libc_hidden_builtin_def (ffs) diff --git a/REORG.TODO/sysdeps/arm/armv6t2/ffsll.S b/REORG.TODO/sysdeps/arm/armv6t2/ffsll.S new file mode 100644 index 0000000000..2754cc70aa --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv6t2/ffsll.S @@ -0,0 +1,50 @@ +/* ffsll -- find first set bit in a long long, from least significant end. + Copyright (C) 2013-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> + + .syntax unified + .text + +ENTRY (ffsll) + @ If low part is 0, operate on the high part. Ensure that the + @ word on which we operate is in r0. 
Set r2 to the bit offset + @ of the word being considered. Set the flags for the word + @ being operated on. +#ifdef __ARMEL__ + cmp r0, #0 + itee ne + movne r2, #0 + moveq r2, #32 + movseq r0, r1 +#else + cmp r1, #0 + ittee ne + movne r2, #0 + movne r0, r1 + moveq r2, #32 + cmpeq r0, #0 +#endif + @ Perform the ffs on r0. + rbit r0, r0 + ittt ne + clzne r0, r0 + addne r2, r2, #1 + addne r0, r0, r2 + bx lr +END (ffsll) diff --git a/REORG.TODO/sysdeps/arm/armv6t2/memchr.S b/REORG.TODO/sysdeps/arm/armv6t2/memchr.S new file mode 100644 index 0000000000..fb4dc8efa3 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv6t2/memchr.S @@ -0,0 +1,184 @@ +/* Copyright (C) 2011-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Code contributed by Dave Gilbert <david.gilbert@linaro.org> + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> + +@ This memchr routine is optimised on a Cortex-A9 and should work on all ARMv7 +@ and ARMv6T2 processors. It has a fast path for short sizes, and has an +@ optimised path for large data sets; the worst case is finding the match early +@ in a large data set. +@ Note: The use of cbz/cbnz means it's Thumb only + +@ 2011-07-15 david.gilbert@linaro.org +@ Copy from Cortex strings release 21 and change license +@ http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/view/head:/src/linaro-a9/memchr.S +@ Change function declarations/entry/exit +@ 2011-12-01 david.gilbert@linaro.org +@ Add some fixes from comments received (including use of ldrd instead ldm) +@ 2011-12-07 david.gilbert@linaro.org +@ Removed cbz from align loop - can't be taken + +@ this lets us check a flag in a 00/ff byte easily in either endianness +#ifdef __ARMEB__ +#define CHARTSTMASK(c) 1<<(31-(c*8)) +#else +#define CHARTSTMASK(c) 1<<(c*8) +#endif + .syntax unified + + .text +#ifdef NO_THUMB + .arm +#else + .thumb + .thumb_func +#endif + .global memchr + .type memchr,%function +ENTRY(memchr) + @ r0 = start of memory to scan + @ r1 = character to look for + @ r2 = length + @ returns r0 = pointer to character or NULL if not found + and r1,r1,#0xff @ Don't think we can trust the caller to actually pass a char + + cmp r2,#16 @ If it's short don't bother with anything clever + blt 20f + + tst r0, #7 @ If it's already aligned skip the next bit + beq 10f + + @ Work up to an aligned point +5: + ldrb r3, [r0],#1 + subs r2, r2, #1 + cmp r3, r1 + beq 50f @ If it matches exit found + tst r0, #7 + bne 5b @ If not aligned yet then do next byte + +10: + @ At this point, we are aligned, we know we have at least 8 bytes to work with + push {r4,r5,r6,r7} + cfi_adjust_cfa_offset (16) + cfi_rel_offset (r4, 0) + cfi_rel_offset (r5, 4) + cfi_rel_offset (r6, 8) + cfi_rel_offset (r7, 12) + + cfi_remember_state + + orr r1, r1, r1, lsl #8 @ expand the match word across to all bytes + orr r1, r1, r1, lsl #16 + bic r6, r2, #7 @ Number of double words to 
work with * 8 + mvns r7, #0 @ all F's + movs r3, #0 + +15: + ldrd r4,r5, [r0],#8 +#ifndef NO_THUMB + subs r6, r6, #8 +#endif + eor r4,r4, r1 @ Get it so that r4,r5 have 00's where the bytes match the target + eor r5,r5, r1 + uadd8 r4, r4, r7 @ Parallel add 0xff - sets the GE bits for anything that wasn't 0 + sel r4, r3, r7 @ bytes are 00 for none-00 bytes, or ff for 00 bytes - NOTE INVERSION + uadd8 r5, r5, r7 @ Parallel add 0xff - sets the GE bits for anything that wasn't 0 + sel r5, r4, r7 @ chained....bytes are 00 for none-00 bytes, or ff for 00 bytes - NOTE INVERSION +#ifndef NO_THUMB + cbnz r5, 60f +#else + cmp r5, #0 + bne 60f + subs r6, r6, #8 +#endif + bne 15b @ (Flags from the subs above) If not run out of bytes then go around again + + pop {r4,r5,r6,r7} + cfi_adjust_cfa_offset (-16) + cfi_restore (r4) + cfi_restore (r5) + cfi_restore (r6) + cfi_restore (r7) + + and r1,r1,#0xff @ Get r1 back to a single character from the expansion above + and r2,r2,#7 @ Leave the count remaining as the number after the double words have been done + +20: +#ifndef NO_THUMB + cbz r2, 40f @ 0 length or hit the end already then not found +#else + cmp r2, #0 + beq 40f +#endif + +21: @ Post aligned section, or just a short call + ldrb r3,[r0],#1 +#ifndef NO_THUMB + subs r2,r2,#1 + eor r3,r3,r1 @ r3 = 0 if match - doesn't break flags from sub + cbz r3, 50f +#else + eors r3, r3, r1 + beq 50f + subs r2, r2, #1 +#endif + bne 21b @ on r2 flags + +40: + movs r0,#0 @ not found + DO_RET(lr) + +50: + subs r0,r0,#1 @ found + DO_RET(lr) + +60: @ We're here because the fast path found a hit - now we have to track down exactly which word it was + @ r0 points to the start of the double word after the one that was tested + @ r4 has the 00/ff pattern for the first word, r5 has the chained value + cfi_restore_state + cmp r4, #0 + itte eq + moveq r4, r5 @ the end is in the 2nd word + subeq r0,r0,#3 @ Points to 2nd byte of 2nd word + subne r0,r0,#7 @ or 2nd byte of 1st word + + @ r0 currently points to the 2nd byte of the word containing the hit + tst r4, # CHARTSTMASK(0) @ 1st character + bne 61f + adds r0,r0,#1 + tst r4, # CHARTSTMASK(1) @ 2nd character + ittt eq + addeq r0,r0,#1 + tsteq r4, # (3<<15) @ 2nd & 3rd character + @ If not the 3rd must be the last one + addeq r0,r0,#1 + +61: + pop {r4,r5,r6,r7} + cfi_adjust_cfa_offset (-16) + cfi_restore (r4) + cfi_restore (r5) + cfi_restore (r6) + cfi_restore (r7) + + subs r0,r0,#1 + DO_RET(lr) + +END(memchr) +libc_hidden_builtin_def (memchr) diff --git a/REORG.TODO/sysdeps/arm/armv6t2/strlen.S b/REORG.TODO/sysdeps/arm/armv6t2/strlen.S new file mode 100644 index 0000000000..c72a1e73be --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv6t2/strlen.S @@ -0,0 +1,164 @@ +/* Copyright (C) 2010-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. 
*/ + +/* + Assumes: + ARMv6T2, AArch32 + + */ + +#include <arm-features.h> /* This might #define NO_THUMB. */ +#include <sysdep.h> + +#ifdef __ARMEB__ +#define S2LO lsl +#define S2HI lsr +#else +#define S2LO lsr +#define S2HI lsl +#endif + +#ifndef NO_THUMB +/* This code is best on Thumb. */ + .thumb +#else +/* Using bne.w explicitly is desirable in Thumb mode because it helps + align the following label without a nop. In ARM mode there is no + such difference. */ +.macro bne.w label + bne \label +.endm + +/* This clobbers the condition codes, which the real Thumb cbnz instruction + does not do. But it doesn't matter for any of the uses here. */ +.macro cbnz reg, label + cmp \reg, #0 + bne \label +.endm +#endif + +/* Parameters and result. */ +#define srcin r0 +#define result r0 + +/* Internal variables. */ +#define src r1 +#define data1a r2 +#define data1b r3 +#define const_m1 r12 +#define const_0 r4 +#define tmp1 r4 /* Overlaps const_0 */ +#define tmp2 r5 + + .text + .p2align 6 +ENTRY(strlen) + pld [srcin, #0] + strd r4, r5, [sp, #-8]! + cfi_adjust_cfa_offset (8) + cfi_rel_offset (r4, 0) + cfi_rel_offset (r5, 4) + cfi_remember_state + bic src, srcin, #7 + mvn const_m1, #0 + ands tmp1, srcin, #7 /* (8 - bytes) to alignment. */ + pld [src, #32] + bne.w .Lmisaligned8 + mov const_0, #0 + mov result, #-8 +.Lloop_aligned: + /* Bytes 0-7. */ + ldrd data1a, data1b, [src] + pld [src, #64] + add result, result, #8 +.Lstart_realigned: + uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */ + sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */ + uadd8 data1b, data1b, const_m1 + sel data1b, data1a, const_m1 /* Only used if d1a == 0. */ + cbnz data1b, .Lnull_found + + /* Bytes 8-15. */ + ldrd data1a, data1b, [src, #8] + uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */ + add result, result, #8 + sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */ + uadd8 data1b, data1b, const_m1 + sel data1b, data1a, const_m1 /* Only used if d1a == 0. */ + cbnz data1b, .Lnull_found + + /* Bytes 16-23. */ + ldrd data1a, data1b, [src, #16] + uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */ + add result, result, #8 + sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */ + uadd8 data1b, data1b, const_m1 + sel data1b, data1a, const_m1 /* Only used if d1a == 0. */ + cbnz data1b, .Lnull_found + + /* Bytes 24-31. */ + ldrd data1a, data1b, [src, #24] + add src, src, #32 + uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */ + add result, result, #8 + sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */ + uadd8 data1b, data1b, const_m1 + sel data1b, data1a, const_m1 /* Only used if d1a == 0. */ + cmp data1b, #0 + beq .Lloop_aligned + +.Lnull_found: + cmp data1a, #0 + itt eq + addeq result, result, #4 + moveq data1a, data1b +#ifndef __ARMEB__ + rev data1a, data1a +#endif + clz data1a, data1a + ldrd r4, r5, [sp], #8 + cfi_adjust_cfa_offset (-8) + cfi_restore (r4) + cfi_restore (r5) + add result, result, data1a, lsr #3 /* Bits -> Bytes. */ + DO_RET(lr) + +.Lmisaligned8: + cfi_restore_state + ldrd data1a, data1b, [src] + and tmp2, tmp1, #3 + rsb result, tmp1, #0 + lsl tmp2, tmp2, #3 /* Bytes -> bits. 
*/ + tst tmp1, #4 + pld [src, #64] + S2HI tmp2, const_m1, tmp2 +#ifdef NO_THUMB + mvn tmp1, tmp2 + orr data1a, data1a, tmp1 + itt ne + orrne data1b, data1b, tmp1 +#else + orn data1a, data1a, tmp2 + itt ne + ornne data1b, data1b, tmp2 +#endif + movne data1a, const_m1 + mov const_0, #0 + b .Lstart_realigned + +END(strlen) +libc_hidden_builtin_def (strlen) diff --git a/REORG.TODO/sysdeps/arm/armv7/Implies b/REORG.TODO/sysdeps/arm/armv7/Implies new file mode 100644 index 0000000000..c6cd0eb877 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv7/Implies @@ -0,0 +1,2 @@ +# We can do everything that 6T2 can +arm/armv6t2 diff --git a/REORG.TODO/sysdeps/arm/armv7/multiarch/Makefile b/REORG.TODO/sysdeps/arm/armv7/multiarch/Makefile new file mode 100644 index 0000000000..e834cc937f --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv7/multiarch/Makefile @@ -0,0 +1,3 @@ +ifeq ($(subdir),string) +sysdep_routines += memcpy_neon memcpy_vfp +endif diff --git a/REORG.TODO/sysdeps/arm/armv7/multiarch/aeabi_memcpy.c b/REORG.TODO/sysdeps/arm/armv7/multiarch/aeabi_memcpy.c new file mode 100644 index 0000000000..c6a2a98a55 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv7/multiarch/aeabi_memcpy.c @@ -0,0 +1,2 @@ +/* Empty file to override sysdeps/arm version. See memcpy.S for definitions + of these functions. */ diff --git a/REORG.TODO/sysdeps/arm/armv7/multiarch/ifunc-impl-list.c b/REORG.TODO/sysdeps/arm/armv7/multiarch/ifunc-impl-list.c new file mode 100644 index 0000000000..b8094fd393 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv7/multiarch/ifunc-impl-list.c @@ -0,0 +1,56 @@ +/* Enumerate available IFUNC implementations of a function. ARM version. + Copyright (C) 2013-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#include <stdbool.h> +#include <string.h> +#include <ldsodefs.h> +#include <sysdep.h> +#include <ifunc-impl-list.h> + +/* Fill ARRAY of MAX elements with IFUNC implementations for function + NAME and return the number of valid entries. 
*/ + +size_t +__libc_ifunc_impl_list (const char *name, struct libc_ifunc_impl *array, + size_t max) +{ + size_t i = 0; + + bool use_neon = true; +#ifdef __ARM_NEON__ +# define __memcpy_neon memcpy +#else + use_neon = (GLRO(dl_hwcap) & HWCAP_ARM_NEON) != 0; +#endif + +#ifndef __ARM_NEON__ + bool use_vfp = true; +# ifdef __SOFTFP__ + use_vfp = (GLRO(dl_hwcap) & HWCAP_ARM_VFP) != 0; +# endif +#endif + + IFUNC_IMPL (i, name, memcpy, + IFUNC_IMPL_ADD (array, i, memcpy, use_neon, __memcpy_neon) +#ifndef __ARM_NEON__ + IFUNC_IMPL_ADD (array, i, memcpy, use_vfp, __memcpy_vfp) +#endif + IFUNC_IMPL_ADD (array, i, memcpy, 1, __memcpy_arm)); + + return i; +} diff --git a/REORG.TODO/sysdeps/arm/armv7/multiarch/memcpy.S b/REORG.TODO/sysdeps/arm/armv7/multiarch/memcpy.S new file mode 100644 index 0000000000..8a53bdaf91 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv7/multiarch/memcpy.S @@ -0,0 +1,76 @@ +/* Multiple versions of memcpy + All versions must be listed in ifunc-impl-list.c. + Copyright (C) 2013-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +/* Thumb requires excess IT instructions here. */ +#define NO_THUMB +#include <sysdep.h> +#include <rtld-global-offsets.h> + +#if IS_IN (libc) +/* Under __ARM_NEON__, memcpy_neon.S defines the name memcpy. */ +# ifndef __ARM_NEON__ + .text +ENTRY(memcpy) + .type memcpy, %gnu_indirect_function +# ifdef __SOFTFP__ + ldr r1, .Lmemcpy_arm + tst r0, #HWCAP_ARM_VFP + ldrne r1, .Lmemcpy_vfp +# else + ldr r1, .Lmemcpy_vfp +# endif + tst r0, #HWCAP_ARM_NEON + ldrne r1, .Lmemcpy_neon +1: + add r0, r1, pc + DO_RET(lr) + +# ifdef __SOFTFP__ +.Lmemcpy_arm: + .long C_SYMBOL_NAME(__memcpy_arm) - 1b - PC_OFS +# endif +.Lmemcpy_neon: + .long C_SYMBOL_NAME(__memcpy_neon) - 1b - PC_OFS +.Lmemcpy_vfp: + .long C_SYMBOL_NAME(__memcpy_vfp) - 1b - PC_OFS + +END(memcpy) + +libc_hidden_builtin_def (memcpy) +#endif /* Not __ARM_NEON__. */ + +/* These versions of memcpy are defined not to clobber any VFP or NEON + registers so they must always call the ARM variant of the memcpy code. */ +strong_alias (__memcpy_arm, __aeabi_memcpy) +strong_alias (__memcpy_arm, __aeabi_memcpy4) +strong_alias (__memcpy_arm, __aeabi_memcpy8) +libc_hidden_def (__memcpy_arm) + +#undef libc_hidden_builtin_def +#define libc_hidden_builtin_def(name) +#undef weak_alias +#define weak_alias(x, y) +#undef libc_hidden_def +#define libc_hidden_def(name) + +#define memcpy __memcpy_arm + +#endif + +#include "memcpy_impl.S" diff --git a/REORG.TODO/sysdeps/arm/armv7/multiarch/memcpy_impl.S b/REORG.TODO/sysdeps/arm/armv7/multiarch/memcpy_impl.S new file mode 100644 index 0000000000..c1b9fb0ab5 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv7/multiarch/memcpy_impl.S @@ -0,0 +1,728 @@ +/* NEON/VFP/ARM version of memcpy optimized for Cortex-A15. + Copyright (C) 2013-2017 Free Software Foundation, Inc. 
+ This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. + + This memcpy routine is optimised for Cortex-A15 cores and takes advantage + of VFP or NEON when built with the appropriate flags. + + Assumptions: + + ARMv6 (ARMv7-a if using Neon) + ARM state + Unaligned accesses + + */ + +/* Thumb cannot encode negative immediate offsets in memory operations. */ +#ifndef NO_THUMB +#define NO_THUMB +#endif +#include <sysdep.h> +#include <arm-features.h> + + .syntax unified + /* This implementation requires ARM state. */ + .arm + +#ifdef MEMCPY_NEON + + .fpu neon + .arch armv7-a +# define FRAME_SIZE 4 +# define USE_VFP +# define USE_NEON + +#elif defined (MEMCPY_VFP) + + .arch armv6 + .fpu vfpv2 +# define FRAME_SIZE 32 +# define USE_VFP + +#else + .arch armv6 +# define FRAME_SIZE 32 + +#endif + +#define ALIGN(addr, align) addr:align + +#define INSN_SIZE 4 + +/* Call parameters. */ +#define dstin r0 +#define src r1 +#define count r2 + +/* Locals. */ +#define tmp1 r3 +#define dst ip +#define tmp2 r8 + +/* These two macros both work by repeated invocation of the macro + dispatch_step (not defined here). That macro performs one "step", + doing one load instruction and one store instruction to copy one + "unit". On entry, TMP1 contains the number of bytes to be copied, + a multiple of the unit size. The macro clobbers TMP1 in the + process of doing a computed jump to the tail containing the + appropriate number of steps. + + In dispatch_7_dword, dispatch_step is invoked seven times, with an + argument that is 7 for the first and 1 for the last. Units are + double-words (8 bytes). TMP1 is at most 56. + + In dispatch_15_word, dispatch_step is invoked fifteen times, + with an argument that is 15 for the first and 1 for the last. + Units are words (4 bytes). TMP1 is at most 60. */ + +#ifndef ARM_ALWAYS_BX +# if ARM_BX_ALIGN_LOG2 != 2 +# error case not handled +# endif + .macro dispatch_7_dword + rsb tmp1, tmp1, #((7 * 8) - PC_OFS + INSN_SIZE) + add pc, pc, tmp1 + dispatch_step 7 + dispatch_step 6 + dispatch_step 5 + dispatch_step 4 + dispatch_step 3 + dispatch_step 2 + dispatch_step 1 + .purgem dispatch_step + .endm + + .macro dispatch_15_word + rsb tmp1, tmp1, #((15 * 4) - PC_OFS/2 + INSN_SIZE/2) + add pc, pc, tmp1, lsl #1 + dispatch_step 15 + dispatch_step 14 + dispatch_step 13 + dispatch_step 12 + dispatch_step 11 + dispatch_step 10 + dispatch_step 9 + dispatch_step 8 + dispatch_step 7 + dispatch_step 6 + dispatch_step 5 + dispatch_step 4 + dispatch_step 3 + dispatch_step 2 + dispatch_step 1 + .purgem dispatch_step + .endm +#else +# if ARM_BX_ALIGN_LOG2 < 3 +# error case not handled +# endif + .macro dispatch_helper steps, log2_bytes_per_step + /* TMP1 gets (max_bytes - bytes_to_copy), where max_bytes is + (STEPS << LOG2_BYTES_PER_STEP). + So this is (steps_to_skip << LOG2_BYTES_PER_STEP). 
+ Then it needs further adjustment to compensate for the + distance between the PC value taken below (0f + PC_OFS) + and the first step's instructions (1f). */ + rsb tmp1, tmp1, #((\steps << \log2_bytes_per_step) \ + + ((1f - PC_OFS - 0f) \ + >> (ARM_BX_ALIGN_LOG2 - \log2_bytes_per_step))) + /* Shifting down LOG2_BYTES_PER_STEP gives us the number of + steps to skip, then shifting up ARM_BX_ALIGN_LOG2 gives us + the (byte) distance to add to the PC. */ +0: add tmp1, pc, tmp1, lsl #(ARM_BX_ALIGN_LOG2 - \log2_bytes_per_step) + bx tmp1 + .p2align ARM_BX_ALIGN_LOG2 +1: + .endm + + .macro dispatch_7_dword + dispatch_helper 7, 3 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 7 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 6 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 5 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 4 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 3 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 2 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 1 + .p2align ARM_BX_ALIGN_LOG2 + .purgem dispatch_step + .endm + + .macro dispatch_15_word + dispatch_helper 15, 2 + dispatch_step 15 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 14 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 13 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 12 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 11 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 10 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 9 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 8 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 7 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 6 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 5 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 4 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 3 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 2 + .p2align ARM_BX_ALIGN_LOG2 + dispatch_step 1 + .p2align ARM_BX_ALIGN_LOG2 + .purgem dispatch_step + .endm + +#endif + +#ifndef USE_NEON +/* For bulk copies using GP registers. */ +#define A_l r2 /* Call-clobbered. */ +#define A_h r3 /* Call-clobbered. */ +#define B_l r4 +#define B_h r5 +#define C_l r6 +#define C_h r7 +/* Don't use the pair r8,r9 because in some EABI variants r9 is reserved. */ +#define D_l r10 +#define D_h r11 +#endif + +/* Number of lines ahead to pre-fetch data. If you change this the code + below will need adjustment to compensate. */ + +#define prefetch_lines 5 + +#ifdef USE_VFP + .macro cpy_line_vfp vreg, base + vstr \vreg, [dst, #\base] + vldr \vreg, [src, #\base] + vstr d0, [dst, #\base + 8] + vldr d0, [src, #\base + 8] + vstr d1, [dst, #\base + 16] + vldr d1, [src, #\base + 16] + vstr d2, [dst, #\base + 24] + vldr d2, [src, #\base + 24] + vstr \vreg, [dst, #\base + 32] + vldr \vreg, [src, #\base + prefetch_lines * 64 - 32] + vstr d0, [dst, #\base + 40] + vldr d0, [src, #\base + 40] + vstr d1, [dst, #\base + 48] + vldr d1, [src, #\base + 48] + vstr d2, [dst, #\base + 56] + vldr d2, [src, #\base + 56] + .endm + + .macro cpy_tail_vfp vreg, base + vstr \vreg, [dst, #\base] + vldr \vreg, [src, #\base] + vstr d0, [dst, #\base + 8] + vldr d0, [src, #\base + 8] + vstr d1, [dst, #\base + 16] + vldr d1, [src, #\base + 16] + vstr d2, [dst, #\base + 24] + vldr d2, [src, #\base + 24] + vstr \vreg, [dst, #\base + 32] + vstr d0, [dst, #\base + 40] + vldr d0, [src, #\base + 40] + vstr d1, [dst, #\base + 48] + vldr d1, [src, #\base + 48] + vstr d2, [dst, #\base + 56] + vldr d2, [src, #\base + 56] + .endm +#endif + + .p2align 6 +ENTRY(memcpy) + + mov dst, dstin /* Preserve dstin, we need to return it. 
*/ + cmp count, #64 + bge .Lcpy_not_short + /* Deal with small copies quickly by dropping straight into the + exit block. */ + +.Ltail63unaligned: +#ifdef USE_NEON + /* These need an extra layer of macro just to work around a + bug in the assembler's parser when an operand starts with + a {...}. http://sourceware.org/bugzilla/show_bug.cgi?id=15647 + tracks that bug; it was not fixed as of binutils-2.23.2. */ + .macro neon_load_d0 reg + vld1.8 {d0}, [\reg]! + .endm + .macro neon_store_d0 reg + vst1.8 {d0}, [\reg]! + .endm + + and tmp1, count, #0x38 + .macro dispatch_step i + neon_load_d0 src + neon_store_d0 dst + .endm + dispatch_7_dword + + tst count, #4 + ldrne tmp1, [src], #4 + strne tmp1, [dst], #4 +#else + /* Copy up to 15 full words of data. May not be aligned. */ + /* Cannot use VFP for unaligned data. */ + and tmp1, count, #0x3c + add dst, dst, tmp1 + add src, src, tmp1 + /* Jump directly into the sequence below at the correct offset. */ + .macro dispatch_step i + ldr tmp1, [src, #-(\i * 4)] + str tmp1, [dst, #-(\i * 4)] + .endm + dispatch_15_word +#endif + + lsls count, count, #31 + ldrhcs tmp1, [src], #2 + ldrbne src, [src] /* Src is dead, use as a scratch. */ + strhcs tmp1, [dst], #2 + strbne src, [dst] + bx lr + +.Lcpy_not_short: + /* At least 64 bytes to copy, but don't know the alignment yet. */ + str tmp2, [sp, #-FRAME_SIZE]! + cfi_adjust_cfa_offset (FRAME_SIZE) + cfi_rel_offset (tmp2, 0) + cfi_remember_state + and tmp2, src, #7 + and tmp1, dst, #7 + cmp tmp1, tmp2 + bne .Lcpy_notaligned + +#ifdef USE_VFP + /* Magic dust alert! Force VFP on Cortex-A9. Experiments show + that the FP pipeline is much better at streaming loads and + stores. This is outside the critical loop. */ + vmov.f32 s0, s0 +#endif + + /* SRC and DST have the same mutual 64-bit alignment, but we may + still need to pre-copy some bytes to get to natural alignment. + We bring SRC and DST into full 64-bit alignment. */ + lsls tmp2, dst, #29 + beq 1f + rsbs tmp2, tmp2, #0 + sub count, count, tmp2, lsr #29 + ldrmi tmp1, [src], #4 + strmi tmp1, [dst], #4 + lsls tmp2, tmp2, #2 + ldrhcs tmp1, [src], #2 + ldrbne tmp2, [src], #1 + strhcs tmp1, [dst], #2 + strbne tmp2, [dst], #1 + +1: + subs tmp2, count, #64 /* Use tmp2 for count. */ + blt .Ltail63aligned + + cmp tmp2, #512 + bge .Lcpy_body_long + +.Lcpy_body_medium: /* Count in tmp2. */ +#ifdef USE_VFP +1: + vldr d0, [src, #0] + subs tmp2, tmp2, #64 + vldr d1, [src, #8] + vstr d0, [dst, #0] + vldr d0, [src, #16] + vstr d1, [dst, #8] + vldr d1, [src, #24] + vstr d0, [dst, #16] + vldr d0, [src, #32] + vstr d1, [dst, #24] + vldr d1, [src, #40] + vstr d0, [dst, #32] + vldr d0, [src, #48] + vstr d1, [dst, #40] + vldr d1, [src, #56] + vstr d0, [dst, #48] + add src, src, #64 + vstr d1, [dst, #56] + add dst, dst, #64 + bge 1b + tst tmp2, #0x3f + beq .Ldone + +.Ltail63aligned: /* Count in tmp2. */ + and tmp1, tmp2, #0x38 + add dst, dst, tmp1 + add src, src, tmp1 + .macro dispatch_step i + vldr d0, [src, #-(\i * 8)] + vstr d0, [dst, #-(\i * 8)] + .endm + dispatch_7_dword +#else + sub src, src, #8 + sub dst, dst, #8 +1: + ldrd A_l, A_h, [src, #8] + strd A_l, A_h, [dst, #8] + ldrd A_l, A_h, [src, #16] + strd A_l, A_h, [dst, #16] + ldrd A_l, A_h, [src, #24] + strd A_l, A_h, [dst, #24] + ldrd A_l, A_h, [src, #32] + strd A_l, A_h, [dst, #32] + ldrd A_l, A_h, [src, #40] + strd A_l, A_h, [dst, #40] + ldrd A_l, A_h, [src, #48] + strd A_l, A_h, [dst, #48] + ldrd A_l, A_h, [src, #56] + strd A_l, A_h, [dst, #56] + ldrd A_l, A_h, [src, #64]! + strd A_l, A_h, [dst, #64]! 
+ subs tmp2, tmp2, #64 + bge 1b + tst tmp2, #0x3f + bne 1f + ldr tmp2,[sp], #FRAME_SIZE + cfi_adjust_cfa_offset (-FRAME_SIZE) + cfi_restore (tmp2) + bx lr + + cfi_restore_state + cfi_remember_state +1: + add src, src, #8 + add dst, dst, #8 + +.Ltail63aligned: /* Count in tmp2. */ + /* Copy up to 7 d-words of data. Similar to Ltail63unaligned, but + we know that the src and dest are 64-bit aligned so we can use + LDRD/STRD to improve efficiency. */ + /* TMP2 is now negative, but we don't care about that. The bottom + six bits still tell us how many bytes are left to copy. */ + + and tmp1, tmp2, #0x38 + add dst, dst, tmp1 + add src, src, tmp1 + .macro dispatch_step i + ldrd A_l, A_h, [src, #-(\i * 8)] + strd A_l, A_h, [dst, #-(\i * 8)] + .endm + dispatch_7_dword +#endif + + tst tmp2, #4 + ldrne tmp1, [src], #4 + strne tmp1, [dst], #4 + lsls tmp2, tmp2, #31 /* Count (tmp2) now dead. */ + ldrhcs tmp1, [src], #2 + ldrbne tmp2, [src] + strhcs tmp1, [dst], #2 + strbne tmp2, [dst] + +.Ldone: + ldr tmp2, [sp], #FRAME_SIZE + cfi_adjust_cfa_offset (-FRAME_SIZE) + cfi_restore (tmp2) + bx lr + + cfi_restore_state + cfi_remember_state + +.Lcpy_body_long: /* Count in tmp2. */ + + /* Long copy. We know that there's at least (prefetch_lines * 64) + bytes to go. */ +#ifdef USE_VFP + /* Don't use PLD. Instead, read some data in advance of the current + copy position into a register. This should act like a PLD + operation but we won't have to repeat the transfer. */ + + vldr d3, [src, #0] + vldr d4, [src, #64] + vldr d5, [src, #128] + vldr d6, [src, #192] + vldr d7, [src, #256] + + vldr d0, [src, #8] + vldr d1, [src, #16] + vldr d2, [src, #24] + add src, src, #32 + + subs tmp2, tmp2, #prefetch_lines * 64 * 2 + blt 2f +1: + cpy_line_vfp d3, 0 + cpy_line_vfp d4, 64 + cpy_line_vfp d5, 128 + add dst, dst, #3 * 64 + add src, src, #3 * 64 + cpy_line_vfp d6, 0 + cpy_line_vfp d7, 64 + add dst, dst, #2 * 64 + add src, src, #2 * 64 + subs tmp2, tmp2, #prefetch_lines * 64 + bge 1b + +2: + cpy_tail_vfp d3, 0 + cpy_tail_vfp d4, 64 + cpy_tail_vfp d5, 128 + add src, src, #3 * 64 + add dst, dst, #3 * 64 + cpy_tail_vfp d6, 0 + vstr d7, [dst, #64] + vldr d7, [src, #64] + vstr d0, [dst, #64 + 8] + vldr d0, [src, #64 + 8] + vstr d1, [dst, #64 + 16] + vldr d1, [src, #64 + 16] + vstr d2, [dst, #64 + 24] + vldr d2, [src, #64 + 24] + vstr d7, [dst, #64 + 32] + add src, src, #96 + vstr d0, [dst, #64 + 40] + vstr d1, [dst, #64 + 48] + vstr d2, [dst, #64 + 56] + add dst, dst, #128 + add tmp2, tmp2, #prefetch_lines * 64 + b .Lcpy_body_medium +#else + /* Long copy. Use an SMS style loop to maximize the I/O + bandwidth of the core. We don't have enough spare registers + to synthesise prefetching, so use PLD operations. */ + /* Pre-bias src and dst. */ + sub src, src, #8 + sub dst, dst, #8 + pld [src, #8] + pld [src, #72] + subs tmp2, tmp2, #64 + pld [src, #136] + ldrd A_l, A_h, [src, #8] + strd B_l, B_h, [sp, #8] + cfi_rel_offset (B_l, 8) + cfi_rel_offset (B_h, 12) + ldrd B_l, B_h, [src, #16] + strd C_l, C_h, [sp, #16] + cfi_rel_offset (C_l, 16) + cfi_rel_offset (C_h, 20) + ldrd C_l, C_h, [src, #24] + strd D_l, D_h, [sp, #24] + cfi_rel_offset (D_l, 24) + cfi_rel_offset (D_h, 28) + pld [src, #200] + ldrd D_l, D_h, [src, #32]! + b 1f + .p2align 6 +2: + pld [src, #232] + strd A_l, A_h, [dst, #40] + ldrd A_l, A_h, [src, #40] + strd B_l, B_h, [dst, #48] + ldrd B_l, B_h, [src, #48] + strd C_l, C_h, [dst, #56] + ldrd C_l, C_h, [src, #56] + strd D_l, D_h, [dst, #64]! + ldrd D_l, D_h, [src, #64]! 
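
The "SMS style" schedule referred to above interleaves the stores of one block with the loads of the next so that loads and stores overlap and the memory system stays busy in both directions. A simplified C sketch of that shape; copy_pipelined is an invented name, the tail handling and prefetching of the real code are omitted, n is assumed to be a positive multiple of 4 words, and this is an editorial sketch rather than part of the commit:

    #include <stdint.h>
    #include <stddef.h>

    static void
    copy_pipelined (uint32_t *dst, const uint32_t *src, size_t n)
    {
      /* Prime the pipeline with the first block.  */
      uint32_t a = src[0], b = src[1], c = src[2], d = src[3];
      size_t i = 4;
      for (; i < n; i += 4)
        {
          /* Store the block loaded on the previous iteration while the
             next block is being fetched.  */
          dst[i - 4] = a;  a = src[i];
          dst[i - 3] = b;  b = src[i + 1];
          dst[i - 2] = c;  c = src[i + 2];
          dst[i - 1] = d;  d = src[i + 3];
        }
      /* Drain the final block held in registers.  */
      dst[i - 4] = a;  dst[i - 3] = b;  dst[i - 2] = c;  dst[i - 1] = d;
    }
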
+ subs tmp2, tmp2, #64 +1: + strd A_l, A_h, [dst, #8] + ldrd A_l, A_h, [src, #8] + strd B_l, B_h, [dst, #16] + ldrd B_l, B_h, [src, #16] + strd C_l, C_h, [dst, #24] + ldrd C_l, C_h, [src, #24] + strd D_l, D_h, [dst, #32] + ldrd D_l, D_h, [src, #32] + bcs 2b + /* Save the remaining bytes and restore the callee-saved regs. */ + strd A_l, A_h, [dst, #40] + add src, src, #40 + strd B_l, B_h, [dst, #48] + ldrd B_l, B_h, [sp, #8] + cfi_restore (B_l) + cfi_restore (B_h) + strd C_l, C_h, [dst, #56] + ldrd C_l, C_h, [sp, #16] + cfi_restore (C_l) + cfi_restore (C_h) + strd D_l, D_h, [dst, #64] + ldrd D_l, D_h, [sp, #24] + cfi_restore (D_l) + cfi_restore (D_h) + add dst, dst, #72 + tst tmp2, #0x3f + bne .Ltail63aligned + ldr tmp2, [sp], #FRAME_SIZE + cfi_adjust_cfa_offset (-FRAME_SIZE) + cfi_restore (tmp2) + bx lr +#endif + + cfi_restore_state + cfi_remember_state + +.Lcpy_notaligned: + pld [src, #0] + pld [src, #64] + /* There's at least 64 bytes to copy, but there is no mutual + alignment. */ + /* Bring DST to 64-bit alignment. */ + lsls tmp2, dst, #29 + pld [src, #(2 * 64)] + beq 1f + rsbs tmp2, tmp2, #0 + sub count, count, tmp2, lsr #29 + ldrmi tmp1, [src], #4 + strmi tmp1, [dst], #4 + lsls tmp2, tmp2, #2 + ldrbne tmp1, [src], #1 + ldrhcs tmp2, [src], #2 + strbne tmp1, [dst], #1 + strhcs tmp2, [dst], #2 +1: + pld [src, #(3 * 64)] + subs count, count, #64 + ldrmi tmp2, [sp], #FRAME_SIZE + bmi .Ltail63unaligned + pld [src, #(4 * 64)] + +#ifdef USE_NEON + /* These need an extra layer of macro just to work around a + bug in the assembler's parser when an operand starts with + a {...}. */ + .macro neon_load_multi reglist, basereg + vld1.8 {\reglist}, [\basereg]! + .endm + .macro neon_store_multi reglist, basereg + vst1.8 {\reglist}, [ALIGN (\basereg, 64)]! + .endm + + neon_load_multi d0-d3, src + neon_load_multi d4-d7, src + subs count, count, #64 + bmi 2f +1: + pld [src, #(4 * 64)] + neon_store_multi d0-d3, dst + neon_load_multi d0-d3, src + neon_store_multi d4-d7, dst + neon_load_multi d4-d7, src + subs count, count, #64 + bpl 1b +2: + neon_store_multi d0-d3, dst + neon_store_multi d4-d7, dst + ands count, count, #0x3f +#else + /* Use an SMS style loop to maximize the I/O bandwidth. */ + sub src, src, #4 + sub dst, dst, #8 + subs tmp2, count, #64 /* Use tmp2 for count. */ + ldr A_l, [src, #4] + ldr A_h, [src, #8] + strd B_l, B_h, [sp, #8] + cfi_rel_offset (B_l, 8) + cfi_rel_offset (B_h, 12) + ldr B_l, [src, #12] + ldr B_h, [src, #16] + strd C_l, C_h, [sp, #16] + cfi_rel_offset (C_l, 16) + cfi_rel_offset (C_h, 20) + ldr C_l, [src, #20] + ldr C_h, [src, #24] + strd D_l, D_h, [sp, #24] + cfi_rel_offset (D_l, 24) + cfi_rel_offset (D_h, 28) + ldr D_l, [src, #28] + ldr D_h, [src, #32]! + b 1f + .p2align 6 +2: + pld [src, #(5 * 64) - (32 - 4)] + strd A_l, A_h, [dst, #40] + ldr A_l, [src, #36] + ldr A_h, [src, #40] + strd B_l, B_h, [dst, #48] + ldr B_l, [src, #44] + ldr B_h, [src, #48] + strd C_l, C_h, [dst, #56] + ldr C_l, [src, #52] + ldr C_h, [src, #56] + strd D_l, D_h, [dst, #64]! + ldr D_l, [src, #60] + ldr D_h, [src, #64]! + subs tmp2, tmp2, #64 +1: + strd A_l, A_h, [dst, #8] + ldr A_l, [src, #4] + ldr A_h, [src, #8] + strd B_l, B_h, [dst, #16] + ldr B_l, [src, #12] + ldr B_h, [src, #16] + strd C_l, C_h, [dst, #24] + ldr C_l, [src, #20] + ldr C_h, [src, #24] + strd D_l, D_h, [dst, #32] + ldr D_l, [src, #28] + ldr D_h, [src, #32] + bcs 2b + + /* Save the remaining bytes and restore the callee-saved regs. 
*/ + strd A_l, A_h, [dst, #40] + add src, src, #36 + strd B_l, B_h, [dst, #48] + ldrd B_l, B_h, [sp, #8] + cfi_restore (B_l) + cfi_restore (B_h) + strd C_l, C_h, [dst, #56] + ldrd C_l, C_h, [sp, #16] + cfi_restore (C_l) + cfi_restore (C_h) + strd D_l, D_h, [dst, #64] + ldrd D_l, D_h, [sp, #24] + cfi_restore (D_l) + cfi_restore (D_h) + add dst, dst, #72 + ands count, tmp2, #0x3f +#endif + ldr tmp2, [sp], #FRAME_SIZE + cfi_adjust_cfa_offset (-FRAME_SIZE) + cfi_restore (tmp2) + bne .Ltail63unaligned + bx lr + +END(memcpy) +libc_hidden_builtin_def (memcpy) diff --git a/REORG.TODO/sysdeps/arm/armv7/multiarch/memcpy_neon.S b/REORG.TODO/sysdeps/arm/armv7/multiarch/memcpy_neon.S new file mode 100644 index 0000000000..e60d1cc0e1 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv7/multiarch/memcpy_neon.S @@ -0,0 +1,9 @@ +#ifdef __ARM_NEON__ +/* Under __ARM_NEON__, this file defines memcpy directly. */ +libc_hidden_builtin_def (memcpy) +#else +# define memcpy __memcpy_neon +#endif + +#define MEMCPY_NEON +#include "memcpy_impl.S" diff --git a/REORG.TODO/sysdeps/arm/armv7/multiarch/memcpy_vfp.S b/REORG.TODO/sysdeps/arm/armv7/multiarch/memcpy_vfp.S new file mode 100644 index 0000000000..e008c041ed --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv7/multiarch/memcpy_vfp.S @@ -0,0 +1,7 @@ +/* Under __ARM_NEON__, memcpy_neon.S defines memcpy directly + and the __memcpy_vfp code will never be used. */ +#ifndef __ARM_NEON__ +# define MEMCPY_VFP +# define memcpy __memcpy_vfp +# include "memcpy_impl.S" +#endif diff --git a/REORG.TODO/sysdeps/arm/armv7/strcmp.S b/REORG.TODO/sysdeps/arm/armv7/strcmp.S new file mode 100644 index 0000000000..25d055754e --- /dev/null +++ b/REORG.TODO/sysdeps/arm/armv7/strcmp.S @@ -0,0 +1,519 @@ +/* strcmp implementation for ARMv7-A, optimized for Cortex-A15. + Copyright (C) 2012-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <arm-features.h> +#include <sysdep.h> + +/* Implementation of strcmp for ARMv7 when DSP instructions are + available. Use ldrd to support wider loads, provided the data + is sufficiently aligned. Use saturating arithmetic to optimize + the compares. */ + +/* Build Options: + STRCMP_PRECHECK: Run a quick pre-check of the first byte in the + string. If comparing completely random strings the pre-check will + save time, since there is a very high probability of a mismatch in + the first character: we save significant overhead if this is the + common case. However, if strings are likely to be identical (e.g. + because we're verifying a hit in a hash table), then this check + is largely redundant. 
*/ + +#define STRCMP_PRECHECK 1 + + .syntax unified + +#ifdef __ARM_BIG_ENDIAN +# define S2LO lsl +# define S2LOEQ lsleq +# define S2HI lsr +# define MSB 0x000000ff +# define LSB 0xff000000 +# define BYTE0_OFFSET 24 +# define BYTE1_OFFSET 16 +# define BYTE2_OFFSET 8 +# define BYTE3_OFFSET 0 +#else /* not __ARM_BIG_ENDIAN */ +# define S2LO lsr +# define S2LOEQ lsreq +# define S2HI lsl +# define BYTE0_OFFSET 0 +# define BYTE1_OFFSET 8 +# define BYTE2_OFFSET 16 +# define BYTE3_OFFSET 24 +# define MSB 0xff000000 +# define LSB 0x000000ff +#endif /* not __ARM_BIG_ENDIAN */ + +/* Parameters and result. */ +#define src1 r0 +#define src2 r1 +#define result r0 /* Overlaps src1. */ + +/* Internal variables. */ +#define tmp1 r4 +#define tmp2 r5 +#define const_m1 r12 + +/* Additional internal variables for 64-bit aligned data. */ +#define data1a r2 +#define data1b r3 +#define data2a r6 +#define data2b r7 +#define syndrome_a tmp1 +#define syndrome_b tmp2 + +/* Additional internal variables for 32-bit aligned data. */ +#define data1 r2 +#define data2 r3 +#define syndrome tmp2 + + +#ifndef NO_THUMB +/* This code is best on Thumb. */ + .thumb + +/* In Thumb code we can't use MVN with a register shift, but we do have ORN. */ +.macro prepare_mask mask_reg, nbits_reg + S2HI \mask_reg, const_m1, \nbits_reg +.endm +.macro apply_mask data_reg, mask_reg + orn \data_reg, \data_reg, \mask_reg +.endm +#else +/* In ARM code we don't have ORN, but we can use MVN with a register shift. */ +.macro prepare_mask mask_reg, nbits_reg + mvn \mask_reg, const_m1, S2HI \nbits_reg +.endm +.macro apply_mask data_reg, mask_reg + orr \data_reg, \data_reg, \mask_reg +.endm + +/* These clobber the condition codes, which the real Thumb cbz/cbnz + instructions do not. But it doesn't matter for any of the uses here. */ +.macro cbz reg, label + cmp \reg, #0 + beq \label +.endm +.macro cbnz reg, label + cmp \reg, #0 + bne \label +.endm +#endif + + + /* Macro to compute and return the result value for word-aligned + cases. */ + .macro strcmp_epilogue_aligned synd d1 d2 restore_r6 +#ifdef __ARM_BIG_ENDIAN + /* If data1 contains a zero byte, then syndrome will contain a 1 in + bit 7 of that byte. Otherwise, the highest set bit in the + syndrome will highlight the first different bit. It is therefore + sufficient to extract the eight bits starting with the syndrome + bit. */ + clz tmp1, \synd + lsl r1, \d2, tmp1 + .if \restore_r6 + ldrd r6, r7, [sp, #8] + .endif + lsl \d1, \d1, tmp1 + lsr result, \d1, #24 + ldrd r4, r5, [sp], #16 + cfi_remember_state + cfi_def_cfa_offset (0) + cfi_restore (r4) + cfi_restore (r5) + cfi_restore (r6) + cfi_restore (r7) + sub result, result, r1, lsr #24 + bx lr +#else + /* To use the big-endian trick we'd have to reverse all three words. + that's slower than this approach. */ + rev \synd, \synd + clz tmp1, \synd + bic tmp1, tmp1, #7 + lsr r1, \d2, tmp1 + .if \restore_r6 + ldrd r6, r7, [sp, #8] + .endif + lsr \d1, \d1, tmp1 + and result, \d1, #255 + and r1, r1, #255 + ldrd r4, r5, [sp], #16 + cfi_remember_state + cfi_def_cfa_offset (0) + cfi_restore (r4) + cfi_restore (r5) + cfi_restore (r6) + cfi_restore (r7) + sub result, result, r1 + + bx lr +#endif + .endm + + .text + .p2align 5 +.Lstrcmp_start_addr: +#if STRCMP_PRECHECK == 1 +.Lfastpath_exit: + sub r0, r2, r3 + bx lr + nop +#endif +ENTRY (strcmp) +#if STRCMP_PRECHECK == 1 + ldrb r2, [src1] + ldrb r3, [src2] + cmp r2, #1 + it cs + cmpcs r2, r3 + bne .Lfastpath_exit +#endif + strd r4, r5, [sp, #-16]! 
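
A plain-C model of the "syndrome" value that the epilogue macro above decodes and that the loops below construct with uadd8/eor/sel; strcmp_syndrome is an invented name and the sketch is illustrative, not part of the commit. Each syndrome byte is 0xff where the first string has a NUL, otherwise the XOR of the two input bytes, so a nonzero syndrome pinpoints the first byte at which the comparison can be decided.

    #include <stdint.h>

    static uint32_t
    strcmp_syndrome (uint32_t data1, uint32_t data2)
    {
      uint32_t syndrome = 0;
      for (int i = 0; i < 4; i++)
        {
          unsigned b1 = (data1 >> (i * 8)) & 0xff;
          unsigned b2 = (data2 >> (i * 8)) & 0xff;
          /* "sel" after "uadd8 b1, 0xff": GE clear only where b1 == 0.  */
          unsigned s = (b1 == 0) ? 0xff : (b1 ^ b2);
          syndrome |= (uint32_t) s << (i * 8);
        }
      return syndrome;
    }
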
+ cfi_def_cfa_offset (16) + cfi_offset (r4, -16) + cfi_offset (r5, -12) + orr tmp1, src1, src2 + strd r6, r7, [sp, #8] + cfi_offset (r6, -8) + cfi_offset (r7, -4) + mvn const_m1, #0 + lsl r2, tmp1, #29 + cbz r2, .Lloop_aligned8 + +.Lnot_aligned: + eor tmp1, src1, src2 + tst tmp1, #7 + bne .Lmisaligned8 + + /* Deal with mutual misalignment by aligning downwards and then + masking off the unwanted loaded data to prevent a difference. */ + and tmp1, src1, #7 + bic src1, src1, #7 + and tmp2, tmp1, #3 + bic src2, src2, #7 + lsl tmp2, tmp2, #3 /* Bytes -> bits. */ + ldrd data1a, data1b, [src1], #16 + tst tmp1, #4 + ldrd data2a, data2b, [src2], #16 + prepare_mask tmp1, tmp2 + apply_mask data1a, tmp1 + apply_mask data2a, tmp1 + beq .Lstart_realigned8 + apply_mask data1b, tmp1 + mov data1a, const_m1 + apply_mask data2b, tmp1 + mov data2a, const_m1 + b .Lstart_realigned8 + + /* Unwind the inner loop by a factor of 2, giving 16 bytes per + pass. */ + .p2align 5,,12 /* Don't start in the tail bytes of a cache line. */ + .p2align 2 /* Always word aligned. */ +.Lloop_aligned8: + ldrd data1a, data1b, [src1], #16 + ldrd data2a, data2b, [src2], #16 +.Lstart_realigned8: + uadd8 syndrome_b, data1a, const_m1 /* Only want GE bits, */ + eor syndrome_a, data1a, data2a + sel syndrome_a, syndrome_a, const_m1 + cbnz syndrome_a, .Ldiff_in_a + uadd8 syndrome_b, data1b, const_m1 /* Only want GE bits. */ + eor syndrome_b, data1b, data2b + sel syndrome_b, syndrome_b, const_m1 + cbnz syndrome_b, .Ldiff_in_b + + ldrd data1a, data1b, [src1, #-8] + ldrd data2a, data2b, [src2, #-8] + uadd8 syndrome_b, data1a, const_m1 /* Only want GE bits, */ + eor syndrome_a, data1a, data2a + sel syndrome_a, syndrome_a, const_m1 + uadd8 syndrome_b, data1b, const_m1 /* Only want GE bits. */ + eor syndrome_b, data1b, data2b + sel syndrome_b, syndrome_b, const_m1 + /* Can't use CBZ for backwards branch. */ + orrs syndrome_b, syndrome_b, syndrome_a /* Only need if s_a == 0 */ + beq .Lloop_aligned8 + +.Ldiff_found: + cbnz syndrome_a, .Ldiff_in_a + +.Ldiff_in_b: + strcmp_epilogue_aligned syndrome_b, data1b, data2b 1 + +.Ldiff_in_a: + cfi_restore_state + strcmp_epilogue_aligned syndrome_a, data1a, data2a 1 + + cfi_restore_state +.Lmisaligned8: + tst tmp1, #3 + bne .Lmisaligned4 + ands tmp1, src1, #3 + bne .Lmutual_align4 + + /* Unrolled by a factor of 2, to reduce the number of post-increment + operations. */ +.Lloop_aligned4: + ldr data1, [src1], #8 + ldr data2, [src2], #8 +.Lstart_realigned4: + uadd8 syndrome, data1, const_m1 /* Only need GE bits. */ + eor syndrome, data1, data2 + sel syndrome, syndrome, const_m1 + cbnz syndrome, .Laligned4_done + ldr data1, [src1, #-4] + ldr data2, [src2, #-4] + uadd8 syndrome, data1, const_m1 + eor syndrome, data1, data2 + sel syndrome, syndrome, const_m1 + cmp syndrome, #0 + beq .Lloop_aligned4 + +.Laligned4_done: + strcmp_epilogue_aligned syndrome, data1, data2, 0 + +.Lmutual_align4: + cfi_restore_state + /* Deal with mutual misalignment by aligning downwards and then + masking off the unwanted loaded data to prevent a difference. */ + lsl tmp1, tmp1, #3 /* Bytes -> bits. 
*/ + bic src1, src1, #3 + ldr data1, [src1], #8 + bic src2, src2, #3 + ldr data2, [src2], #8 + + prepare_mask tmp1, tmp1 + apply_mask data1, tmp1 + apply_mask data2, tmp1 + b .Lstart_realigned4 + +.Lmisaligned4: + ands tmp1, src1, #3 + beq .Lsrc1_aligned + sub src2, src2, tmp1 + bic src1, src1, #3 + lsls tmp1, tmp1, #31 + ldr data1, [src1], #4 + beq .Laligned_m2 + bcs .Laligned_m1 + +#if STRCMP_PRECHECK == 0 + ldrb data2, [src2, #1] + uxtb tmp1, data1, ror #BYTE1_OFFSET + subs tmp1, tmp1, data2 + bne .Lmisaligned_exit + cbz data2, .Lmisaligned_exit + +.Laligned_m2: + ldrb data2, [src2, #2] + uxtb tmp1, data1, ror #BYTE2_OFFSET + subs tmp1, tmp1, data2 + bne .Lmisaligned_exit + cbz data2, .Lmisaligned_exit + +.Laligned_m1: + ldrb data2, [src2, #3] + uxtb tmp1, data1, ror #BYTE3_OFFSET + subs tmp1, tmp1, data2 + bne .Lmisaligned_exit + add src2, src2, #4 + cbnz data2, .Lsrc1_aligned +#else /* STRCMP_PRECHECK */ + /* If we've done the pre-check, then we don't need to check the + first byte again here. */ + ldrb data2, [src2, #2] + uxtb tmp1, data1, ror #BYTE2_OFFSET + subs tmp1, tmp1, data2 + bne .Lmisaligned_exit + cbz data2, .Lmisaligned_exit + +.Laligned_m2: + ldrb data2, [src2, #3] + uxtb tmp1, data1, ror #BYTE3_OFFSET + subs tmp1, tmp1, data2 + bne .Lmisaligned_exit + cbnz data2, .Laligned_m1 +#endif + +.Lmisaligned_exit: + mov result, tmp1 + ldr r4, [sp], #16 + cfi_remember_state + cfi_def_cfa_offset (0) + cfi_restore (r4) + cfi_restore (r5) + cfi_restore (r6) + cfi_restore (r7) + bx lr + +#if STRCMP_PRECHECK == 1 +.Laligned_m1: + add src2, src2, #4 +#endif +.Lsrc1_aligned: + cfi_restore_state + /* src1 is word aligned, but src2 has no common alignment + with it. */ + ldr data1, [src1], #4 + lsls tmp1, src2, #31 /* C=src2[1], Z=src2[0]. */ + + bic src2, src2, #3 + ldr data2, [src2], #4 + bhi .Loverlap1 /* C=1, Z=0 => src2[1:0] = 0b11. */ + bcs .Loverlap2 /* C=1, Z=1 => src2[1:0] = 0b10. */ + + /* (overlap3) C=0, Z=0 => src2[1:0] = 0b01. */ +.Loverlap3: + bic tmp1, data1, #MSB + uadd8 syndrome, data1, const_m1 + eors syndrome, tmp1, data2, S2LO #8 + sel syndrome, syndrome, const_m1 + bne 4f + cbnz syndrome, 5f + ldr data2, [src2], #4 + eor tmp1, tmp1, data1 + cmp tmp1, data2, S2HI #24 + bne 6f + ldr data1, [src1], #4 + b .Loverlap3 +4: + S2LO data2, data2, #8 + b .Lstrcmp_tail + +5: + bics syndrome, syndrome, #MSB + bne .Lstrcmp_done_equal + + /* We can only get here if the MSB of data1 contains 0, so + fast-path the exit. */ + ldrb result, [src2] + ldrd r4, r5, [sp], #16 + cfi_remember_state + cfi_def_cfa_offset (0) + cfi_restore (r4) + cfi_restore (r5) + /* R6/7 Not used in this sequence. */ + cfi_restore (r6) + cfi_restore (r7) + neg result, result + bx lr + +6: + cfi_restore_state + S2LO data1, data1, #24 + and data2, data2, #LSB + b .Lstrcmp_tail + + .p2align 5,,12 /* Ensure at least 3 instructions in cache line. 
*/ +.Loverlap2: + and tmp1, data1, const_m1, S2LO #16 + uadd8 syndrome, data1, const_m1 + eors syndrome, tmp1, data2, S2LO #16 + sel syndrome, syndrome, const_m1 + bne 4f + cbnz syndrome, 5f + ldr data2, [src2], #4 + eor tmp1, tmp1, data1 + cmp tmp1, data2, S2HI #16 + bne 6f + ldr data1, [src1], #4 + b .Loverlap2 +4: + S2LO data2, data2, #16 + b .Lstrcmp_tail +5: + ands syndrome, syndrome, const_m1, S2LO #16 + bne .Lstrcmp_done_equal + + ldrh data2, [src2] + S2LO data1, data1, #16 +#ifdef __ARM_BIG_ENDIAN + lsl data2, data2, #16 +#endif + b .Lstrcmp_tail + +6: + S2LO data1, data1, #16 + and data2, data2, const_m1, S2LO #16 + b .Lstrcmp_tail + + .p2align 5,,12 /* Ensure at least 3 instructions in cache line. */ +.Loverlap1: + and tmp1, data1, #LSB + uadd8 syndrome, data1, const_m1 + eors syndrome, tmp1, data2, S2LO #24 + sel syndrome, syndrome, const_m1 + bne 4f + cbnz syndrome, 5f + ldr data2, [src2], #4 + eor tmp1, tmp1, data1 + cmp tmp1, data2, S2HI #8 + bne 6f + ldr data1, [src1], #4 + b .Loverlap1 +4: + S2LO data2, data2, #24 + b .Lstrcmp_tail +5: + tst syndrome, #LSB + bne .Lstrcmp_done_equal + ldr data2, [src2] +6: + S2LO data1, data1, #8 + bic data2, data2, #MSB + b .Lstrcmp_tail + +.Lstrcmp_done_equal: + mov result, #0 + ldrd r4, r5, [sp], #16 + cfi_remember_state + cfi_def_cfa_offset (0) + cfi_restore (r4) + cfi_restore (r5) + /* R6/7 not used in this sequence. */ + cfi_restore (r6) + cfi_restore (r7) + bx lr + +.Lstrcmp_tail: + cfi_restore_state +#ifndef __ARM_BIG_ENDIAN + rev data1, data1 + rev data2, data2 + /* Now everything looks big-endian... */ +#endif + uadd8 tmp1, data1, const_m1 + eor tmp1, data1, data2 + sel syndrome, tmp1, const_m1 + clz tmp1, syndrome + lsl data1, data1, tmp1 + lsl data2, data2, tmp1 + lsr result, data1, #24 + ldrd r4, r5, [sp], #16 + cfi_def_cfa_offset (0) + cfi_restore (r4) + cfi_restore (r5) + /* R6/7 not used in this sequence. */ + cfi_restore (r6) + cfi_restore (r7) + sub result, result, data2, lsr #24 + bx lr +END (strcmp) +libc_hidden_builtin_def (strcmp) diff --git a/REORG.TODO/sysdeps/arm/atomic-machine.h b/REORG.TODO/sysdeps/arm/atomic-machine.h new file mode 100644 index 0000000000..c22d05b766 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/atomic-machine.h @@ -0,0 +1,155 @@ +/* Atomic operations. Pure ARM version. + Copyright (C) 2002-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. 
*/ + +#include <stdint.h> + +typedef int8_t atomic8_t; +typedef uint8_t uatomic8_t; +typedef int_fast8_t atomic_fast8_t; +typedef uint_fast8_t uatomic_fast8_t; + +typedef int32_t atomic32_t; +typedef uint32_t uatomic32_t; +typedef int_fast32_t atomic_fast32_t; +typedef uint_fast32_t uatomic_fast32_t; + +typedef intptr_t atomicptr_t; +typedef uintptr_t uatomicptr_t; +typedef intmax_t atomic_max_t; +typedef uintmax_t uatomic_max_t; + +#define __HAVE_64B_ATOMICS 0 +#define USE_ATOMIC_COMPILER_BUILTINS 0 +#define ATOMIC_EXCHANGE_USES_CAS 1 + +void __arm_link_error (void); + +#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 +# define atomic_full_barrier() __sync_synchronize () +#else +# define atomic_full_barrier() __arm_assisted_full_barrier () +#endif + +/* An OS-specific atomic-machine.h file will define this macro if + the OS can provide something. If not, we'll fail to build + with a compiler that doesn't supply the operation. */ +#ifndef __arm_assisted_full_barrier +# define __arm_assisted_full_barrier() __arm_link_error() +#endif + +/* Use the atomic builtins provided by GCC in case the backend provides + a pattern to do this efficiently. */ +#ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 + +# define atomic_exchange_acq(mem, value) \ + __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_ACQUIRE) + +# define atomic_exchange_rel(mem, value) \ + __atomic_val_bysize (__arch_exchange, int, mem, value, __ATOMIC_RELEASE) + +/* Atomic exchange (without compare). */ + +# define __arch_exchange_8_int(mem, newval, model) \ + (__arm_link_error (), (typeof (*mem)) 0) + +# define __arch_exchange_16_int(mem, newval, model) \ + (__arm_link_error (), (typeof (*mem)) 0) + +# define __arch_exchange_32_int(mem, newval, model) \ + __atomic_exchange_n (mem, newval, model) + +# define __arch_exchange_64_int(mem, newval, model) \ + (__arm_link_error (), (typeof (*mem)) 0) + +/* Compare and exchange with "acquire" semantics, ie barrier after. */ + +# define atomic_compare_and_exchange_bool_acq(mem, new, old) \ + __atomic_bool_bysize (__arch_compare_and_exchange_bool, int, \ + mem, new, old, __ATOMIC_ACQUIRE) + +# define atomic_compare_and_exchange_val_acq(mem, new, old) \ + __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ + mem, new, old, __ATOMIC_ACQUIRE) + +/* Compare and exchange with "release" semantics, ie barrier before. */ + +# define atomic_compare_and_exchange_val_rel(mem, new, old) \ + __atomic_val_bysize (__arch_compare_and_exchange_val, int, \ + mem, new, old, __ATOMIC_RELEASE) + +/* Compare and exchange. + For all "bool" routines, we return FALSE if exchange succesful. 
*/ + +# define __arch_compare_and_exchange_bool_8_int(mem, newval, oldval, model) \ + ({__arm_link_error (); 0; }) + +# define __arch_compare_and_exchange_bool_16_int(mem, newval, oldval, model) \ + ({__arm_link_error (); 0; }) + +# define __arch_compare_and_exchange_bool_32_int(mem, newval, oldval, model) \ + ({ \ + typeof (*mem) __oldval = (oldval); \ + !__atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ + model, __ATOMIC_RELAXED); \ + }) + +# define __arch_compare_and_exchange_bool_64_int(mem, newval, oldval, model) \ + ({__arm_link_error (); 0; }) + +# define __arch_compare_and_exchange_val_8_int(mem, newval, oldval, model) \ + ({__arm_link_error (); oldval; }) + +# define __arch_compare_and_exchange_val_16_int(mem, newval, oldval, model) \ + ({__arm_link_error (); oldval; }) + +# define __arch_compare_and_exchange_val_32_int(mem, newval, oldval, model) \ + ({ \ + typeof (*mem) __oldval = (oldval); \ + __atomic_compare_exchange_n (mem, (void *) &__oldval, newval, 0, \ + model, __ATOMIC_RELAXED); \ + __oldval; \ + }) + +# define __arch_compare_and_exchange_val_64_int(mem, newval, oldval, model) \ + ({__arm_link_error (); oldval; }) + +#else +# define __arch_compare_and_exchange_val_32_acq(mem, newval, oldval) \ + __arm_assisted_compare_and_exchange_val_32_acq ((mem), (newval), (oldval)) +#endif + +#ifndef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_4 +/* We don't support atomic operations on any non-word types. + So make them link errors. */ +# define __arch_compare_and_exchange_val_8_acq(mem, newval, oldval) \ + ({ __arm_link_error (); oldval; }) + +# define __arch_compare_and_exchange_val_16_acq(mem, newval, oldval) \ + ({ __arm_link_error (); oldval; }) + +# define __arch_compare_and_exchange_val_64_acq(mem, newval, oldval) \ + ({ __arm_link_error (); oldval; }) +#endif + +/* An OS-specific atomic-machine.h file will define this macro if + the OS can provide something. If not, we'll fail to build + with a compiler that doesn't supply the operation. */ +#ifndef __arm_assisted_compare_and_exchange_val_32_acq +# define __arm_assisted_compare_and_exchange_val_32_acq(mem, newval, oldval) \ + ({ __arm_link_error (); oldval; }) +#endif diff --git a/REORG.TODO/sysdeps/arm/backtrace.c b/REORG.TODO/sysdeps/arm/backtrace.c new file mode 100644 index 0000000000..14a8053b60 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/backtrace.c @@ -0,0 +1,126 @@ +/* Return backtrace of current program state. + Copyright (C) 2008-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Kazu Hirata <kazu@codesourcery.com>, 2008. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. 
*/ + +#include <libc-lock.h> +#include <dlfcn.h> +#include <execinfo.h> +#include <stdlib.h> +#include <unwind.h> + +struct trace_arg +{ + void **array; + int cnt, size; +}; + +#ifdef SHARED +static _Unwind_Reason_Code (*unwind_backtrace) (_Unwind_Trace_Fn, void *); +static _Unwind_VRS_Result (*unwind_vrs_get) (_Unwind_Context *, + _Unwind_VRS_RegClass, + _uw, + _Unwind_VRS_DataRepresentation, + void *); + +static void *libgcc_handle; + +static void +init (void) +{ + libgcc_handle = __libc_dlopen ("libgcc_s.so.1"); + + if (libgcc_handle == NULL) + return; + + unwind_backtrace = __libc_dlsym (libgcc_handle, "_Unwind_Backtrace"); + unwind_vrs_get = __libc_dlsym (libgcc_handle, "_Unwind_VRS_Get"); + if (unwind_vrs_get == NULL) + unwind_backtrace = NULL; +} + +/* This function is identical to "_Unwind_GetGR", except that it uses + "unwind_vrs_get" instead of "_Unwind_VRS_Get". */ +static inline _Unwind_Word +unwind_getgr (_Unwind_Context *context, int regno) +{ + _uw val; + unwind_vrs_get (context, _UVRSC_CORE, regno, _UVRSD_UINT32, &val); + return val; +} + +/* This macro is identical to the _Unwind_GetIP macro, except that it + uses "unwind_getgr" instead of "_Unwind_GetGR". */ +# define unwind_getip(context) \ + (unwind_getgr (context, 15) & ~(_Unwind_Word)1) +#else +# define unwind_backtrace _Unwind_Backtrace +# define unwind_getip _Unwind_GetIP +#endif + +static _Unwind_Reason_Code +backtrace_helper (struct _Unwind_Context *ctx, void *a) +{ + struct trace_arg *arg = a; + + /* We are first called with address in the __backtrace function. + Skip it. */ + if (arg->cnt != -1) + arg->array[arg->cnt] = (void *) unwind_getip (ctx); + if (++arg->cnt == arg->size) + return _URC_END_OF_STACK; + return _URC_NO_REASON; +} + +int +__backtrace (void **array, int size) +{ + struct trace_arg arg = { .array = array, .size = size, .cnt = -1 }; + + if (size <= 0) + return 0; + +#ifdef SHARED + __libc_once_define (static, once); + + __libc_once (once, init); + if (unwind_backtrace == NULL) + return 0; +#endif + + unwind_backtrace (backtrace_helper, &arg); + + if (arg.cnt > 1 && arg.array[arg.cnt - 1] == NULL) + --arg.cnt; + return arg.cnt != -1 ? arg.cnt : 0; +} +weak_alias (__backtrace, backtrace) +libc_hidden_def (__backtrace) + + +#ifdef SHARED +/* Free all resources if necessary. */ +libc_freeres_fn (free_mem) +{ + unwind_backtrace = NULL; + if (libgcc_handle != NULL) + { + __libc_dlclose (libgcc_handle); + libgcc_handle = NULL; + } +} +#endif diff --git a/REORG.TODO/sysdeps/arm/bits/endian.h b/REORG.TODO/sysdeps/arm/bits/endian.h new file mode 100644 index 0000000000..f49f6ab1c9 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/bits/endian.h @@ -0,0 +1,10 @@ +#ifndef _ENDIAN_H +# error "Never use <bits/endian.h> directly; include <endian.h> instead." +#endif + +/* ARM can be either big or little endian. */ +#ifdef __ARMEB__ +#define __BYTE_ORDER __BIG_ENDIAN +#else +#define __BYTE_ORDER __LITTLE_ENDIAN +#endif diff --git a/REORG.TODO/sysdeps/arm/bits/fenv.h b/REORG.TODO/sysdeps/arm/bits/fenv.h new file mode 100644 index 0000000000..fe9aeb36bb --- /dev/null +++ b/REORG.TODO/sysdeps/arm/bits/fenv.h @@ -0,0 +1,90 @@ +/* Copyright (C) 2004-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. 
+ + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _FENV_H +# error "Never use <bits/fenv.h> directly; include <fenv.h> instead." +#endif + +/* Define bits representing exceptions in the FPU status word. */ +enum + { + FE_INVALID = +#define FE_INVALID 1 + FE_INVALID, + FE_DIVBYZERO = +#define FE_DIVBYZERO 2 + FE_DIVBYZERO, + FE_OVERFLOW = +#define FE_OVERFLOW 4 + FE_OVERFLOW, + FE_UNDERFLOW = +#define FE_UNDERFLOW 8 + FE_UNDERFLOW, + FE_INEXACT = +#define FE_INEXACT 16 + FE_INEXACT, + }; + +/* Amount to shift by to convert an exception to a mask bit. */ +#define FE_EXCEPT_SHIFT 8 + +/* All supported exceptions. */ +#define FE_ALL_EXCEPT \ + (FE_INVALID | FE_DIVBYZERO | FE_OVERFLOW | FE_UNDERFLOW | FE_INEXACT) + +/* VFP supports all of the four defined rounding modes. */ +enum + { + FE_TONEAREST = +#define FE_TONEAREST 0 + FE_TONEAREST, + FE_UPWARD = +#define FE_UPWARD 0x400000 + FE_UPWARD, + FE_DOWNWARD = +#define FE_DOWNWARD 0x800000 + FE_DOWNWARD, + FE_TOWARDZERO = +#define FE_TOWARDZERO 0xc00000 + FE_TOWARDZERO + }; + +/* Type representing exception flags. */ +typedef unsigned int fexcept_t; + +/* Type representing floating-point environment. */ +typedef struct + { + unsigned int __cw; + } +fenv_t; + +/* If the default argument is used we use this value. */ +#define FE_DFL_ENV ((const fenv_t *) -1l) + +#ifdef __USE_GNU +/* Floating-point environment where none of the exceptions are masked. */ +# define FE_NOMASK_ENV ((const fenv_t *) -2) +#endif + +#if __GLIBC_USE (IEC_60559_BFP_EXT) +/* Type representing floating-point control modes. */ +typedef unsigned int femode_t; + +/* Default floating-point control modes. */ +# define FE_DFL_MODE ((const femode_t *) -1L) +#endif diff --git a/REORG.TODO/sysdeps/arm/bits/link.h b/REORG.TODO/sysdeps/arm/bits/link.h new file mode 100644 index 0000000000..6e6d50ed27 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/bits/link.h @@ -0,0 +1,65 @@ +/* Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _LINK_H +# error "Never include <bits/link.h> directly; use <link.h> instead." +#endif + + +/* Registers for entry into PLT on ARM. */ +typedef struct La_arm_regs +{ + uint32_t lr_reg[4]; + uint32_t lr_sp; + uint32_t lr_lr; + /* Coprocessor registers used for argument passing. The data + stored here depends on the coprocessors available in the + system which are used for function calls in the current ABI. + VFP uses eight 64-bit registers, and iWMMXt uses ten. 
*/ + uint32_t lr_coproc[42]; +} La_arm_regs; + +/* Return values for calls from PLT on ARM. */ +typedef struct La_arm_retval +{ + /* Up to four integer registers can be used for a return value in + some ABIs (APCS complex long double). */ + uint32_t lrv_reg[4]; + + /* Any coprocessor registers which might be used to return values + in the current ABI. */ + uint32_t lrv_coproc[12]; +} La_arm_retval; + + +__BEGIN_DECLS + +extern Elf32_Addr la_arm_gnu_pltenter (Elf32_Sym *__sym, unsigned int __ndx, + uintptr_t *__refcook, + uintptr_t *__defcook, + La_arm_regs *__regs, + unsigned int *__flags, + const char *__symname, + long int *__framesizep); +extern unsigned int la_arm_gnu_pltexit (Elf32_Sym *__sym, unsigned int __ndx, + uintptr_t *__refcook, + uintptr_t *__defcook, + const La_arm_regs *__inregs, + La_arm_retval *__outregs, + const char *__symname); + +__END_DECLS diff --git a/REORG.TODO/sysdeps/arm/bits/setjmp.h b/REORG.TODO/sysdeps/arm/bits/setjmp.h new file mode 100644 index 0000000000..a0b39ae35c --- /dev/null +++ b/REORG.TODO/sysdeps/arm/bits/setjmp.h @@ -0,0 +1,36 @@ +/* Copyright (C) 2004-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Define the machine-dependent type `jmp_buf'. ARM EABI version. */ + +#ifndef _BITS_SETJMP_H +#define _BITS_SETJMP_H 1 + +#if !defined _SETJMP_H && !defined _PTHREAD_H +# error "Never include <bits/setjmp.h> directly; use <setjmp.h> instead." +#endif + +#ifndef __ASSEMBLER__ +/* The exact set of registers saved may depend on the particular core + in use, as some coprocessor registers may need to be saved. The C + Library ABI requires that the buffer be 8-byte aligned, and + recommends that the buffer contain 64 words. The first 26 words + are occupied by sp, lr, v1-v6, sl, fp, and d8-d15. */ +typedef int __jmp_buf[64] __attribute__((__aligned__ (8))); +#endif + +#endif diff --git a/REORG.TODO/sysdeps/arm/bsd-_setjmp.S b/REORG.TODO/sysdeps/arm/bsd-_setjmp.S new file mode 100644 index 0000000000..3fc5ef49cf --- /dev/null +++ b/REORG.TODO/sysdeps/arm/bsd-_setjmp.S @@ -0,0 +1,29 @@ +/* BSD `_setjmp' entry point to `sigsetjmp (..., 0)'. ARM version. + Copyright (C) 1997-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* This just does a tail-call to `__sigsetjmp (ARG, 1)'. + We cannot do it in C because it must be a tail-call, so frame-unwinding + in setjmp doesn't clobber the state restored by longjmp. */ + +#include <sysdep.h> + +ENTRY (_setjmp) + mov r1, #0 + b PLTJMP(HIDDEN_JUMPTARGET(__sigsetjmp)) +END (_setjmp) +libc_hidden_def (_setjmp) diff --git a/REORG.TODO/sysdeps/arm/bsd-setjmp.S b/REORG.TODO/sysdeps/arm/bsd-setjmp.S new file mode 100644 index 0000000000..2a07c6add1 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/bsd-setjmp.S @@ -0,0 +1,28 @@ +/* BSD `setjmp' entry point to `sigsetjmp (..., 1)'. ARM version. + Copyright (C) 1997-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* This just does a tail-call to `__sigsetjmp (ARG, 1)'. + We cannot do it in C because it must be a tail-call, so frame-unwinding + in setjmp doesn't clobber the state restored by longjmp. */ + +#include <sysdep.h> + +ENTRY (setjmp) + mov r1, #1 + b PLTJMP(HIDDEN_JUMPTARGET(__sigsetjmp)) +END (setjmp) diff --git a/REORG.TODO/sysdeps/arm/configure b/REORG.TODO/sysdeps/arm/configure new file mode 100644 index 0000000000..431e843b2b --- /dev/null +++ b/REORG.TODO/sysdeps/arm/configure @@ -0,0 +1,279 @@ +# This file is generated from configure.ac by Autoconf. DO NOT EDIT! + # Local configure fragment for sysdeps/arm. + +$as_echo "#define PI_STATIC_AND_HIDDEN 1" >>confdefs.h + + +# We check to see if the compiler and flags are +# selecting the hard-float ABI and if they are then +# we set libc_cv_arm_pcs_vfp to yes which causes +# HAVE_ARM_PCS_VFP to be defined in config.h and +# in include/libc-symbols.h and thus available to +# shlib-versions to select the appropriate name for +# the dynamic linker via %ifdef. + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +$as_echo_n "checking for grep that handles long lines and -e... " >&6; } +if ${ac_cv_path_GREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$GREP"; then + ac_path_GREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in grep ggrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_GREP" || continue +# Check for GNU ac_path_GREP and select it if it is found. 
+ # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_GREP=$GREP +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +$as_echo "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +$as_echo_n "checking for egrep... " >&6; } +if ${ac_cv_path_EGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + as_fn_executable_p "$ac_path_EGREP" || continue +# Check for GNU ac_path_EGREP and select it if it is found. + # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? 
"no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_EGREP=$EGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +$as_echo "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the compiler is using the ARM hard-float ABI" >&5 +$as_echo_n "checking whether the compiler is using the ARM hard-float ABI... " >&6; } +if ${libc_cv_arm_pcs_vfp+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __ARM_PCS_VFP + yes + #endif + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "yes" >/dev/null 2>&1; then : + libc_cv_arm_pcs_vfp=yes +else + libc_cv_arm_pcs_vfp=no +fi +rm -f conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_arm_pcs_vfp" >&5 +$as_echo "$libc_cv_arm_pcs_vfp" >&6; } +if test $libc_cv_arm_pcs_vfp = yes; then + $as_echo "#define HAVE_ARM_PCS_VFP 1" >>confdefs.h + + config_vars="$config_vars +default-abi = hard" +else + config_vars="$config_vars +default-abi = soft" +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether PC-relative relocs in movw/movt work properly" >&5 +$as_echo_n "checking whether PC-relative relocs in movw/movt work properly... " >&6; } +if ${libc_cv_arm_pcrel_movw+:} false; then : + $as_echo_n "(cached) " >&6 +else + +cat > conftest.s <<\EOF + .syntax unified + .arm + .arch armv7-a + + .text + .globl foo + .type foo,%function +foo: movw r0, #:lower16:symbol - 1f - 8 + movt r0, #:upper16:symbol - 1f - 8 +1: add r0, pc + @ And now a case with a local symbol. + movw r0, #:lower16:3f - 2f - 8 + movt r0, #:upper16:3f - 2f - 8 +2: add r0, pc + bx lr + +.data + .globl symbol + .hidden symbol +symbol: .long 23 +3: .long 17 +EOF +libc_cv_arm_pcrel_movw=no +${CC-cc} $CFLAGS $CPPFLAGS $LDFLAGS \ + -nostartfiles -nostdlib -shared \ + -o conftest.so conftest.s 1>&5 2>&5 && +LC_ALL=C $READELF -dr conftest.so > conftest.dr 2>&5 && +{ + cat conftest.dr 1>&5 + fgrep 'TEXTREL +R_ARM_NONE' conftest.dr > /dev/null || libc_cv_arm_pcrel_movw=yes +} +rm -f conftest* +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_arm_pcrel_movw" >&5 +$as_echo "$libc_cv_arm_pcrel_movw" >&6; } +if test $libc_cv_arm_pcrel_movw = yes; then + $as_echo "#define ARM_PCREL_MOVW_OK 1" >>confdefs.h + +fi + +# This was buggy in assemblers from GNU binutils versions before 2.25.1 +# (it's known to be broken in 2.24 and 2.25; see +# https://sourceware.org/bugzilla/show_bug.cgi?id=18383). +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether TPOFF relocs with addends are assembled correctly" >&5 +$as_echo_n "checking whether TPOFF relocs with addends are assembled correctly... " >&6; } +if ${libc_cv_arm_tpoff_addend+:} false; then : + $as_echo_n "(cached) " >&6 +else + +cat > conftest.s <<\EOF + .syntax unified + .arm + .arch armv7-a + + .text +foo: + .word tbase(tpoff)+4 + + .section .tdata,"awT",%progbits + .word -4 +tbase: .word 0 + .word 4 +EOF +libc_cv_arm_tpoff_addend=no +${CC-cc} -c $CFLAGS $CPPFLAGS \ + -o conftest.o conftest.s 1>&5 2>&5 && +LC_ALL=C $READELF -x.text conftest.o > conftest.x 2>&5 && +{ + cat conftest.x 1>&5 + $AWK 'BEGIN { result = 2 } +$1 ~ /0x0+/ && $2 ~ /[0-9a-f]+/ { +# Check for little-endian or big-endian encoding of 4 in the in-place addend. + result = ($2 == "04000000" || $2 == "00000004") ? 
0 : 1 +} +END { exit(result) } +' conftest.x 2>&5 && libc_cv_arm_tpoff_addend=yes +} +rm -f conftest* +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libc_cv_arm_tpoff_addend" >&5 +$as_echo "$libc_cv_arm_tpoff_addend" >&6; } +if test $libc_cv_arm_tpoff_addend = no; then + config_vars="$config_vars +test-xfail-tst-tlsalign = yes" + config_vars="$config_vars +test-xfail-tst-tlsalign-static = yes" +fi + + +libc_cv_gcc_unwind_find_fde=no + +# Remove -fno-unwind-tables that was added in sysdeps/arm/preconfigure.ac. +CFLAGS=${CFLAGS% -fno-unwind-tables} diff --git a/REORG.TODO/sysdeps/arm/configure.ac b/REORG.TODO/sysdeps/arm/configure.ac new file mode 100644 index 0000000000..90cdd69c75 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/configure.ac @@ -0,0 +1,112 @@ +GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory. +# Local configure fragment for sysdeps/arm. + +dnl It is always possible to access static and hidden symbols in an +dnl position independent way. This has been true since GCC 4.1, +dnl which is older than the minimum version required to build libc. +AC_DEFINE(PI_STATIC_AND_HIDDEN) + +# We check to see if the compiler and flags are +# selecting the hard-float ABI and if they are then +# we set libc_cv_arm_pcs_vfp to yes which causes +# HAVE_ARM_PCS_VFP to be defined in config.h and +# in include/libc-symbols.h and thus available to +# shlib-versions to select the appropriate name for +# the dynamic linker via %ifdef. +AC_CACHE_CHECK([whether the compiler is using the ARM hard-float ABI], + [libc_cv_arm_pcs_vfp], + [AC_EGREP_CPP(yes,[#ifdef __ARM_PCS_VFP + yes + #endif + ], libc_cv_arm_pcs_vfp=yes, libc_cv_arm_pcs_vfp=no)]) +if test $libc_cv_arm_pcs_vfp = yes; then + AC_DEFINE(HAVE_ARM_PCS_VFP) + LIBC_CONFIG_VAR([default-abi], [hard]) +else + LIBC_CONFIG_VAR([default-abi], [soft]) +fi + +AC_CACHE_CHECK([whether PC-relative relocs in movw/movt work properly], + libc_cv_arm_pcrel_movw, [ +cat > conftest.s <<\EOF + .syntax unified + .arm + .arch armv7-a + + .text + .globl foo + .type foo,%function +foo: movw r0, #:lower16:symbol - 1f - 8 + movt r0, #:upper16:symbol - 1f - 8 +1: add r0, pc + @ And now a case with a local symbol. + movw r0, #:lower16:3f - 2f - 8 + movt r0, #:upper16:3f - 2f - 8 +2: add r0, pc + bx lr + +.data + .globl symbol + .hidden symbol +symbol: .long 23 +3: .long 17 +EOF +libc_cv_arm_pcrel_movw=no +${CC-cc} $CFLAGS $CPPFLAGS $LDFLAGS \ + -nostartfiles -nostdlib -shared \ + -o conftest.so conftest.s 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD && +LC_ALL=C $READELF -dr conftest.so > conftest.dr 2>&AS_MESSAGE_LOG_FD && +{ + cat conftest.dr 1>&AS_MESSAGE_LOG_FD + fgrep 'TEXTREL +R_ARM_NONE' conftest.dr > /dev/null || libc_cv_arm_pcrel_movw=yes +} +rm -f conftest*]) +if test $libc_cv_arm_pcrel_movw = yes; then + AC_DEFINE([ARM_PCREL_MOVW_OK]) +fi + +# This was buggy in assemblers from GNU binutils versions before 2.25.1 +# (it's known to be broken in 2.24 and 2.25; see +# https://sourceware.org/bugzilla/show_bug.cgi?id=18383). 
+AC_CACHE_CHECK([whether TPOFF relocs with addends are assembled correctly], + libc_cv_arm_tpoff_addend, [ +cat > conftest.s <<\EOF + .syntax unified + .arm + .arch armv7-a + + .text +foo: + .word tbase(tpoff)+4 + + .section .tdata,"awT",%progbits + .word -4 +tbase: .word 0 + .word 4 +EOF +libc_cv_arm_tpoff_addend=no +${CC-cc} -c $CFLAGS $CPPFLAGS \ + -o conftest.o conftest.s 1>&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD && +LC_ALL=C $READELF -x.text conftest.o > conftest.x 2>&AS_MESSAGE_LOG_FD && +{ + cat conftest.x 1>&AS_MESSAGE_LOG_FD + $AWK 'BEGIN { result = 2 } +$1 ~ /0x0+/ && $2 ~ /[[0-9a-f]]+/ { +# Check for little-endian or big-endian encoding of 4 in the in-place addend. + result = ($2 == "04000000" || $2 == "00000004") ? 0 : 1 +} +END { exit(result) } +' conftest.x 2>&AS_MESSAGE_LOG_FD && libc_cv_arm_tpoff_addend=yes +} +rm -f conftest*]) +if test $libc_cv_arm_tpoff_addend = no; then + LIBC_CONFIG_VAR([test-xfail-tst-tlsalign], [yes]) + LIBC_CONFIG_VAR([test-xfail-tst-tlsalign-static], [yes]) +fi + + +libc_cv_gcc_unwind_find_fde=no + +# Remove -fno-unwind-tables that was added in sysdeps/arm/preconfigure.ac. +CFLAGS=${CFLAGS% -fno-unwind-tables} diff --git a/REORG.TODO/sysdeps/arm/crti.S b/REORG.TODO/sysdeps/arm/crti.S new file mode 100644 index 0000000000..c71f5bd513 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/crti.S @@ -0,0 +1,95 @@ +/* Special .init and .fini section support for ARM. + Copyright (C) 1995-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + In addition to the permissions in the GNU Lesser General Public + License, the Free Software Foundation gives you unlimited + permission to link the compiled version of this file with other + programs, and to distribute those programs without any restriction + coming from the use of this file. (The GNU Lesser General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into another program.) + + Note that people who make modified versions of this file are not + obligated to grant this special exception for their modified + versions; it is their choice whether to do so. The GNU Lesser + General Public License gives permission to release a modified + version without this exception; this exception also makes it + possible to release a modified version which carries forward this + exception. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* crti.S puts a function prologue at the beginning of the .init and + .fini sections and defines global symbols for those addresses, so + they can be called as functions. The symbols _init and _fini are + magic and cause the linker to emit DT_INIT and DT_FINI. */ + +/* Always build .init and .fini sections in ARM mode. 
*/ +#define NO_THUMB +#include <libc-symbols.h> +#include <sysdep.h> + +#ifndef PREINIT_FUNCTION +# define PREINIT_FUNCTION __gmon_start__ +#endif + +#ifndef PREINIT_FUNCTION_WEAK +# define PREINIT_FUNCTION_WEAK 1 +#endif + +#if PREINIT_FUNCTION_WEAK + weak_extern (PREINIT_FUNCTION) +#else + .hidden PREINIT_FUNCTION +#endif + +#if PREINIT_FUNCTION_WEAK + .p2align 2 + .type call_weak_fn, %function +call_weak_fn: + ldr r3, .LGOT + ldr r2, .LGOT+4 +.LPIC: + add r3, pc, r3 + ldr r2, [r3, r2] + cmp r2, #0 + bxeq lr + b PREINIT_FUNCTION + .p2align 2 +.LGOT: + .word _GLOBAL_OFFSET_TABLE_-(.LPIC+8) + .word PREINIT_FUNCTION(GOT) +#endif + + .section .init,"ax",%progbits + .p2align 2 + .globl _init + .type _init, %function +_init: + push {r3, lr} +#if PREINIT_FUNCTION_WEAK + bl call_weak_fn +#else + bl PREINIT_FUNCTION +#endif + + .section .fini,"ax",%progbits + .p2align 2 + .globl _fini + .type _fini, %function +_fini: + push {r3, lr} diff --git a/REORG.TODO/sysdeps/arm/crtn.S b/REORG.TODO/sysdeps/arm/crtn.S new file mode 100644 index 0000000000..982933826d --- /dev/null +++ b/REORG.TODO/sysdeps/arm/crtn.S @@ -0,0 +1,57 @@ +/* Special .init and .fini section support for ARM. + Copyright (C) 1995-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + In addition to the permissions in the GNU Lesser General Public + License, the Free Software Foundation gives you unlimited + permission to link the compiled version of this file with other + programs, and to distribute those programs without any restriction + coming from the use of this file. (The GNU Lesser General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into another program.) + + Note that people who make modified versions of this file are not + obligated to grant this special exception for their modified + versions; it is their choice whether to do so. The GNU Lesser + General Public License gives permission to release a modified + version without this exception; this exception also makes it + possible to release a modified version which carries forward this + exception. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Always build .init and .fini sections in ARM mode. */ +#define NO_THUMB +#include <sysdep.h> + +/* crtn.S puts function epilogues in the .init and .fini sections + corresponding to the prologues in crti.S. 
*/ + + .section .init,"ax",%progbits +#ifdef __ARM_ARCH_4T__ + pop {r3, lr} + bx lr +#else + pop {r3, pc} +#endif + + .section .fini,"ax",%progbits +#ifdef __ARM_ARCH_4T__ + pop {r3, lr} + bx lr +#else + pop {r3, pc} +#endif diff --git a/REORG.TODO/sysdeps/arm/dl-irel.h b/REORG.TODO/sysdeps/arm/dl-irel.h new file mode 100644 index 0000000000..dfd7850433 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/dl-irel.h @@ -0,0 +1,52 @@ +/* Machine-dependent ELF indirect relocation inline functions. + ARM version. + Copyright (C) 2009-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _DL_IREL_H +#define _DL_IREL_H + +#include <stdio.h> +#include <unistd.h> +#include <ldsodefs.h> + +#define ELF_MACHINE_IREL 1 + +static inline Elf32_Addr +__attribute ((always_inline)) +elf_ifunc_invoke (Elf32_Addr addr) +{ + return ((Elf32_Addr (*) (unsigned long int)) (addr)) (GLRO(dl_hwcap)); +} + +static inline void +__attribute ((always_inline)) +elf_irel (const Elf32_Rel *reloc) +{ + Elf32_Addr *const reloc_addr = (void *) reloc->r_offset; + const unsigned long int r_type = ELF32_R_TYPE (reloc->r_info); + + if (__builtin_expect (r_type == R_ARM_IRELATIVE, 1)) + { + Elf32_Addr value = elf_ifunc_invoke (*reloc_addr); + *reloc_addr = value; + } + else + __libc_fatal ("unexpected reloc type in static binary"); +} + +#endif /* dl-irel.h */ diff --git a/REORG.TODO/sysdeps/arm/dl-lookupcfg.h b/REORG.TODO/sysdeps/arm/dl-lookupcfg.h new file mode 100644 index 0000000000..23b4e7e9af --- /dev/null +++ b/REORG.TODO/sysdeps/arm/dl-lookupcfg.h @@ -0,0 +1,27 @@ +/* Configuration of lookup functions. + Copyright (C) 2006-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#define DL_UNMAP_IS_SPECIAL + +#include_next <dl-lookupcfg.h> + +struct link_map; + +extern void internal_function _dl_unmap (struct link_map *map); + +#define DL_UNMAP(map) _dl_unmap (map) diff --git a/REORG.TODO/sysdeps/arm/dl-machine.h b/REORG.TODO/sysdeps/arm/dl-machine.h new file mode 100644 index 0000000000..2c72972bd2 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/dl-machine.h @@ -0,0 +1,698 @@ +/* Machine-dependent ELF dynamic relocation inline functions. ARM version. 
+ Copyright (C) 1995-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef dl_machine_h +#define dl_machine_h + +#define ELF_MACHINE_NAME "ARM" + +#include <sys/param.h> +#include <tls.h> +#include <dl-tlsdesc.h> +#include <dl-irel.h> + +#ifndef CLEAR_CACHE +# error CLEAR_CACHE definition required to handle TEXTREL +#endif + +/* Return nonzero iff ELF header is compatible with the running host. */ +static inline int __attribute__ ((unused)) +elf_machine_matches_host (const Elf32_Ehdr *ehdr) +{ + return ehdr->e_machine == EM_ARM; +} + + +/* Return the link-time address of _DYNAMIC. Conveniently, this is the + first element of the GOT. */ +static inline Elf32_Addr __attribute__ ((unused)) +elf_machine_dynamic (void) +{ + /* Declaring this hidden ensures that a PC-relative reference is used. */ + extern const Elf32_Addr _GLOBAL_OFFSET_TABLE_[] attribute_hidden; + return _GLOBAL_OFFSET_TABLE_[0]; +} + + +/* Return the run-time load address of the shared object. */ +static inline Elf32_Addr __attribute__ ((unused)) +elf_machine_load_address (void) +{ + extern Elf32_Addr internal_function __dl_start (void *) asm ("_dl_start"); + Elf32_Addr got_addr = (Elf32_Addr) &__dl_start; + Elf32_Addr pcrel_addr; +#ifdef __thumb__ + /* Clear the low bit of the function address. */ + got_addr &= ~(Elf32_Addr) 1; +#endif + asm ("adr %0, _dl_start" : "=r" (pcrel_addr)); + return pcrel_addr - got_addr; +} + + +/* Set up the loaded object described by L so its unrelocated PLT + entries will jump to the on-demand fixup code in dl-runtime.c. */ + +static inline int __attribute__ ((unused)) +elf_machine_runtime_setup (struct link_map *l, int lazy, int profile) +{ + Elf32_Addr *got; + extern void _dl_runtime_resolve (Elf32_Word); + extern void _dl_runtime_profile (Elf32_Word); + + if (l->l_info[DT_JMPREL] && lazy) + { + /* patb: this is different than i386 */ + /* The GOT entries for functions in the PLT have not yet been filled + in. Their initial contents will arrange when called to push an + index into the .got section, load ip with &_GLOBAL_OFFSET_TABLE_[3], + and then jump to _GLOBAL_OFFSET_TABLE[2]. */ + got = (Elf32_Addr *) D_PTR (l, l_info[DT_PLTGOT]); + /* If a library is prelinked but we have to relocate anyway, + we have to be able to undo the prelinking of .got.plt. + The prelinker saved the address of .plt here for us. */ + if (got[1]) + l->l_mach.plt = got[1] + l->l_addr; + got[1] = (Elf32_Addr) l; /* Identify this shared object. */ + + /* The got[2] entry contains the address of a function which gets + called to get the address of a so far unresolved function and + jump to it. The profiling extension of the dynamic linker makes it possible + to intercept the calls to collect information. In this case we + don't store the address in the GOT so that all future calls also + end in this function.
*/ + if (profile) + { + got[2] = (Elf32_Addr) &_dl_runtime_profile; + + if (GLRO(dl_profile) != NULL + && _dl_name_match_p (GLRO(dl_profile), l)) + /* Say that we really want profiling and the timers are + started. */ + GL(dl_profile_map) = l; + } + else + /* This function will get called to fix up the GOT entry indicated by + the offset on the stack, and then jump to the resolved address. */ + got[2] = (Elf32_Addr) &_dl_runtime_resolve; + } + + if (l->l_info[ADDRIDX (DT_TLSDESC_GOT)] && lazy) + *(Elf32_Addr*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr) + = (Elf32_Addr) &_dl_tlsdesc_lazy_resolver; + + return lazy; +} + +#if defined(ARCH_HAS_BX) +#define BX(x) "bx\t" #x +#else +#define BX(x) "mov\tpc, " #x +#endif + +/* Mask identifying addresses reserved for the user program, + where the dynamic linker should not map anything. */ +#define ELF_MACHINE_USER_ADDRESS_MASK 0xf8000000UL + +/* Initial entry point code for the dynamic linker. + The C function `_dl_start' is the real entry point; + its return value is the user program's entry point. */ + +#define RTLD_START asm ("\ +.text\n\ +.globl _start\n\ +.type _start, %function\n\ +.globl _dl_start_user\n\ +.type _dl_start_user, %function\n\ +_start:\n\ + @ we are PIC code, so get global offset table\n\ + ldr sl, .L_GET_GOT\n\ + @ See if we were run as a command with the executable file\n\ + @ name as an extra leading argument.\n\ + ldr r4, .L_SKIP_ARGS\n\ + @ at start time, all the args are on the stack\n\ + mov r0, sp\n\ + bl _dl_start\n\ + @ returns user entry point in r0\n\ +_dl_start_user:\n\ + adr r6, .L_GET_GOT\n\ + add sl, sl, r6\n\ + ldr r4, [sl, r4]\n\ + @ save the entry point in another register\n\ + mov r6, r0\n\ + @ get the original arg count\n\ + ldr r1, [sp]\n\ + @ get the argv address\n\ + add r2, sp, #4\n\ + @ Fix up the stack if necessary.\n\ + cmp r4, #0\n\ + bne .L_fixup_stack\n\ +.L_done_fixup:\n\ + @ compute envp\n\ + add r3, r2, r1, lsl #2\n\ + add r3, r3, #4\n\ + @ now we call _dl_init\n\ + ldr r0, .L_LOADED\n\ + ldr r0, [sl, r0]\n\ + @ call _dl_init\n\ + bl _dl_init(PLT)\n\ + @ load the finalizer function\n\ + ldr r0, .L_FINI_PROC\n\ + add r0, sl, r0\n\ + @ jump to the user_s entry point\n\ + " BX(r6) "\n\ +\n\ + @ iWMMXt and EABI targets require the stack to be eight byte\n\ + @ aligned - shuffle arguments etc.\n\ +.L_fixup_stack:\n\ + @ subtract _dl_skip_args from original arg count\n\ + sub r1, r1, r4\n\ + @ store the new argc in the new stack location\n\ + str r1, [sp]\n\ + @ find the first unskipped argument\n\ + mov r3, r2\n\ + add r4, r2, r4, lsl #2\n\ + @ shuffle argv down\n\ +1: ldr r5, [r4], #4\n\ + str r5, [r3], #4\n\ + cmp r5, #0\n\ + bne 1b\n\ + @ shuffle envp down\n\ +1: ldr r5, [r4], #4\n\ + str r5, [r3], #4\n\ + cmp r5, #0\n\ + bne 1b\n\ + @ shuffle auxv down\n\ +1: ldmia r4!, {r0, r5}\n\ + stmia r3!, {r0, r5}\n\ + cmp r0, #0\n\ + bne 1b\n\ + @ Update _dl_argv\n\ + ldr r3, .L_ARGV\n\ + str r2, [sl, r3]\n\ + b .L_done_fixup\n\ +\n\ +.L_GET_GOT:\n\ + .word _GLOBAL_OFFSET_TABLE_ - .L_GET_GOT\n\ +.L_SKIP_ARGS:\n\ + .word _dl_skip_args(GOTOFF)\n\ +.L_FINI_PROC:\n\ + .word _dl_fini(GOTOFF)\n\ +.L_ARGV:\n\ + .word _dl_argv(GOTOFF)\n\ +.L_LOADED:\n\ + .word _rtld_local(GOTOFF)\n\ +.previous\n\ +"); + +/* ELF_RTYPE_CLASS_PLT iff TYPE describes relocation of a PLT entry or + TLS variable, so undefined references should not be allowed to + define the value. + ELF_RTYPE_CLASS_COPY iff TYPE should not be allowed to resolve to one + of the main executable's symbols, as for a COPY reloc. 
+ ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA iff TYPE describes relocation against + protected data whose address may be external due to copy relocation. */ +#ifndef RTLD_BOOTSTRAP +# define elf_machine_type_class(type) \ + ((((type) == R_ARM_JUMP_SLOT || (type) == R_ARM_TLS_DTPMOD32 \ + || (type) == R_ARM_TLS_DTPOFF32 || (type) == R_ARM_TLS_TPOFF32 \ + || (type) == R_ARM_TLS_DESC) \ + * ELF_RTYPE_CLASS_PLT) \ + | (((type) == R_ARM_COPY) * ELF_RTYPE_CLASS_COPY) \ + | (((type) == R_ARM_GLOB_DAT) * ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA)) +#else +#define elf_machine_type_class(type) \ + ((((type) == R_ARM_JUMP_SLOT) * ELF_RTYPE_CLASS_PLT) \ + | (((type) == R_ARM_COPY) * ELF_RTYPE_CLASS_COPY) \ + | (((type) == R_ARM_GLOB_DAT) * ELF_RTYPE_CLASS_EXTERN_PROTECTED_DATA)) +#endif + +/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries. */ +#define ELF_MACHINE_JMP_SLOT R_ARM_JUMP_SLOT + +/* ARM never uses Elf32_Rela relocations for the dynamic linker. + Prelinked libraries may use Elf32_Rela though. */ +#define ELF_MACHINE_PLT_REL 1 + +/* We define an initialization function. This is called very early in + _dl_sysdep_start. */ +#define DL_PLATFORM_INIT dl_platform_init () + +static inline void __attribute__ ((unused)) +dl_platform_init (void) +{ + if (GLRO(dl_platform) != NULL && *GLRO(dl_platform) == '\0') + /* Avoid an empty string which would disturb us. */ + GLRO(dl_platform) = NULL; +} + +static inline Elf32_Addr +elf_machine_fixup_plt (struct link_map *map, lookup_t t, + const Elf32_Rel *reloc, + Elf32_Addr *reloc_addr, Elf32_Addr value) +{ + return *reloc_addr = value; +} + +/* Return the final value of a plt relocation. */ +static inline Elf32_Addr +elf_machine_plt_value (struct link_map *map, const Elf32_Rel *reloc, + Elf32_Addr value) +{ + return value; +} + +#endif /* !dl_machine_h */ + + +/* ARM never uses Elf32_Rela relocations for the dynamic linker. + Prelinked libraries may use Elf32_Rela though. */ +#define ELF_MACHINE_NO_RELA defined RTLD_BOOTSTRAP +#define ELF_MACHINE_NO_REL 0 + +/* Names of the architecture-specific auditing callback functions. */ +#define ARCH_LA_PLTENTER arm_gnu_pltenter +#define ARCH_LA_PLTEXIT arm_gnu_pltexit + +#ifdef RESOLVE_MAP +/* Handle a PC24 reloc, including the out-of-range case. */ +auto void +relocate_pc24 (struct link_map *map, Elf32_Addr value, + Elf32_Addr *const reloc_addr, Elf32_Sword addend) +{ + Elf32_Addr new_value; + + /* Set NEW_VALUE based on V, and return true iff it overflows 24 bits. */ + inline bool set_new_value (Elf32_Addr v) + { + new_value = v + addend - (Elf32_Addr) reloc_addr; + Elf32_Addr topbits = new_value & 0xfe000000; + return topbits != 0xfe000000 && topbits != 0x00000000; + } + + if (set_new_value (value)) + { + /* The PC-relative address doesn't fit in 24 bits!
*/ + + static void *fix_page; + static size_t fix_offset; + if (fix_page == NULL) + { + void *new_page = __mmap (NULL, GLRO(dl_pagesize), + PROT_READ | PROT_WRITE | PROT_EXEC, + MAP_PRIVATE | MAP_ANON, -1, 0); + if (new_page == MAP_FAILED) + _dl_signal_error (0, map->l_name, NULL, + "could not map page for fixup"); + fix_page = new_page; + assert (fix_offset == 0); + } + + Elf32_Word *fix_address = fix_page + fix_offset; + fix_address[0] = 0xe51ff004; /* ldr pc, [pc, #-4] */ + fix_address[1] = value; + + fix_offset += sizeof fix_address[0] * 2; + if (fix_offset >= GLRO(dl_pagesize)) + { + fix_page = NULL; + fix_offset = 0; + } + + if (set_new_value ((Elf32_Addr) fix_address)) + _dl_signal_error (0, map->l_name, NULL, + "R_ARM_PC24 relocation out of range"); + } + + *reloc_addr = (*reloc_addr & 0xff000000) | ((new_value >> 2) & 0x00ffffff); +} + +/* Perform the relocation specified by RELOC and SYM (which is fully resolved). + MAP is the object containing the reloc. */ + +auto inline void +__attribute__ ((always_inline)) +elf_machine_rel (struct link_map *map, const Elf32_Rel *reloc, + const Elf32_Sym *sym, const struct r_found_version *version, + void *const reloc_addr_arg, int skip_ifunc) +{ + Elf32_Addr *const reloc_addr = reloc_addr_arg; + const unsigned int r_type = ELF32_R_TYPE (reloc->r_info); + +#if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC + if (__builtin_expect (r_type == R_ARM_RELATIVE, 0)) + { +# if !defined RTLD_BOOTSTRAP && !defined HAVE_Z_COMBRELOC + /* This is defined in rtld.c, but nowhere in the static libc.a; + make the reference weak so static programs can still link. + This declaration cannot be done when compiling rtld.c + (i.e. #ifdef RTLD_BOOTSTRAP) because rtld.c contains the + common defn for _dl_rtld_map, which is incompatible with a + weak decl in the same file. */ +# ifndef SHARED + weak_extern (_dl_rtld_map); +# endif + if (map != &GL(dl_rtld_map)) /* Already done in rtld itself. */ +# endif + *reloc_addr += map->l_addr; + } +# ifndef RTLD_BOOTSTRAP + else if (__builtin_expect (r_type == R_ARM_NONE, 0)) + return; +# endif + else +#endif + { + const Elf32_Sym *const refsym = sym; + struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type); + Elf32_Addr value = sym_map == NULL ? 0 : sym_map->l_addr + sym->st_value; + + if (sym != NULL + && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0) + && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1) + && __builtin_expect (!skip_ifunc, 1)) + value = elf_ifunc_invoke (value); + + switch (r_type) + { + case R_ARM_COPY: + if (sym == NULL) + /* This can happen in trace mode if an object could not be + found. */ + break; + if (sym->st_size > refsym->st_size + || (GLRO(dl_verbose) && sym->st_size < refsym->st_size)) + { + const char *strtab; + + strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]); + _dl_error_printf ("\ +%s: Symbol `%s' has different size in shared object, consider re-linking\n", + RTLD_PROGNAME, strtab + refsym->st_name); + } + memcpy (reloc_addr_arg, (void *) value, + MIN (sym->st_size, refsym->st_size)); + break; + case R_ARM_GLOB_DAT: + case R_ARM_JUMP_SLOT: +# ifdef RTLD_BOOTSTRAP + /* Fix weak undefined references. 
*/ + if (sym != NULL && sym->st_value == 0) + *reloc_addr = 0; + else +# endif + *reloc_addr = value; + break; + case R_ARM_ABS32: + { + struct unaligned + { + Elf32_Addr x; + } __attribute__ ((packed, may_alias)); +# ifndef RTLD_BOOTSTRAP + /* This is defined in rtld.c, but nowhere in the static + libc.a; make the reference weak so static programs can + still link. This declaration cannot be done when + compiling rtld.c (i.e. #ifdef RTLD_BOOTSTRAP) because + rtld.c contains the common defn for _dl_rtld_map, which + is incompatible with a weak decl in the same file. */ +# ifndef SHARED + weak_extern (_dl_rtld_map); +# endif + if (map == &GL(dl_rtld_map)) + /* Undo the relocation done here during bootstrapping. + Now we will relocate it anew, possibly using a + binding found in the user program or a loaded library + rather than the dynamic linker's built-in definitions + used while loading those libraries. */ + value -= map->l_addr + refsym->st_value; +# endif + /* Support relocations on mis-aligned offsets. */ + ((struct unaligned *) reloc_addr)->x += value; + break; + } + case R_ARM_TLS_DESC: + { + struct tlsdesc volatile *td = + (struct tlsdesc volatile *)reloc_addr; + +# ifndef RTLD_BOOTSTRAP + if (! sym) + td->entry = _dl_tlsdesc_undefweak; + else +# endif + { + if (ELF32_R_SYM (reloc->r_info) == STN_UNDEF) + value = td->argument.value; + else + value = sym->st_value; + +# ifndef RTLD_BOOTSTRAP +# ifndef SHARED + CHECK_STATIC_TLS (map, sym_map); +# else + if (!TRY_STATIC_TLS (map, sym_map)) + { + td->argument.pointer + = _dl_make_tlsdesc_dynamic (sym_map, value); + td->entry = _dl_tlsdesc_dynamic; + } + else +# endif +# endif + { + td->argument.value = value + sym_map->l_tls_offset; + td->entry = _dl_tlsdesc_return; + } + } + } + break; + case R_ARM_PC24: + relocate_pc24 (map, value, reloc_addr, + /* Sign-extend the 24-bit addend in the + instruction (which counts instructions), and + then shift it up two so as to count bytes. */ + (((Elf32_Sword) *reloc_addr << 8) >> 8) << 2); + break; +#if !defined RTLD_BOOTSTRAP + case R_ARM_TLS_DTPMOD32: + /* Get the information from the link map returned by the + resolv function. */ + if (sym_map != NULL) + *reloc_addr = sym_map->l_tls_modid; + break; + + case R_ARM_TLS_DTPOFF32: + if (sym != NULL) + *reloc_addr += sym->st_value; + break; + + case R_ARM_TLS_TPOFF32: + if (sym != NULL) + { + CHECK_STATIC_TLS (map, sym_map); + *reloc_addr += sym->st_value + sym_map->l_tls_offset; + } + break; + case R_ARM_IRELATIVE: + value = map->l_addr + *reloc_addr; + value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap)); + *reloc_addr = value; + break; +#endif + default: + _dl_reloc_bad_type (map, r_type, 0); + break; + } + } +} + +# ifndef RTLD_BOOTSTRAP +auto inline void +__attribute__ ((always_inline)) +elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc, + const Elf32_Sym *sym, const struct r_found_version *version, + void *const reloc_addr_arg, int skip_ifunc) +{ + Elf32_Addr *const reloc_addr = reloc_addr_arg; + const unsigned int r_type = ELF32_R_TYPE (reloc->r_info); + + if (__builtin_expect (r_type == R_ARM_RELATIVE, 0)) + *reloc_addr = map->l_addr + reloc->r_addend; + else if (__builtin_expect (r_type == R_ARM_NONE, 0)) + return; + else + { +# ifndef RESOLVE_CONFLICT_FIND_MAP + const Elf32_Sym *const refsym = sym; +# endif + struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type); + Elf32_Addr value = sym_map == NULL ? 
0 : sym_map->l_addr + sym->st_value; + + if (sym != NULL + && __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC, 0) + && __builtin_expect (sym->st_shndx != SHN_UNDEF, 1) + && __builtin_expect (!skip_ifunc, 1)) + value = elf_ifunc_invoke (value); + + switch (r_type) + { +# ifndef RESOLVE_CONFLICT_FIND_MAP + /* Not needed for dl-conflict.c. */ + case R_ARM_COPY: + if (sym == NULL) + /* This can happen in trace mode if an object could not be + found. */ + break; + if (sym->st_size > refsym->st_size + || (GLRO(dl_verbose) && sym->st_size < refsym->st_size)) + { + const char *strtab; + + strtab = (const void *) D_PTR (map, l_info[DT_STRTAB]); + _dl_error_printf ("\ +%s: Symbol `%s' has different size in shared object, consider re-linking\n", + RTLD_PROGNAME, strtab + refsym->st_name); + } + memcpy (reloc_addr_arg, (void *) value, + MIN (sym->st_size, refsym->st_size)); + break; +# endif /* !RESOLVE_CONFLICT_FIND_MAP */ + case R_ARM_GLOB_DAT: + case R_ARM_JUMP_SLOT: + case R_ARM_ABS32: + *reloc_addr = value + reloc->r_addend; + break; +# ifdef RESOLVE_CONFLICT_FIND_MAP + case R_ARM_TLS_DESC: + { + struct tlsdesc volatile *td __attribute__ ((unused)) = + (struct tlsdesc volatile *) reloc_addr; + + RESOLVE_CONFLICT_FIND_MAP (map, reloc_addr); + + /* Make sure we know what's going on. */ + assert (td->entry + == (void *) (D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)]) + + map->l_addr)); + assert (map->l_info[ADDRIDX (DT_TLSDESC_GOT)]); + + /* Set up the lazy resolver and store the pointer to our link + map in _GLOBAL_OFFSET_TABLE[1] now as for a prelinked + binary elf_machine_runtime_setup() is not called and hence + neither has been initialized. */ + *(Elf32_Addr *) (D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + + map->l_addr) + = (Elf32_Addr) &_dl_tlsdesc_lazy_resolver; + ((Elf32_Addr *) D_PTR (map, l_info[DT_PLTGOT]))[1] + = (Elf32_Addr) map; + } + break; +# endif /* RESOLVE_CONFLICT_FIND_MAP */ + case R_ARM_PC24: + relocate_pc24 (map, value, reloc_addr, reloc->r_addend); + break; +#if !defined RTLD_BOOTSTRAP + case R_ARM_TLS_DTPMOD32: + /* Get the information from the link map returned by the + resolv function. */ + if (sym_map != NULL) + *reloc_addr = sym_map->l_tls_modid; + break; + + case R_ARM_TLS_DTPOFF32: + *reloc_addr = (sym == NULL ? 
0 : sym->st_value) + reloc->r_addend; + break; + + case R_ARM_TLS_TPOFF32: + if (sym != NULL) + { + CHECK_STATIC_TLS (map, sym_map); + *reloc_addr = (sym->st_value + sym_map->l_tls_offset + + reloc->r_addend); + } + break; + case R_ARM_IRELATIVE: + value = map->l_addr + reloc->r_addend; + value = ((Elf32_Addr (*) (int)) value) (GLRO(dl_hwcap)); + *reloc_addr = value; + break; +#endif + default: + _dl_reloc_bad_type (map, r_type, 0); + break; + } + } +} +# endif + +auto inline void +__attribute__ ((always_inline)) +elf_machine_rel_relative (Elf32_Addr l_addr, const Elf32_Rel *reloc, + void *const reloc_addr_arg) +{ + Elf32_Addr *const reloc_addr = reloc_addr_arg; + *reloc_addr += l_addr; +} + +# ifndef RTLD_BOOTSTRAP +auto inline void +__attribute__ ((always_inline)) +elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc, + void *const reloc_addr_arg) +{ + Elf32_Addr *const reloc_addr = reloc_addr_arg; + *reloc_addr = l_addr + reloc->r_addend; +} +# endif + +auto inline void +__attribute__ ((always_inline)) +elf_machine_lazy_rel (struct link_map *map, + Elf32_Addr l_addr, const Elf32_Rel *reloc, + int skip_ifunc) +{ + Elf32_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset); + const unsigned int r_type = ELF32_R_TYPE (reloc->r_info); + /* Check for unexpected PLT reloc type. */ + if (__builtin_expect (r_type == R_ARM_JUMP_SLOT, 1)) + { + if (__builtin_expect (map->l_mach.plt, 0) == 0) + *reloc_addr += l_addr; + else + *reloc_addr = map->l_mach.plt; + } + else if (__builtin_expect (r_type == R_ARM_TLS_DESC, 1)) + { + struct tlsdesc volatile *td = + (struct tlsdesc volatile *)reloc_addr; + + /* The linker must have given us the parameter we need in the + first GOT entry, and left the second one empty. The latter + will have been preset by the prelinker if used though. + We fill it with the resolver address. */ + assert (td->entry == 0 + || map->l_info[VALIDX (DT_GNU_PRELINKED)] != NULL); + td->entry = (void*)(D_PTR (map, l_info[ADDRIDX (DT_TLSDESC_PLT)]) + + map->l_addr); + } + else + _dl_reloc_bad_type (map, r_type, 1); +} + +#endif /* RESOLVE_MAP */ diff --git a/REORG.TODO/sysdeps/arm/dl-sysdep.h b/REORG.TODO/sysdeps/arm/dl-sysdep.h new file mode 100644 index 0000000000..ef3e1e429b --- /dev/null +++ b/REORG.TODO/sysdeps/arm/dl-sysdep.h @@ -0,0 +1,25 @@ +/* System-specific settings for dynamic linker code. ARM version. + Copyright (C) 2002-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include_next <dl-sysdep.h> + +/* _dl_argv cannot be attribute_relro, because _dl_start_user + might write into it after _dl_start returns.
*/ +#define DL_ARGV_NOT_RELRO 1 + +#define DL_EXTERN_PROTECTED_DATA diff --git a/REORG.TODO/sysdeps/arm/dl-tls.h b/REORG.TODO/sysdeps/arm/dl-tls.h new file mode 100644 index 0000000000..1e47e07668 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/dl-tls.h @@ -0,0 +1,28 @@ +/* Thread-local storage handling in the ELF dynamic linker. ARM version. + Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + + +/* Type used for the representation of TLS information in the GOT. */ +typedef struct dl_tls_index +{ + unsigned long int ti_module; + unsigned long int ti_offset; +} tls_index; + + +extern void *__tls_get_addr (tls_index *ti); diff --git a/REORG.TODO/sysdeps/arm/dl-tlsdesc.S b/REORG.TODO/sysdeps/arm/dl-tlsdesc.S new file mode 100644 index 0000000000..e7bed02188 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/dl-tlsdesc.S @@ -0,0 +1,218 @@ +/* Thread-local storage handling in the ELF dynamic linker. ARM version. + Copyright (C) 2006-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> +#include <arm-features.h> +#include <tls.h> +#include "tlsdesc.h" + + .text + @ emit debug information with cfi + @ use arm-specific pseudos for unwinding itself + CFI_SECTIONS + .hidden _dl_tlsdesc_return + .global _dl_tlsdesc_return + .type _dl_tlsdesc_return,#function + cfi_startproc + eabi_fnstart + .align 2 +_dl_tlsdesc_return: + ldr r0, [r0] + BX (lr) + eabi_fnend + cfi_endproc + .size _dl_tlsdesc_return, .-_dl_tlsdesc_return + + .hidden _dl_tlsdesc_undefweak + .global _dl_tlsdesc_undefweak + .type _dl_tlsdesc_undefweak,#function + cfi_startproc + eabi_fnstart + .align 2 +_dl_tlsdesc_undefweak: + GET_TLS (r1) + rsb r0, r0, #0 + BX (lr) + cfi_endproc + eabi_fnend + .size _dl_tlsdesc_undefweak, .-_dl_tlsdesc_undefweak + +#ifdef SHARED + .hidden _dl_tlsdesc_dynamic + .global _dl_tlsdesc_dynamic + .type _dl_tlsdesc_dynamic,#function + + +/* + The assembly code that follows is a rendition of the following + C code, hand-optimized a little bit. 
+ +ptrdiff_t +_dl_tlsdesc_dynamic(struct tlsdesc *tdp) +{ + struct tlsdesc_dynamic_arg *td = tdp->argument.pointer; + dtv_t *dtv = (dtv_t *)THREAD_DTV(); + if (__builtin_expect (td->gen_count <= dtv[0].counter + && dtv[td->tlsinfo.ti_module].pointer.val + != TLS_DTV_UNALLOCATED, + 1)) + return dtv[td->tlsinfo.ti_module].pointer.val + + td->tlsinfo.ti_offset - __builtin_thread_pointer(); + + return __tls_get_addr (&td->tlsinfo) - __builtin_thread_pointer(); +} + +*/ + cfi_startproc + eabi_fnstart + .align 2 +_dl_tlsdesc_dynamic: + /* Our calling convention is to clobber r0, r1 and the processor + flags. All others that are modified must be saved */ + eabi_save ({r2,r3,r4,lr}) + push {r2,r3,r4,lr} + cfi_adjust_cfa_offset (16) + cfi_rel_offset (r2,0) + cfi_rel_offset (r3,4) + cfi_rel_offset (r4,8) + cfi_rel_offset (lr,12) + ldr r1, [r0] /* td */ + GET_TLS (lr) + mov r4, r0 /* r4 = tp */ + ldr r0, [r0] + ldr r2, [r1, #8] /* gen_count */ + ldr r3, [r0] + cmp r2, r3 + bhi 1f + ldr r3, [r1] +#ifndef ARM_NO_INDEX_REGISTER + ldr r2, [r0, r3, lsl #3] +#else + add lr, r0, r3, lsl #3 + ldr r2, [lr] +#endif + cmn r2, #1 + ittt ne + ldrne r3, [r1, #4] + addne r3, r2, r3 + rsbne r0, r4, r3 + bne 2f +1: mov r0, r1 + bl __tls_get_addr + rsb r0, r4, r0 +2: +#if ((defined (__ARM_ARCH_4T__) && defined (__THUMB_INTERWORK__)) \ + || defined (ARM_ALWAYS_BX)) + pop {r2,r3,r4, lr} + cfi_adjust_cfa_offset (-16) + cfi_restore (lr) + cfi_restore (r4) + cfi_restore (r3) + cfi_restore (r2) + bx lr +#else + pop {r2,r3,r4, pc} +#endif + eabi_fnend + cfi_endproc + .size _dl_tlsdesc_dynamic, .-_dl_tlsdesc_dynamic +#endif /* SHARED */ + +/* lazy resolved for tls descriptors. */ + .hidden _dl_tlsdesc_lazy_resolver + .global _dl_tlsdesc_lazy_resolver + .type _dl_tlsdesc_lazy_resolver,#function + cfi_startproc + eabi_fnstart + .align 2 +_dl_tlsdesc_lazy_resolver: + /* r0 points at the tlsdesc, + r1 points at the GOT + r2 was pushed by the trampoline and used as a temp, + we need to pop it here. + We push the remaining call-clobbered registers here, and also + R1 -- to keep the stack correctly aligned. */ + /* Tell the unwinder that r2 has already been pushed. */ + eabi_save ({r2}) + cfi_adjust_cfa_offset (4) + cfi_rel_offset (r2, 0) + eabi_save ({r0,r1,r3,ip,lr}) + push {r0, r1, r3, ip, lr} + cfi_adjust_cfa_offset (20) + cfi_rel_offset (r0, 0) + cfi_rel_offset (r1, 4) + cfi_rel_offset (r3, 8) + cfi_rel_offset (ip, 12) + cfi_rel_offset (lr, 16) + bl _dl_tlsdesc_lazy_resolver_fixup + pop {r0, r1, r3, ip, lr} + cfi_adjust_cfa_offset (-20) + cfi_restore (lr) + cfi_restore (ip) + cfi_restore (r3) + cfi_restore (r1) + cfi_restore (r0) + pop {r2} + cfi_adjust_cfa_offset (-4) + cfi_restore (r2) + ldr r1, [r0, #4] + BX (r1) + eabi_fnend + cfi_endproc + .size _dl_tlsdesc_lazy_resolver, .-_dl_tlsdesc_lazy_resolver + +/* Holder for lazy tls descriptors being resolve in another thread. + + Our calling convention is to clobber r0, r1 and the processor + flags. All others that are modified must be saved */ + .hidden _dl_tlsdesc_resolve_hold + .global _dl_tlsdesc_resolve_hold + .type _dl_tlsdesc_resolve_hold,#function + cfi_startproc + eabi_fnstart + .align 2 +_dl_tlsdesc_resolve_hold: + /* r0 is saved so its original value can be used after the call and + r1 is saved only to keep the stack aligned. (r0 points to the tls + descriptor, it is passed to _dl_tlsdesc_resolve_hold_fixup which + is a void function that may clobber r0, later r0 is used to load + the new resolver.) 
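   For orientation, here is a rough caller-side sketch of the contract that
   every entry point in this file implements (an illustration, not code taken
   from glibc): the linker leaves the address of a struct tlsdesc in the GOT,
   the compiler-generated access sequence loads that address into r0 and calls
   through the descriptor's entry slot, and the hook returns the variable's
   offset from the thread pointer:

     struct tlsdesc *td = got_entry;            // r0 on entry (got_entry is illustrative)
     ptrdiff_t off = td->entry (td);            // call through [r0, #4]
     void *addr = (char *) __builtin_thread_pointer () + off;

   _dl_tlsdesc_return and _dl_tlsdesc_undefweak answer immediately,
   _dl_tlsdesc_dynamic consults the DTV, and _dl_tlsdesc_lazy_resolver and
   _dl_tlsdesc_resolve_hold only exist to get the descriptor filled in (or to
   wait for another thread that is filling it in) on first use.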
*/ + eabi_save ({r0,r1,r2,r3,ip,lr}) + push {r0, r1, r2, r3, ip, lr} + cfi_adjust_cfa_offset (24) + cfi_rel_offset (r0, 0) + cfi_rel_offset (r1, 4) + cfi_rel_offset (r2, 8) + cfi_rel_offset (r3, 12) + cfi_rel_offset (ip, 16) + cfi_rel_offset (lr, 20) + adr r1, _dl_tlsdesc_resolve_hold + bl _dl_tlsdesc_resolve_hold_fixup + pop {r0, r1, r2, r3, ip, lr} + cfi_adjust_cfa_offset (-24) + cfi_restore (lr) + cfi_restore (ip) + cfi_restore (r3) + cfi_restore (r2) + cfi_restore (r1) + cfi_restore (r0) + ldr r1, [r0, #4] + BX (r1) + eabi_fnend + cfi_endproc + .size _dl_tlsdesc_resolve_hold, .-_dl_tlsdesc_resolve_hold diff --git a/REORG.TODO/sysdeps/arm/dl-tlsdesc.h b/REORG.TODO/sysdeps/arm/dl-tlsdesc.h new file mode 100644 index 0000000000..2770af0260 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/dl-tlsdesc.h @@ -0,0 +1,62 @@ +/* Thread-local storage descriptor handling in the ELF dynamic linker. + ARM version. + Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _ARM_DL_TLSDESC_H +# define _ARM_DL_TLSDESC_H 1 + +/* Type used to represent a TLS descriptor in the GOT. */ +struct tlsdesc +{ + union + { + void *pointer; + long value; + } argument; + ptrdiff_t (*entry)(struct tlsdesc *); +}; + + +typedef struct dl_tls_index +{ + unsigned long int ti_module; + unsigned long int ti_offset; +} tls_index; + +/* Type used as the argument in a TLS descriptor for a symbol that + needs dynamic TLS offsets. */ +struct tlsdesc_dynamic_arg +{ + tls_index tlsinfo; + size_t gen_count; +}; + +extern ptrdiff_t attribute_hidden + _dl_tlsdesc_return(struct tlsdesc *), + _dl_tlsdesc_undefweak(struct tlsdesc *), + _dl_tlsdesc_resolve_hold(struct tlsdesc *), + _dl_tlsdesc_lazy_resolver(struct tlsdesc *); + +# ifdef SHARED +extern void *_dl_make_tlsdesc_dynamic (struct link_map *map, size_t ti_offset); + +extern ptrdiff_t attribute_hidden + _dl_tlsdesc_dynamic(struct tlsdesc *); +# endif + +#endif diff --git a/REORG.TODO/sysdeps/arm/dl-trampoline.S b/REORG.TODO/sysdeps/arm/dl-trampoline.S new file mode 100644 index 0000000000..95d2efc8ea --- /dev/null +++ b/REORG.TODO/sysdeps/arm/dl-trampoline.S @@ -0,0 +1,212 @@ +/* PLT trampolines. ARM version. + Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* ??? Needs more rearrangement for the LDM to handle thumb mode. */ +#define NO_THUMB +#include <sysdep.h> +#include <libc-symbols.h> + + .text + .globl _dl_runtime_resolve + .type _dl_runtime_resolve, #function + CFI_SECTIONS + cfi_startproc + .align 2 +_dl_runtime_resolve: + cfi_adjust_cfa_offset (4) + cfi_rel_offset (lr, 0) + + @ we get called with + @ stack[0] contains the return address from this call + @ ip contains &GOT[n+3] (pointer to function) + @ lr points to &GOT[2] + + @ Save arguments. We save r4 to realign the stack. + push {r0-r4} + cfi_adjust_cfa_offset (20) + cfi_rel_offset (r0, 0) + cfi_rel_offset (r1, 4) + cfi_rel_offset (r2, 8) + cfi_rel_offset (r3, 12) + + @ get pointer to linker struct + ldr r0, [lr, #-4] + + @ prepare to call _dl_fixup() + @ change &GOT[n+3] into 8*n NOTE: reloc are 8 bytes each + sub r1, ip, lr + sub r1, r1, #4 + add r1, r1, r1 + + @ call fixup routine + bl _dl_fixup + + @ save the return + mov ip, r0 + + @ get arguments and return address back. We restore r4 + @ only to realign the stack. + pop {r0-r4,lr} + cfi_adjust_cfa_offset (-24) + + @ jump to the newly found address + BX(ip) + + cfi_endproc + .size _dl_runtime_resolve, .-_dl_runtime_resolve + +#ifndef PROF + .globl _dl_runtime_profile + .type _dl_runtime_profile, #function + CFI_SECTIONS + cfi_startproc + .align 2 +_dl_runtime_profile: + cfi_adjust_cfa_offset (4) + cfi_rel_offset (lr, 0) + + @ we get called with + @ stack[0] contains the return address from this call + @ ip contains &GOT[n+3] (pointer to function) + @ lr points to &GOT[2] + + @ Stack layout: + @ 212 - saved lr + @ 208 - framesize returned from pltenter + @ 16 - La_arm_regs + @ 8 - Saved two arguments to _dl_profile_fixup + @ 4 - Saved result of _dl_profile_fixup + @ 0 - outgoing argument to _dl_profile_fixup + @ For now, we only save the general purpose registers. + + sub sp, sp, #196 + cfi_adjust_cfa_offset (196) + stmia sp, {r0-r3} + cfi_rel_offset (r0, 0) + cfi_rel_offset (r1, 4) + cfi_rel_offset (r2, 8) + cfi_rel_offset (r3, 12) + + sub sp, sp, #16 + cfi_adjust_cfa_offset (16) + + @ Save sp and lr. + add r0, sp, #216 + str r0, [sp, #32] + ldr r2, [sp, #212] + str r2, [sp, #36] + + @ get pointer to linker struct + ldr r0, [lr, #-4] + + @ prepare to call _dl_profile_fixup() + @ change &GOT[n+3] into 8*n NOTE: reloc are 8 bytes each + sub r1, ip, lr + sub r1, r1, #4 + add r1, r1, r1 + + @ Save these two arguments for pltexit. + add r3, sp, #8 + stmia r3!, {r0,r1} + + @ Set up extra args for _dl_profile_fixup. + @ r2 and r3 are already loaded. + add ip, sp, #208 + str ip, [sp, #0] + + @ call profiling fixup routine + bl _dl_profile_fixup + + @ The address to call is now in r0. + + @ Check whether we're wrapping this function. + ldr ip, [sp, #208] + cmp ip, #0 + bge 1f + cfi_remember_state + + @ save the return + mov ip, r0 + + @ get arguments and return address back + add sp, sp, #16 + cfi_adjust_cfa_offset (-16) + ldmia sp, {r0-r3,sp,lr} + cfi_adjust_cfa_offset (-200) + + @ jump to the newly found address + BX(ip) + + cfi_restore_state +1: + @ The new frame size is in ip. + + @ New stack layout: + @ 268 - saved r7 + @ 264 - saved result of _dl_profile_fixup + @ 72 - La_arm_regs + @ 64 - Saved two arguments to _dl_profile_fixup + @ 0 - La_arm_retval + @ For now, we only save the general purpose registers. + + @ Build the new frame. 
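@ The frame size requested via _dl_profile_fixup (that is, by an LD_AUDIT
@ module's pltenter hook) is in ip.  Save r7 and point it at the fixed part
@ of the frame, allocate ip further bytes while keeping sp 8-byte aligned as
@ the AAPCS requires, copy the caller's outgoing stack arguments into the new
@ area, call the resolved function, and finally hand the saved register and
@ return-value blocks to _dl_call_pltexit.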
str r7, [sp, #212] + cfi_rel_offset (r7, 212) + sub r7, sp, #56 + cfi_def_cfa_register (r7) + cfi_adjust_cfa_offset (56) + sub sp, sp, ip + bic sp, sp, #7 + + @ Save the _dl_profile_fixup result around the call to memcpy. + str r0, [r7, #264] + + @ Copy the stack arguments. + mov r0, sp + add r1, r7, #272 + mov r2, ip + bl memcpy + + @ Call the function. + add ip, r7, #72 + ldmia ip, {r0-r3} + ldr ip, [r7, #264] + BLX(ip) + stmia r7, {r0-r3} + + @ Call pltexit. + add ip, r7, #64 + ldmia ip, {r0,r1} + add r2, r7, #72 + add r3, r7, #0 + bl _dl_call_pltexit + + @ Return to caller. + ldmia r7, {r0-r3} + mov sp, r7 + cfi_def_cfa_register (sp) + ldr r7, [sp, #268] + ldr lr, [sp, #92] + add sp, sp, #272 + cfi_adjust_cfa_offset (-272) + BX(lr) + + cfi_endproc + .size _dl_runtime_profile, .-_dl_runtime_profile +#endif + .previous diff --git a/REORG.TODO/sysdeps/arm/e_sqrt.c b/REORG.TODO/sysdeps/arm/e_sqrt.c new file mode 100644 index 0000000000..1f7d00806f --- /dev/null +++ b/REORG.TODO/sysdeps/arm/e_sqrt.c @@ -0,0 +1,45 @@ +/* Compute square root for double. ARM version. + Copyright (C) 2016-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#ifdef __SOFTFP__ + +/* Use architecture-independent sqrt implementation. */ +# include <sysdeps/ieee754/dbl-64/e_sqrt.c> + +#else + +/* Use VFP square root instruction. */ +# include <math.h> +# include <sysdep.h> + +double +__ieee754_sqrt (double x) +{ + double ret; +# if __ARM_ARCH >= 6 + asm ("vsqrt.f64 %P0, %P1" : "=w" (ret) : "w" (x)); +# else + /* As in GCC, for VFP9 Erratum 760019 avoid overwriting the + input. */ + asm ("vsqrt.f64 %P0, %P1" : "=&w" (ret) : "w" (x)); +# endif + return ret; +} +strong_alias (__ieee754_sqrt, __sqrt_finite) + +#endif diff --git a/REORG.TODO/sysdeps/arm/e_sqrtf.c b/REORG.TODO/sysdeps/arm/e_sqrtf.c new file mode 100644 index 0000000000..abf1a7739f --- /dev/null +++ b/REORG.TODO/sysdeps/arm/e_sqrtf.c @@ -0,0 +1,45 @@ +/* Compute square root for float. ARM version. + Copyright (C) 2016-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#ifdef __SOFTFP__ + +/* Use architecture-independent sqrtf implementation. 
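   (Reference note, not part of the original file: in the VFP branches here
   and in e_sqrt.c above, the GCC constraint "t" names a single-precision VFP
   register and "w" a double-precision one, and the "&" earlyclobber used in
   the pre-ARMv6 variants keeps the result from being allocated to the same
   register as the input, which is the VFP9 erratum 760019 workaround the
   comments refer to.)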
*/ +# include <sysdeps/ieee754/flt-32/e_sqrtf.c> + +#else + +/* Use VFP square root instruction. */ +# include <math.h> +# include <sysdep.h> + +float +__ieee754_sqrtf (float x) +{ + float ret; +# if __ARM_ARCH >= 6 + asm ("vsqrt.f32 %0, %1" : "=t" (ret) : "t" (x)); +# else + /* As in GCC, for VFP9 Erratum 760019 avoid overwriting the + input. */ + asm ("vsqrt.f32 %0, %1" : "=&t" (ret) : "t" (x)); +# endif + return ret; +} +strong_alias (__ieee754_sqrtf, __sqrtf_finite) + +#endif diff --git a/REORG.TODO/sysdeps/arm/fclrexcpt.c b/REORG.TODO/sysdeps/arm/fclrexcpt.c new file mode 100644 index 0000000000..6abaf4f003 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fclrexcpt.c @@ -0,0 +1,43 @@ +/* Clear given exceptions in current floating-point environment. + Copyright (C) 1997-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <fenv.h> +#include <fpu_control.h> +#include <arm-features.h> + + +int +feclearexcept (int excepts) +{ + fpu_control_t fpscr, new_fpscr; + + /* Fail if a VFP unit isn't present unless nothing needs to be done. */ + if (!ARM_HAVE_VFP) + return (excepts != 0); + + _FPU_GETCW (fpscr); + excepts &= FE_ALL_EXCEPT; + new_fpscr = fpscr & ~excepts; + + /* Write new exception flags if changed. */ + if (new_fpscr != fpscr) + _FPU_SETCW (new_fpscr); + + return 0; +} +libm_hidden_def (feclearexcept) diff --git a/REORG.TODO/sysdeps/arm/fedisblxcpt.c b/REORG.TODO/sysdeps/arm/fedisblxcpt.c new file mode 100644 index 0000000000..d0b3edef50 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fedisblxcpt.c @@ -0,0 +1,43 @@ +/* Disable floating-point exceptions. + Copyright (C) 2001-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Philip Blundell <philb@gnu.org>, 2001. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <fenv.h> +#include <fpu_control.h> +#include <arm-features.h> + + +int +fedisableexcept (int excepts) +{ + fpu_control_t fpscr, new_fpscr; + + /* Fail if a VFP unit isn't present. */ + if (!ARM_HAVE_VFP) + return -1; + + _FPU_GETCW (fpscr); + excepts &= FE_ALL_EXCEPT; + new_fpscr = fpscr & ~(excepts << FE_EXCEPT_SHIFT); + + /* Write new exceptions if changed. 
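   The return value is the set of traps that were enabled before the call,
   or -1 when no VFP unit is available, which supports the usual
   save/alter/restore pattern.  An illustrative fragment, not code from this
   file:

     int old = fedisableexcept (FE_ALL_EXCEPT);   // mask every trap
     if (old != -1)
       {
         // ... code that must not trap ...
         feenableexcept (old);                    // restore the previous traps
       }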
*/ + if (new_fpscr != fpscr) + _FPU_SETCW (new_fpscr); + + return (fpscr >> FE_EXCEPT_SHIFT) & FE_ALL_EXCEPT; +} diff --git a/REORG.TODO/sysdeps/arm/feenablxcpt.c b/REORG.TODO/sysdeps/arm/feenablxcpt.c new file mode 100644 index 0000000000..6e36371da3 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/feenablxcpt.c @@ -0,0 +1,51 @@ +/* Enable floating-point exceptions. + Copyright (C) 2001-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Philip Blundell <philb@gnu.org>, 2001. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <fenv.h> +#include <fpu_control.h> +#include <arm-features.h> + + +int +feenableexcept (int excepts) +{ + fpu_control_t fpscr, new_fpscr, updated_fpscr; + + /* Fail if a VFP unit isn't present. */ + if (!ARM_HAVE_VFP) + return -1; + + _FPU_GETCW (fpscr); + excepts &= FE_ALL_EXCEPT; + new_fpscr = fpscr | (excepts << FE_EXCEPT_SHIFT); + + if (new_fpscr != fpscr) + { + _FPU_SETCW (new_fpscr); + + /* Not all VFP architectures support trapping exceptions, so + test whether the relevant bits were set and fail if not. */ + _FPU_GETCW (updated_fpscr); + + if (new_fpscr & ~updated_fpscr) + return -1; + } + + return (fpscr >> FE_EXCEPT_SHIFT) & FE_ALL_EXCEPT; +} diff --git a/REORG.TODO/sysdeps/arm/fegetenv.c b/REORG.TODO/sysdeps/arm/fegetenv.c new file mode 100644 index 0000000000..b76524194e --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fegetenv.c @@ -0,0 +1,39 @@ +/* Store current floating-point environment. + Copyright (C) 1997-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <fenv.h> +#include <fpu_control.h> +#include <arm-features.h> + + +int +__fegetenv (fenv_t *envp) +{ + fpu_control_t fpscr; + + /* Fail if a VFP unit isn't present. */ + if (!ARM_HAVE_VFP) + return 1; + + _FPU_GETCW (fpscr); + envp->__cw = fpscr; + return 0; +} +libm_hidden_def (__fegetenv) +weak_alias (__fegetenv, fegetenv) +libm_hidden_weak (fegetenv) diff --git a/REORG.TODO/sysdeps/arm/fegetexcept.c b/REORG.TODO/sysdeps/arm/fegetexcept.c new file mode 100644 index 0000000000..0757b52975 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fegetexcept.c @@ -0,0 +1,37 @@ +/* Get floating-point exceptions. 
+ Copyright (C) 2001-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Philip Blundell <philb@gnu.org>, 2001 + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <fenv.h> +#include <fpu_control.h> +#include <arm-features.h> + + +int +fegetexcept (void) +{ + fpu_control_t fpscr; + + /* Return with all exceptions disabled if a VFP unit isn't present. */ + if (!ARM_HAVE_VFP) + return 0; + + _FPU_GETCW (fpscr); + + return (fpscr >> FE_EXCEPT_SHIFT) & FE_ALL_EXCEPT; +} diff --git a/REORG.TODO/sysdeps/arm/fegetmode.c b/REORG.TODO/sysdeps/arm/fegetmode.c new file mode 100644 index 0000000000..e1acf3a009 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fegetmode.c @@ -0,0 +1,29 @@ +/* Store current floating-point control modes. ARM version. + Copyright (C) 2016-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#include <fenv.h> +#include <fpu_control.h> +#include <arm-features.h> + +int +fegetmode (femode_t *modep) +{ + if (ARM_HAVE_VFP) + _FPU_GETCW (*modep); + return 0; +} diff --git a/REORG.TODO/sysdeps/arm/fegetround.c b/REORG.TODO/sysdeps/arm/fegetround.c new file mode 100644 index 0000000000..e16c1d586c --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fegetround.c @@ -0,0 +1,29 @@ +/* Return current rounding direction. + Copyright (C) 2004-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. 
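   A note for the reader rather than part of the original header: on this
   port the FE_* rounding-mode macros equal the FPSCR RM field encodings
   (the _FPU_MASK_RM bits 22-23), so __fegetround below simply returns
   get_rounding_mode () and fesetround later in this directory can check its
   argument with round & ~_FPU_MASK_RM.  Typical portable usage is:

     int old = fegetround ();
     if (fesetround (FE_TOWARDZERO) == 0)
       {
         // ... truncating computation ...
         fesetround (old);
       }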
*/ + +#include <get-rounding-mode.h> + + +int +__fegetround (void) +{ + return get_rounding_mode (); +} +libm_hidden_def (__fegetround) +weak_alias (__fegetround, fegetround) +libm_hidden_weak (fegetround) diff --git a/REORG.TODO/sysdeps/arm/feholdexcpt.c b/REORG.TODO/sysdeps/arm/feholdexcpt.c new file mode 100644 index 0000000000..36ed2408e8 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/feholdexcpt.c @@ -0,0 +1,35 @@ +/* Store current floating-point environment and clear exceptions. + Copyright (C) 1997-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <fenv_private.h> +#include <arm-features.h> + + +int +__feholdexcept (fenv_t *envp) +{ + /* Fail if a VFP unit isn't present. */ + if (!ARM_HAVE_VFP) + return 1; + + libc_feholdexcept_vfp (envp); + return 0; +} +libm_hidden_def (__feholdexcept) +weak_alias (__feholdexcept, feholdexcept) +libm_hidden_weak (feholdexcept) diff --git a/REORG.TODO/sysdeps/arm/fenv_private.h b/REORG.TODO/sysdeps/arm/fenv_private.h new file mode 100644 index 0000000000..af31025d95 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fenv_private.h @@ -0,0 +1,249 @@ +/* Private floating point rounding and exceptions handling. ARM VFP version. + Copyright (C) 2014-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef FENV_PRIVATE_H +#define FENV_PRIVATE_H 1 + +#include <fenv.h> +#include <fpu_control.h> + +static __always_inline void +libc_feholdexcept_vfp (fenv_t *envp) +{ + fpu_control_t fpscr; + + _FPU_GETCW (fpscr); + envp->__cw = fpscr; + + /* Clear exception flags and set all exceptions to non-stop. */ + fpscr &= ~_FPU_MASK_EXCEPT; + _FPU_SETCW (fpscr); +} + +static __always_inline void +libc_fesetround_vfp (int round) +{ + fpu_control_t fpscr; + + _FPU_GETCW (fpscr); + + /* Set new rounding mode if different. */ + if (__glibc_unlikely ((fpscr & _FPU_MASK_RM) != round)) + _FPU_SETCW ((fpscr & ~_FPU_MASK_RM) | round); +} + +static __always_inline void +libc_feholdexcept_setround_vfp (fenv_t *envp, int round) +{ + fpu_control_t fpscr; + + _FPU_GETCW (fpscr); + envp->__cw = fpscr; + + /* Clear exception flags, set all exceptions to non-stop, + and set new rounding mode. 
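   A worked example for the two statements below, using the mask values from
   fpu_control.h: if FPSCR is currently 0x00800004 (FE_DOWNWARD in the RM
   field, overflow flag set) and round is FE_TONEAREST (0), clearing
   _FPU_MASK_EXCEPT and _FPU_MASK_RM leaves 0 and ORing in round keeps it 0:
   flags cleared, all traps masked, round-to-nearest selected.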
*/ + fpscr &= ~(_FPU_MASK_EXCEPT | _FPU_MASK_RM); + _FPU_SETCW (fpscr | round); +} + +static __always_inline void +libc_feholdsetround_vfp (fenv_t *envp, int round) +{ + fpu_control_t fpscr; + + _FPU_GETCW (fpscr); + envp->__cw = fpscr; + + /* Set new rounding mode if different. */ + if (__glibc_unlikely ((fpscr & _FPU_MASK_RM) != round)) + _FPU_SETCW ((fpscr & ~_FPU_MASK_RM) | round); +} + +static __always_inline void +libc_feresetround_vfp (fenv_t *envp) +{ + fpu_control_t fpscr, round; + + _FPU_GETCW (fpscr); + + /* Check whether rounding modes are different. */ + round = (envp->__cw ^ fpscr) & _FPU_MASK_RM; + + /* Restore the rounding mode if it was changed. */ + if (__glibc_unlikely (round != 0)) + _FPU_SETCW (fpscr ^ round); +} + +static __always_inline int +libc_fetestexcept_vfp (int ex) +{ + fpu_control_t fpscr; + + _FPU_GETCW (fpscr); + return fpscr & ex & FE_ALL_EXCEPT; +} + +static __always_inline void +libc_fesetenv_vfp (const fenv_t *envp) +{ + fpu_control_t fpscr, new_fpscr; + + _FPU_GETCW (fpscr); + new_fpscr = envp->__cw; + + /* Write new FPSCR if different (ignoring NZCV flags). */ + if (__glibc_unlikely (((fpscr ^ new_fpscr) & ~_FPU_MASK_NZCV) != 0)) + _FPU_SETCW (new_fpscr); +} + +static __always_inline int +libc_feupdateenv_test_vfp (const fenv_t *envp, int ex) +{ + fpu_control_t fpscr, new_fpscr; + int excepts; + + _FPU_GETCW (fpscr); + + /* Merge current exception flags with the saved fenv. */ + excepts = fpscr & FE_ALL_EXCEPT; + new_fpscr = envp->__cw | excepts; + + /* Write new FPSCR if different (ignoring NZCV flags). */ + if (__glibc_unlikely (((fpscr ^ new_fpscr) & ~_FPU_MASK_NZCV) != 0)) + _FPU_SETCW (new_fpscr); + + /* Raise the exceptions if enabled in the new FP state. */ + if (__glibc_unlikely (excepts & (new_fpscr >> FE_EXCEPT_SHIFT))) + __feraiseexcept (excepts); + + return excepts & ex; +} + +static __always_inline void +libc_feupdateenv_vfp (const fenv_t *envp) +{ + libc_feupdateenv_test_vfp (envp, 0); +} + +static __always_inline void +libc_feholdsetround_vfp_ctx (struct rm_ctx *ctx, int r) +{ + fpu_control_t fpscr, round; + + _FPU_GETCW (fpscr); + ctx->updated_status = false; + ctx->env.__cw = fpscr; + + /* Check whether rounding modes are different. */ + round = (fpscr ^ r) & _FPU_MASK_RM; + + /* Set the rounding mode if changed. */ + if (__glibc_unlikely (round != 0)) + { + ctx->updated_status = true; + _FPU_SETCW (fpscr ^ round); + } +} + +static __always_inline void +libc_feresetround_vfp_ctx (struct rm_ctx *ctx) +{ + /* Restore the rounding mode if updated. */ + if (__glibc_unlikely (ctx->updated_status)) + { + fpu_control_t fpscr; + + _FPU_GETCW (fpscr); + fpscr = (fpscr & ~_FPU_MASK_RM) | (ctx->env.__cw & _FPU_MASK_RM); + _FPU_SETCW (fpscr); + } +} + +static __always_inline void +libc_fesetenv_vfp_ctx (struct rm_ctx *ctx) +{ + fpu_control_t fpscr, new_fpscr; + + _FPU_GETCW (fpscr); + new_fpscr = ctx->env.__cw; + + /* Write new FPSCR if different (ignoring NZCV flags). 
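   For context, the *_ctx helpers defined above are what libm's rounding-mode
   macros (SET_RESTORE_ROUND and friends) expand to when HAVE_RM_CTX is set;
   the pattern is roughly this hold/reset pair (a sketch, not code from this
   header):

     struct rm_ctx ctx;
     libc_feholdsetround_vfp_ctx (&ctx, FE_TONEAREST);  // switch RM, remember old FPSCR
     // ... rounding-sensitive computation ...
     libc_feresetround_vfp_ctx (&ctx);                  // undo only if it was changed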
*/ + if (__glibc_unlikely (((fpscr ^ new_fpscr) & ~_FPU_MASK_NZCV) != 0)) + _FPU_SETCW (new_fpscr); +} + +#ifndef __SOFTFP__ + +# define libc_feholdexcept libc_feholdexcept_vfp +# define libc_feholdexceptf libc_feholdexcept_vfp +# define libc_feholdexceptl libc_feholdexcept_vfp + +# define libc_fesetround libc_fesetround_vfp +# define libc_fesetroundf libc_fesetround_vfp +# define libc_fesetroundl libc_fesetround_vfp + +# define libc_feresetround libc_feresetround_vfp +# define libc_feresetroundf libc_feresetround_vfp +# define libc_feresetroundl libc_feresetround_vfp + +# define libc_feresetround_noex libc_fesetenv_vfp +# define libc_feresetround_noexf libc_fesetenv_vfp +# define libc_feresetround_noexl libc_fesetenv_vfp + +# define libc_feholdexcept_setround libc_feholdexcept_setround_vfp +# define libc_feholdexcept_setroundf libc_feholdexcept_setround_vfp +# define libc_feholdexcept_setroundl libc_feholdexcept_setround_vfp + +# define libc_feholdsetround libc_feholdsetround_vfp +# define libc_feholdsetroundf libc_feholdsetround_vfp +# define libc_feholdsetroundl libc_feholdsetround_vfp + +# define libc_fetestexcept libc_fetestexcept_vfp +# define libc_fetestexceptf libc_fetestexcept_vfp +# define libc_fetestexceptl libc_fetestexcept_vfp + +# define libc_fesetenv libc_fesetenv_vfp +# define libc_fesetenvf libc_fesetenv_vfp +# define libc_fesetenvl libc_fesetenv_vfp + +# define libc_feupdateenv libc_feupdateenv_vfp +# define libc_feupdateenvf libc_feupdateenv_vfp +# define libc_feupdateenvl libc_feupdateenv_vfp + +# define libc_feupdateenv_test libc_feupdateenv_test_vfp +# define libc_feupdateenv_testf libc_feupdateenv_test_vfp +# define libc_feupdateenv_testl libc_feupdateenv_test_vfp + +/* We have support for rounding mode context. */ +#define HAVE_RM_CTX 1 + +# define libc_feholdsetround_ctx libc_feholdsetround_vfp_ctx +# define libc_feresetround_ctx libc_feresetround_vfp_ctx +# define libc_feresetround_noex_ctx libc_fesetenv_vfp_ctx + +# define libc_feholdsetroundf_ctx libc_feholdsetround_vfp_ctx +# define libc_feresetroundf_ctx libc_feresetround_vfp_ctx +# define libc_feresetround_noexf_ctx libc_fesetenv_vfp_ctx + +# define libc_feholdsetroundl_ctx libc_feholdsetround_vfp_ctx +# define libc_feresetroundl_ctx libc_feresetround_vfp_ctx +# define libc_feresetround_noexl_ctx libc_fesetenv_vfp_ctx + +#endif + +#endif /* FENV_PRIVATE_H */ diff --git a/REORG.TODO/sysdeps/arm/fesetenv.c b/REORG.TODO/sysdeps/arm/fesetenv.c new file mode 100644 index 0000000000..e28d0a5d02 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fesetenv.c @@ -0,0 +1,66 @@ +/* Install given floating-point environment. + Copyright (C) 2004-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. 
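   Background for the two magic pointers handled below (an explanatory aside,
   not part of the original header): FE_DFL_ENV names the environment in
   effect at program start and FE_NOMASK_ENV is the GNU extension that
   additionally enables trapping on every exception, so a caller can simply
   write:

     fesetenv (FE_DFL_ENV);     // back to the default floating-point state
     fesetenv (FE_NOMASK_ENV);  // GNU extension: default state plus all traps enabled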
*/ + +#include <fenv.h> +#include <fpu_control.h> +#include <arm-features.h> + + +int +__fesetenv (const fenv_t *envp) +{ + fpu_control_t fpscr, new_fpscr, updated_fpscr; + + /* Fail if a VFP unit isn't present. */ + if (!ARM_HAVE_VFP) + return 1; + + _FPU_GETCW (fpscr); + + if ((envp != FE_DFL_ENV) && (envp != FE_NOMASK_ENV)) + { + /* The new FPSCR is valid, so don't merge the reserved flags. */ + new_fpscr = envp->__cw; + + /* Write new FPSCR if different (ignoring NZCV flags). */ + if (((fpscr ^ new_fpscr) & ~_FPU_MASK_NZCV) != 0) + _FPU_SETCW (new_fpscr); + + return 0; + } + + /* Preserve the reserved FPSCR flags. */ + new_fpscr = fpscr & _FPU_RESERVED; + new_fpscr |= (envp == FE_DFL_ENV) ? _FPU_DEFAULT : _FPU_IEEE; + + if (((new_fpscr ^ fpscr) & ~_FPU_MASK_NZCV) != 0) + { + _FPU_SETCW (new_fpscr); + + /* Not all VFP architectures support trapping exceptions, so + test whether the relevant bits were set and fail if not. */ + _FPU_GETCW (updated_fpscr); + + return new_fpscr & ~updated_fpscr; + } + + return 0; +} +libm_hidden_def (__fesetenv) +weak_alias (__fesetenv, fesetenv) +libm_hidden_weak (fesetenv) diff --git a/REORG.TODO/sysdeps/arm/fesetexcept.c b/REORG.TODO/sysdeps/arm/fesetexcept.c new file mode 100644 index 0000000000..994ccf5bb8 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fesetexcept.c @@ -0,0 +1,38 @@ +/* Set given exception flags. ARM version. + Copyright (C) 2016-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#include <fenv.h> +#include <fpu_control.h> +#include <arm-features.h> + +int +fesetexcept (int excepts) +{ + fpu_control_t fpscr, new_fpscr; + + /* Fail if a VFP unit isn't present unless nothing needs to be done. */ + if (!ARM_HAVE_VFP) + return (excepts != 0); + + _FPU_GETCW (fpscr); + new_fpscr = fpscr | (excepts & FE_ALL_EXCEPT); + if (new_fpscr != fpscr) + _FPU_SETCW (new_fpscr); + + return 0; +} diff --git a/REORG.TODO/sysdeps/arm/fesetmode.c b/REORG.TODO/sysdeps/arm/fesetmode.c new file mode 100644 index 0000000000..d2006985eb --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fesetmode.c @@ -0,0 +1,45 @@ +/* Install given floating-point control modes. ARM version. + Copyright (C) 2016-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#include <fenv.h> +#include <fpu_control.h> +#include <arm-features.h> + +/* NZCV flags, QC bit, IDC bit and bits for IEEE exception status. */ +#define FPU_STATUS_BITS 0xf800009f + +int +fesetmode (const femode_t *modep) +{ + fpu_control_t fpscr, new_fpscr; + + if (!ARM_HAVE_VFP) + /* Nothing to do. */ + return 0; + + _FPU_GETCW (fpscr); + if (modep == FE_DFL_MODE) + new_fpscr = (fpscr & (_FPU_RESERVED | FPU_STATUS_BITS)) | _FPU_DEFAULT; + else + new_fpscr = (fpscr & FPU_STATUS_BITS) | (*modep & ~FPU_STATUS_BITS); + + if (((new_fpscr ^ fpscr) & ~_FPU_MASK_NZCV) != 0) + _FPU_SETCW (new_fpscr); + + return 0; +} diff --git a/REORG.TODO/sysdeps/arm/fesetround.c b/REORG.TODO/sysdeps/arm/fesetround.c new file mode 100644 index 0000000000..e3ebd20055 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fesetround.c @@ -0,0 +1,39 @@ +/* Set current rounding direction. + Copyright (C) 2004-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <fenv_private.h> +#include <arm-features.h> + + +int +__fesetround (int round) +{ + /* FE_TONEAREST is the only supported rounding mode + if a VFP unit isn't present. */ + if (!ARM_HAVE_VFP) + return (round == FE_TONEAREST) ? 0 : 1; + + if (round & ~_FPU_MASK_RM) + return 1; + + libc_fesetround_vfp (round); + return 0; +} +libm_hidden_def (__fesetround) +weak_alias (__fesetround, fesetround) +libm_hidden_weak (fesetround) diff --git a/REORG.TODO/sysdeps/arm/feupdateenv.c b/REORG.TODO/sysdeps/arm/feupdateenv.c new file mode 100644 index 0000000000..21afa51d4a --- /dev/null +++ b/REORG.TODO/sysdeps/arm/feupdateenv.c @@ -0,0 +1,78 @@ +/* Install given floating-point environment and raise exceptions. + Copyright (C) 1997-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. 
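   For context (standard C99 usage, not code from this file), __feupdateenv
   below is usually the second half of the feholdexcept idiom:

     fenv_t env;
     if (feholdexcept (&env) == 0)      // save state, clear flags, mask traps
       {
         // ... computation whose intermediate exceptions should stay hidden ...
         feclearexcept (FE_UNDERFLOW);  // drop the ones we chose to ignore
         feupdateenv (&env);            // restore state, re-raise the rest
       }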
*/ + +#include <fenv.h> +#include <fpu_control.h> +#include <arm-features.h> + + +int +__feupdateenv (const fenv_t *envp) +{ + fpu_control_t fpscr, new_fpscr, updated_fpscr; + int excepts; + + /* Fail if a VFP unit isn't present. */ + if (!ARM_HAVE_VFP) + return 1; + + _FPU_GETCW (fpscr); + excepts = fpscr & FE_ALL_EXCEPT; + + if ((envp != FE_DFL_ENV) && (envp != FE_NOMASK_ENV)) + { + /* Merge current exception flags with the saved fenv. */ + new_fpscr = envp->__cw | excepts; + + /* Write new FPSCR if different (ignoring NZCV flags). */ + if (((fpscr ^ new_fpscr) & ~_FPU_MASK_NZCV) != 0) + _FPU_SETCW (new_fpscr); + + /* Raise the exceptions if enabled in the new FP state. */ + if (excepts & (new_fpscr >> FE_EXCEPT_SHIFT)) + return __feraiseexcept (excepts); + + return 0; + } + + /* Preserve the reserved FPSCR flags. */ + new_fpscr = fpscr & (_FPU_RESERVED | FE_ALL_EXCEPT); + new_fpscr |= (envp == FE_DFL_ENV) ? _FPU_DEFAULT : _FPU_IEEE; + + if (((new_fpscr ^ fpscr) & ~_FPU_MASK_NZCV) != 0) + { + _FPU_SETCW (new_fpscr); + + /* Not all VFP architectures support trapping exceptions, so + test whether the relevant bits were set and fail if not. */ + _FPU_GETCW (updated_fpscr); + + if (new_fpscr & ~updated_fpscr) + return 1; + } + + /* Raise the exceptions if enabled in the new FP state. */ + if (excepts & (new_fpscr >> FE_EXCEPT_SHIFT)) + return __feraiseexcept (excepts); + + return 0; +} +libm_hidden_def (__feupdateenv) +weak_alias (__feupdateenv, feupdateenv) +libm_hidden_weak (feupdateenv) diff --git a/REORG.TODO/sysdeps/arm/fgetexcptflg.c b/REORG.TODO/sysdeps/arm/fgetexcptflg.c new file mode 100644 index 0000000000..272dd22e9c --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fgetexcptflg.c @@ -0,0 +1,33 @@ +/* Store current representation for exceptions. + Copyright (C) 1997-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper <drepper@cygnus.com>, 1997. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <fenv_private.h> +#include <arm-features.h> + + +int +fegetexceptflag (fexcept_t *flagp, int excepts) +{ + /* Fail if a VFP unit isn't present. */ + if (!ARM_HAVE_VFP) + return 1; + + *flagp = libc_fetestexcept_vfp (excepts); + return 0; +} diff --git a/REORG.TODO/sysdeps/arm/find_exidx.c b/REORG.TODO/sysdeps/arm/find_exidx.c new file mode 100644 index 0000000000..1e6f1dc773 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/find_exidx.c @@ -0,0 +1,79 @@ +/* Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. 
+ + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <link.h> +#include <unwind.h> + +struct unw_eh_callback_data +{ + _Unwind_Ptr pc; + _Unwind_Ptr exidx_start; + int exidx_len; +}; + + +/* Callback to determine if the PC lies within an object, and remember the + location of the exception index table if it does. */ + +static int +find_exidx_callback (struct dl_phdr_info * info, size_t size, void * ptr) +{ + struct unw_eh_callback_data * data; + const ElfW(Phdr) *phdr; + int i; + int match; + _Unwind_Ptr load_base; + + data = (struct unw_eh_callback_data *) ptr; + load_base = info->dlpi_addr; + phdr = info->dlpi_phdr; + + match = 0; + for (i = info->dlpi_phnum; i > 0; i--, phdr++) + { + if (phdr->p_type == PT_LOAD) + { + _Unwind_Ptr vaddr = phdr->p_vaddr + load_base; + if (data->pc >= vaddr && data->pc < vaddr + phdr->p_memsz) + match = 1; + } + else if (phdr->p_type == PT_ARM_EXIDX) + { + data->exidx_start = (_Unwind_Ptr) (phdr->p_vaddr + load_base); + data->exidx_len = phdr->p_memsz; + } + } + + return match; +} + + +/* Find the exception index table containing PC. */ + +_Unwind_Ptr +__gnu_Unwind_Find_exidx (_Unwind_Ptr pc, int * pcount) +{ + struct unw_eh_callback_data data; + + data.pc = pc; + data.exidx_start = 0; + if (__dl_iterate_phdr (find_exidx_callback, &data) <= 0) + return 0; + + *pcount = data.exidx_len / 8; + return data.exidx_start; +} diff --git a/REORG.TODO/sysdeps/arm/fix-fp-int-convert-overflow.h b/REORG.TODO/sysdeps/arm/fix-fp-int-convert-overflow.h new file mode 100644 index 0000000000..cb7fea7abb --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fix-fp-int-convert-overflow.h @@ -0,0 +1,34 @@ +/* Fix for conversion of floating point to integer overflow. ARM version. + Copyright (C) 2015-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef FIX_FP_INT_CONVERT_OVERFLOW_H +#define FIX_FP_INT_CONVERT_OVERFLOW_H 1 + +/* As of GCC 5, the generic libgcc2.c conversions from floating point + to long long may not raise the correct exceptions on overflow (and + may raise spurious "inexact" exceptions even in non-overflow cases, + see <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=59412>). 
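   To make the effect of these knobs concrete, a simplified consumer could
   look like the following sketch (a hypothetical helper for illustration,
   not the actual libm conversion code; it would need <fenv.h>, <limits.h>
   and <math.h>):

     long long int
     convert_checked (double x)
     {
     #if FIX_DBL_LLONG_CONVERT_OVERFLOW
       if (!(fabs (x) < 0x1p63))        // out of range or NaN: the cast may misbehave
         {
           feraiseexcept (FE_INVALID);  // raise what the conversion is required to raise
           return x < 0 ? LLONG_MIN : LLONG_MAX;
         }
     #endif
       return (long long int) x;        // in range: the plain cast is fine
     }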
*/ +#define FIX_FLT_LLONG_CONVERT_OVERFLOW 1 +#define FIX_DBL_LLONG_CONVERT_OVERFLOW 1 +#define FIX_LDBL_LLONG_CONVERT_OVERFLOW 0 + +#define FIX_FLT_LONG_CONVERT_OVERFLOW 0 +#define FIX_DBL_LONG_CONVERT_OVERFLOW 0 +#define FIX_LDBL_LONG_CONVERT_OVERFLOW 0 + +#endif /* fix-fp-int-convert-overflow.h */ diff --git a/REORG.TODO/sysdeps/arm/fpu_control.h b/REORG.TODO/sysdeps/arm/fpu_control.h new file mode 100644 index 0000000000..405d427c84 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fpu_control.h @@ -0,0 +1,75 @@ +/* FPU control word definitions. ARM VFP version. + Copyright (C) 2004-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _FPU_CONTROL_H +#define _FPU_CONTROL_H + +#if !(defined(_LIBC) && !defined(_LIBC_TEST)) && defined(__SOFTFP__) + +#define _FPU_RESERVED 0xffffffff +#define _FPU_DEFAULT 0x00000000 +typedef unsigned int fpu_control_t; +#define _FPU_GETCW(cw) (cw) = 0 +#define _FPU_SETCW(cw) (void) (cw) +extern fpu_control_t __fpu_control; + +#else + +/* masking of interrupts */ +#define _FPU_MASK_IM 0x00000100 /* invalid operation */ +#define _FPU_MASK_ZM 0x00000200 /* divide by zero */ +#define _FPU_MASK_OM 0x00000400 /* overflow */ +#define _FPU_MASK_UM 0x00000800 /* underflow */ +#define _FPU_MASK_PM 0x00001000 /* inexact */ + +#define _FPU_MASK_NZCV 0xf0000000 /* NZCV flags */ +#define _FPU_MASK_RM 0x00c00000 /* rounding mode */ +#define _FPU_MASK_EXCEPT 0x00001f1f /* all exception flags */ + +/* Some bits in the FPSCR are not yet defined. They must be preserved when + modifying the contents. */ +#define _FPU_RESERVED 0x00086060 +#define _FPU_DEFAULT 0x00000000 + +/* Default + exceptions enabled. */ +#define _FPU_IEEE (_FPU_DEFAULT | 0x00001f00) + +/* Type of the control word. */ +typedef unsigned int fpu_control_t; + +/* Macros for accessing the hardware control word. */ +#ifdef __SOFTFP__ +/* This is fmrx %0, fpscr. */ +# define _FPU_GETCW(cw) \ + __asm__ __volatile__ ("mrc p10, 7, %0, cr1, cr0, 0" : "=r" (cw)) +/* This is fmxr fpscr, %0. */ +# define _FPU_SETCW(cw) \ + __asm__ __volatile__ ("mcr p10, 7, %0, cr1, cr0, 0" : : "r" (cw)) +#else +# define _FPU_GETCW(cw) \ + __asm__ __volatile__ ("vmrs %0, fpscr" : "=r" (cw)) +# define _FPU_SETCW(cw) \ + __asm__ __volatile__ ("vmsr fpscr, %0" : : "r" (cw)) +#endif + +/* Default control word set at startup. */ +extern fpu_control_t __fpu_control; + +#endif /* __SOFTFP__ */ + +#endif /* _FPU_CONTROL_H */ diff --git a/REORG.TODO/sysdeps/arm/fraiseexcpt.c b/REORG.TODO/sysdeps/arm/fraiseexcpt.c new file mode 100644 index 0000000000..be53409b36 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fraiseexcpt.c @@ -0,0 +1,107 @@ +/* Raise given exceptions. + Copyright (C) 2004-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <fpu_control.h> +#include <fenv.h> +#include <float.h> +#include <arm-features.h> + + +int +__feraiseexcept (int excepts) +{ + /* Fail if a VFP unit isn't present unless nothing needs to be done. */ + if (!ARM_HAVE_VFP) + return (excepts != 0); + else + { + fpu_control_t fpscr; + const float fp_zero = 0.0, fp_one = 1.0, fp_max = FLT_MAX, + fp_min = FLT_MIN, fp_1e32 = 1.0e32f, fp_two = 2.0, + fp_three = 3.0; + + /* Raise exceptions represented by EXPECTS. But we must raise only + one signal at a time. It is important that if the overflow/underflow + exception and the inexact exception are given at the same time, + the overflow/underflow exception follows the inexact exception. After + each exception we read from the fpscr, to force the exception to be + raised immediately. */ + + /* There are additional complications because this file may be compiled + without VFP support enabled, and we also can't assume that the + assembler has VFP instructions enabled. To get around this we use the + generic coprocessor mnemonics and avoid asking GCC to put float values + in VFP registers. */ + + /* First: invalid exception. */ + if (FE_INVALID & excepts) + __asm__ __volatile__ ( + "ldc p10, cr0, %1\n\t" /* flds s0, %1 */ + "cdp p10, 8, cr0, cr0, cr0, 0\n\t" /* fdivs s0, s0, s0 */ + "mrc p10, 7, %0, cr1, cr0, 0" : "=r" (fpscr) /* fmrx %0, fpscr */ + : "m" (fp_zero) + : "s0"); + + /* Next: division by zero. */ + if (FE_DIVBYZERO & excepts) + __asm__ __volatile__ ( + "ldc p10, cr0, %1\n\t" /* flds s0, %1 */ + "ldcl p10, cr0, %2\n\t" /* flds s1, %2 */ + "cdp p10, 8, cr0, cr0, cr0, 1\n\t" /* fdivs s0, s0, s1 */ + "mrc p10, 7, %0, cr1, cr0, 0" : "=r" (fpscr) /* fmrx %0, fpscr */ + : "m" (fp_one), "m" (fp_zero) + : "s0", "s1"); + + /* Next: overflow. */ + if (FE_OVERFLOW & excepts) + /* There's no way to raise overflow without also raising inexact. */ + __asm__ __volatile__ ( + "ldc p10, cr0, %1\n\t" /* flds s0, %1 */ + "ldcl p10, cr0, %2\n\t" /* flds s1, %2 */ + "cdp p10, 3, cr0, cr0, cr0, 1\n\t" /* fadds s0, s0, s1 */ + "mrc p10, 7, %0, cr1, cr0, 0" : "=r" (fpscr) /* fmrx %0, fpscr */ + : "m" (fp_max), "m" (fp_1e32) + : "s0", "s1"); + + /* Next: underflow. */ + if (FE_UNDERFLOW & excepts) + __asm__ __volatile__ ( + "ldc p10, cr0, %1\n\t" /* flds s0, %1 */ + "ldcl p10, cr0, %2\n\t" /* flds s1, %2 */ + "cdp p10, 8, cr0, cr0, cr0, 1\n\t" /* fdivs s0, s0, s1 */ + "mrc p10, 7, %0, cr1, cr0, 0" : "=r" (fpscr) /* fmrx %0, fpscr */ + : "m" (fp_min), "m" (fp_three) + : "s0", "s1"); + + /* Last: inexact. */ + if (FE_INEXACT & excepts) + __asm__ __volatile__ ( + "ldc p10, cr0, %1\n\t" /* flds s0, %1 */ + "ldcl p10, cr0, %2\n\t" /* flds s1, %2 */ + "cdp p10, 8, cr0, cr0, cr0, 1\n\t" /* fdivs s0, s0, s1 */ + "mrc p10, 7, %0, cr1, cr0, 0" : "=r" (fpscr) /* fmrx %0, fpscr */ + : "m" (fp_two), "m" (fp_three) + : "s0", "s1"); + + /* Success. 
*/ + return 0; + } +} +libm_hidden_def (__feraiseexcept) +weak_alias (__feraiseexcept, feraiseexcept) +libm_hidden_weak (feraiseexcept) diff --git a/REORG.TODO/sysdeps/arm/frame.h b/REORG.TODO/sysdeps/arm/frame.h new file mode 100644 index 0000000000..22dabfd207 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/frame.h @@ -0,0 +1,27 @@ +/* Definition of stack frame structure. ARM/APCS version. + Copyright (C) 2000-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* This is the APCS stack backtrace structure. */ +struct layout +{ + struct layout *next; + void *sp; + void *return_address; +}; + +#define FIRST_FRAME_POINTER ADVANCE_STACK_FRAME (__builtin_frame_address (0)) diff --git a/REORG.TODO/sysdeps/arm/framestate.c b/REORG.TODO/sysdeps/arm/framestate.c new file mode 100644 index 0000000000..710cecca97 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/framestate.c @@ -0,0 +1 @@ +/* Empty */ diff --git a/REORG.TODO/sysdeps/arm/fsetexcptflg.c b/REORG.TODO/sysdeps/arm/fsetexcptflg.c new file mode 100644 index 0000000000..4871a9bd5c --- /dev/null +++ b/REORG.TODO/sysdeps/arm/fsetexcptflg.c @@ -0,0 +1,45 @@ +/* Set floating-point environment exception handling. + Copyright (C) 1997-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <fenv.h> +#include <fpu_control.h> +#include <arm-features.h> + + +int +fesetexceptflag (const fexcept_t *flagp, int excepts) +{ + fpu_control_t fpscr, new_fpscr; + + /* Fail if a VFP unit isn't present unless nothing needs to be done. */ + if (!ARM_HAVE_VFP) + return (excepts != 0); + + _FPU_GETCW (fpscr); + excepts &= FE_ALL_EXCEPT; + + /* Set the desired exception mask. */ + new_fpscr = fpscr & ~excepts; + new_fpscr |= *flagp & excepts; + + /* Write new exception flags if changed. */ + if (new_fpscr != fpscr) + _FPU_SETCW (new_fpscr); + + return 0; +} diff --git a/REORG.TODO/sysdeps/arm/ftestexcept.c b/REORG.TODO/sysdeps/arm/ftestexcept.c new file mode 100644 index 0000000000..b1e13d751b --- /dev/null +++ b/REORG.TODO/sysdeps/arm/ftestexcept.c @@ -0,0 +1,32 @@ +/* Test exception in current environment. + Copyright (C) 1997-2017 Free Software Foundation, Inc. 
+ This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <fenv_private.h> +#include <arm-features.h> + + +int +fetestexcept (int excepts) +{ + /* Return no exception flags if a VFP unit isn't present. */ + if (!ARM_HAVE_VFP) + return 0; + + return libc_fetestexcept_vfp (excepts); +} +libm_hidden_def (fetestexcept) diff --git a/REORG.TODO/sysdeps/arm/gcc-compat.h b/REORG.TODO/sysdeps/arm/gcc-compat.h new file mode 100644 index 0000000000..a8524ccc69 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/gcc-compat.h @@ -0,0 +1,35 @@ +/* Macros for checking required GCC compatibility. ARM version. + Copyright (C) 2014-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _ARM_GCC_COMPAT_H +#define _ARM_GCC_COMPAT_H 1 + +#ifndef GCC_COMPAT_VERSION +# ifdef __ARM_PCS_VFP +/* The hard-float ABI was first supported in 4.5. */ +# define GCC_COMPAT_VERSION GCC_VERSION (4, 5) +# else +/* The EABI configurations (the only ones we handle) were first supported + in 4.1. */ +# define GCC_COMPAT_VERSION GCC_VERSION (4, 1) +# endif +#endif + +#include_next <gcc-compat.h> + +#endif diff --git a/REORG.TODO/sysdeps/arm/gccframe.h b/REORG.TODO/sysdeps/arm/gccframe.h new file mode 100644 index 0000000000..13e7eed89f --- /dev/null +++ b/REORG.TODO/sysdeps/arm/gccframe.h @@ -0,0 +1,21 @@ +/* Definition of object in frame unwind info. arm version. + Copyright (C) 2001-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. 
*/ + +#define FIRST_PSEUDO_REGISTER 27 + +#include <sysdeps/generic/gccframe.h> diff --git a/REORG.TODO/sysdeps/arm/get-rounding-mode.h b/REORG.TODO/sysdeps/arm/get-rounding-mode.h new file mode 100644 index 0000000000..a76d4bacb6 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/get-rounding-mode.h @@ -0,0 +1,42 @@ +/* Determine floating-point rounding mode within libc. ARM version. + Copyright (C) 2012-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _ARM_GET_ROUNDING_MODE_H +#define _ARM_GET_ROUNDING_MODE_H 1 + +#include <arm-features.h> +#include <fenv.h> +#include <fpu_control.h> + +/* Return the floating-point rounding mode. */ + +static inline int +get_rounding_mode (void) +{ + fpu_control_t fpscr; + + /* FE_TONEAREST is the only supported rounding mode + if a VFP unit isn't present. */ + if (!ARM_HAVE_VFP) + return FE_TONEAREST; + + _FPU_GETCW (fpscr); + return fpscr & _FPU_MASK_RM; +} + +#endif /* get-rounding-mode.h */ diff --git a/REORG.TODO/sysdeps/arm/gmp-mparam.h b/REORG.TODO/sysdeps/arm/gmp-mparam.h new file mode 100644 index 0000000000..5731dbde8f --- /dev/null +++ b/REORG.TODO/sysdeps/arm/gmp-mparam.h @@ -0,0 +1,36 @@ +/* gmp-mparam.h -- Compiler/machine parameter header file. + +Copyright (C) 1991-2017 Free Software Foundation, Inc. + +This file is part of the GNU MP Library. + +The GNU MP Library is free software; you can redistribute it and/or modify +it under the terms of the GNU Lesser General Public License as published by +the Free Software Foundation; either version 2.1 of the License, or (at your +option) any later version. + +The GNU MP Library is distributed in the hope that it will be useful, but +WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY +or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public +License for more details. + +You should have received a copy of the GNU Lesser General Public License +along with the GNU MP Library. If not, see <http://www.gnu.org/licenses/>. */ + +#define BITS_PER_MP_LIMB 32 +#define BYTES_PER_MP_LIMB 4 +#define BITS_PER_LONGINT 32 +#define BITS_PER_INT 32 +#define BITS_PER_SHORTINT 16 +#define BITS_PER_CHAR 8 + +#if defined(__ARMEB__) +# define IEEE_DOUBLE_MIXED_ENDIAN 0 +# define IEEE_DOUBLE_BIG_ENDIAN 1 +#elif defined(__VFP_FP__) +# define IEEE_DOUBLE_MIXED_ENDIAN 0 +# define IEEE_DOUBLE_BIG_ENDIAN 0 +#else +# define IEEE_DOUBLE_BIG_ENDIAN 0 +# define IEEE_DOUBLE_MIXED_ENDIAN 1 +#endif diff --git a/REORG.TODO/sysdeps/arm/include/bits/setjmp.h b/REORG.TODO/sysdeps/arm/include/bits/setjmp.h new file mode 100644 index 0000000000..9e24705f22 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/include/bits/setjmp.h @@ -0,0 +1,36 @@ +/* Private jmp_buf-related definitions. ARM EABI version. + Copyright (C) 2013-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _INCLUDE_BITS_SETJMP_H +#define _INCLUDE_BITS_SETJMP_H 1 + +#ifndef __ASSEMBLER__ +/* Get the public declarations. */ +# include <sysdeps/arm/bits/setjmp.h> +#endif + +#ifndef _ISOMAC +/* Register list for a ldm/stm instruction to load/store + the general registers from a __jmp_buf. */ +# define JMP_BUF_REGLIST {v1-v6, sl, fp} + +/* Index of __jmp_buf where the sp register resides. */ +# define __JMP_BUF_SP 0 +#endif + +#endif /* include/bits/setjmp.h */ diff --git a/REORG.TODO/sysdeps/arm/jmpbuf-unwind.h b/REORG.TODO/sysdeps/arm/jmpbuf-unwind.h new file mode 100644 index 0000000000..7a4e49ea78 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/jmpbuf-unwind.h @@ -0,0 +1,45 @@ +/* Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <setjmp.h> +#include <stdint.h> +#include <sysdep.h> +#include <unwind.h> + +/* Test if longjmp to JMPBUF would unwind the frame + containing a local variable at ADDRESS. */ +#define _JMPBUF_UNWINDS(jmpbuf, address, demangle) \ + ((void *) (address) < (void *) demangle (jmpbuf[__JMP_BUF_SP])) + +#define _JMPBUF_CFA_UNWINDS_ADJ(_jmpbuf, _context, _adj) \ + _JMPBUF_UNWINDS_ADJ (_jmpbuf, (void *) _Unwind_GetCFA (_context), _adj) + +static inline uintptr_t __attribute__ ((unused)) +_jmpbuf_sp (__jmp_buf regs) +{ + uintptr_t sp = regs[__JMP_BUF_SP]; +#ifdef PTR_DEMANGLE + PTR_DEMANGLE (sp); +#endif + return sp; +} + +#define _JMPBUF_UNWINDS_ADJ(_jmpbuf, _address, _adj) \ + ((uintptr_t) (_address) - (_adj) < _jmpbuf_sp (_jmpbuf) - (_adj)) + +/* We use the normal longjmp for unwinding. */ +#define __libc_unwind_longjmp(buf, val) __libc_longjmp (buf, val) diff --git a/REORG.TODO/sysdeps/arm/ldsodefs.h b/REORG.TODO/sysdeps/arm/ldsodefs.h new file mode 100644 index 0000000000..0dd568cff3 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/ldsodefs.h @@ -0,0 +1,40 @@ +/* Run-time dynamic linker data structures for loaded ELF shared objects. + Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _ARM_LDSODEFS_H +#define _ARM_LDSODEFS_H 1 + +#include <elf.h> + +struct La_arm_regs; +struct La_arm_retval; + +#define ARCH_PLTENTER_MEMBERS \ + Elf32_Addr (*arm_gnu_pltenter) (Elf32_Sym *, unsigned int, uintptr_t *, \ + uintptr_t *, struct La_arm_regs *, \ + unsigned int *, const char *, \ + long int *) + +#define ARCH_PLTEXIT_MEMBERS \ + Elf32_Addr (*arm_gnu_pltexit) (Elf32_Sym *, unsigned int, uintptr_t *, \ + uintptr_t *, const struct La_arm_regs *, \ + struct La_arm_retval *, const char *) + +#include_next <ldsodefs.h> + +#endif diff --git a/REORG.TODO/sysdeps/arm/libc-aeabi_read_tp.S b/REORG.TODO/sysdeps/arm/libc-aeabi_read_tp.S new file mode 100644 index 0000000000..6132afc151 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/libc-aeabi_read_tp.S @@ -0,0 +1 @@ +#include <aeabi_read_tp.S> diff --git a/REORG.TODO/sysdeps/arm/libc-tls.c b/REORG.TODO/sysdeps/arm/libc-tls.c new file mode 100644 index 0000000000..9b2a52e0ec --- /dev/null +++ b/REORG.TODO/sysdeps/arm/libc-tls.c @@ -0,0 +1,32 @@ +/* Thread-local storage handling in the ELF dynamic linker. ARM version. + Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <csu/libc-tls.c> +#include <dl-tls.h> + +/* On ARM, linker optimizations are not required, so __tls_get_addr + can be called even in statically linked binaries. In this case module + must be always 1 and PT_TLS segment exist in the binary, otherwise it + would not link. 
*/ + +void * +__tls_get_addr (tls_index *ti) +{ + dtv_t *dtv = THREAD_DTV (); + return (char *) dtv[1].pointer.val + ti->ti_offset; +} diff --git a/REORG.TODO/sysdeps/arm/libm-test-ulps b/REORG.TODO/sysdeps/arm/libm-test-ulps new file mode 100644 index 0000000000..c6ee80095a --- /dev/null +++ b/REORG.TODO/sysdeps/arm/libm-test-ulps @@ -0,0 +1,1690 @@ +# Begin of automatic generation + +# Maximal error of functions: +Function: "acos": +float: 1 +ifloat: 1 + +Function: "acos_downward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "acos_towardzero": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "acos_upward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "acosh": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "acosh_downward": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "acosh_towardzero": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "acosh_upward": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "asin": +float: 1 +ifloat: 1 + +Function: "asin_downward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "asin_towardzero": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "asin_upward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "asinh": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "asinh_downward": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: "asinh_towardzero": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "asinh_upward": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: "atan": +float: 1 +ifloat: 1 + +Function: "atan2": +float: 1 +ifloat: 1 + +Function: "atan2_downward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "atan2_towardzero": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "atan2_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "atan_downward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "atan_towardzero": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "atan_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "atanh": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "atanh_downward": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: "atanh_towardzero": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "atanh_upward": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: "cabs": +double: 1 +idouble: 1 + +Function: "cabs_downward": +double: 1 +idouble: 1 + +Function: "cabs_towardzero": +double: 1 +idouble: 1 + +Function: "cabs_upward": +double: 1 +idouble: 1 + +Function: Real part of "cacos": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Imaginary part of "cacos": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Real part of "cacos_downward": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: Imaginary part of "cacos_downward": +double: 5 +float: 3 +idouble: 5 +ifloat: 3 + +Function: Real part of "cacos_towardzero": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: Imaginary part of "cacos_towardzero": +double: 5 +float: 3 +idouble: 5 +ifloat: 3 + +Function: Real part of "cacos_upward": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Imaginary part of "cacos_upward": +double: 5 +float: 7 +idouble: 5 +ifloat: 7 + +Function: Real part of "cacosh": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Imaginary part of "cacosh": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Real part of "cacosh_downward": +double: 5 +float: 3 +idouble: 5 +ifloat: 3 + +Function: Imaginary part of 
"cacosh_downward": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: Real part of "cacosh_towardzero": +double: 5 +float: 3 +idouble: 5 +ifloat: 3 + +Function: Imaginary part of "cacosh_towardzero": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: Real part of "cacosh_upward": +double: 4 +float: 4 +idouble: 4 +ifloat: 4 + +Function: Imaginary part of "cacosh_upward": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: "carg": +float: 1 +ifloat: 1 + +Function: "carg_downward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "carg_towardzero": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "carg_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Real part of "casin": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Imaginary part of "casin": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Real part of "casin_downward": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: Imaginary part of "casin_downward": +double: 5 +float: 3 +idouble: 5 +ifloat: 3 + +Function: Real part of "casin_towardzero": +double: 3 +float: 1 +idouble: 3 +ifloat: 1 + +Function: Imaginary part of "casin_towardzero": +double: 5 +float: 3 +idouble: 5 +ifloat: 3 + +Function: Real part of "casin_upward": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: Imaginary part of "casin_upward": +double: 5 +float: 7 +idouble: 5 +ifloat: 7 + +Function: Real part of "casinh": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Imaginary part of "casinh": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Real part of "casinh_downward": +double: 5 +float: 3 +idouble: 5 +ifloat: 3 + +Function: Imaginary part of "casinh_downward": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: Real part of "casinh_towardzero": +double: 5 +float: 3 +idouble: 5 +ifloat: 3 + +Function: Imaginary part of "casinh_towardzero": +double: 3 +float: 1 +idouble: 3 +ifloat: 1 + +Function: Real part of "casinh_upward": +double: 5 +float: 7 +idouble: 5 +ifloat: 7 + +Function: Imaginary part of "casinh_upward": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: Real part of "catan": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Imaginary part of "catan": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Real part of "catan_downward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Imaginary part of "catan_downward": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Real part of "catan_towardzero": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Imaginary part of "catan_towardzero": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Real part of "catan_upward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Imaginary part of "catan_upward": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: Real part of "catanh": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Imaginary part of "catanh": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Real part of "catanh_downward": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Imaginary part of "catanh_downward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Real part of "catanh_towardzero": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Imaginary part of "catanh_towardzero": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Real part of "catanh_upward": +double: 4 +float: 4 +idouble: 4 +ifloat: 4 + +Function: Imaginary part of "catanh_upward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "cbrt": 
+double: 3 +float: 1 +idouble: 3 +ifloat: 1 + +Function: "cbrt_downward": +double: 4 +float: 1 +idouble: 4 +ifloat: 1 + +Function: "cbrt_towardzero": +double: 3 +float: 1 +idouble: 3 +ifloat: 1 + +Function: "cbrt_upward": +double: 5 +float: 1 +idouble: 5 +ifloat: 1 + +Function: Real part of "ccos": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Imaginary part of "ccos": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Real part of "ccos_downward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Imaginary part of "ccos_downward": +double: 2 +float: 3 +idouble: 2 +ifloat: 3 + +Function: Real part of "ccos_towardzero": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Imaginary part of "ccos_towardzero": +double: 2 +float: 3 +idouble: 2 +ifloat: 3 + +Function: Real part of "ccos_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Imaginary part of "ccos_upward": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Real part of "ccosh": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Imaginary part of "ccosh": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Real part of "ccosh_downward": +double: 1 +float: 3 +idouble: 1 +ifloat: 3 + +Function: Imaginary part of "ccosh_downward": +double: 2 +float: 3 +idouble: 2 +ifloat: 3 + +Function: Real part of "ccosh_towardzero": +double: 1 +float: 3 +idouble: 1 +ifloat: 3 + +Function: Imaginary part of "ccosh_towardzero": +double: 2 +float: 3 +idouble: 2 +ifloat: 3 + +Function: Real part of "ccosh_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Imaginary part of "ccosh_upward": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Real part of "cexp": +double: 2 +float: 1 +idouble: 2 +ifloat: 1 + +Function: Imaginary part of "cexp": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Real part of "cexp_downward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Imaginary part of "cexp_downward": +double: 1 +float: 3 +idouble: 1 +ifloat: 3 + +Function: Real part of "cexp_towardzero": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Imaginary part of "cexp_towardzero": +double: 1 +float: 3 +idouble: 1 +ifloat: 3 + +Function: Real part of "cexp_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Imaginary part of "cexp_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Real part of "clog": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: Imaginary part of "clog": +float: 1 +ifloat: 1 + +Function: Real part of "clog10": +double: 3 +float: 4 +idouble: 3 +ifloat: 4 + +Function: Imaginary part of "clog10": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Real part of "clog10_downward": +double: 5 +float: 4 +idouble: 5 +ifloat: 4 + +Function: Imaginary part of "clog10_downward": +double: 2 +float: 4 +idouble: 2 +ifloat: 4 + +Function: Real part of "clog10_towardzero": +double: 5 +float: 5 +idouble: 5 +ifloat: 5 + +Function: Imaginary part of "clog10_towardzero": +double: 2 +float: 4 +idouble: 2 +ifloat: 4 + +Function: Real part of "clog10_upward": +double: 6 +float: 5 +idouble: 6 +ifloat: 5 + +Function: Imaginary part of "clog10_upward": +double: 2 +float: 4 +idouble: 2 +ifloat: 4 + +Function: Real part of "clog_downward": +double: 4 +float: 3 +idouble: 4 +ifloat: 3 + +Function: Imaginary part of "clog_downward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Real part of "clog_towardzero": +double: 4 +float: 4 +idouble: 4 +ifloat: 4 + +Function: Imaginary part of "clog_towardzero": +double: 1 
+float: 3 +idouble: 1 +ifloat: 3 + +Function: Real part of "clog_upward": +double: 4 +float: 3 +idouble: 4 +ifloat: 3 + +Function: Imaginary part of "clog_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "cos": +float: 1 +ifloat: 1 + +Function: "cos_downward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "cos_towardzero": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "cos_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "cosh": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "cosh_downward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "cosh_towardzero": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "cosh_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Real part of "cpow": +double: 2 +float: 4 +idouble: 2 +ifloat: 4 + +Function: Imaginary part of "cpow": +float: 2 +ifloat: 2 + +Function: Real part of "cpow_downward": +double: 4 +float: 8 +idouble: 4 +ifloat: 8 + +Function: Imaginary part of "cpow_downward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Real part of "cpow_towardzero": +double: 4 +float: 8 +idouble: 4 +ifloat: 8 + +Function: Imaginary part of "cpow_towardzero": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Real part of "cpow_upward": +double: 4 +float: 1 +idouble: 4 +ifloat: 1 + +Function: Imaginary part of "cpow_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Real part of "csin": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Real part of "csin_downward": +double: 2 +float: 3 +idouble: 2 +ifloat: 3 + +Function: Imaginary part of "csin_downward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Real part of "csin_towardzero": +double: 2 +float: 3 +idouble: 2 +ifloat: 3 + +Function: Imaginary part of "csin_towardzero": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Real part of "csin_upward": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Imaginary part of "csin_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Real part of "csinh": +float: 1 +ifloat: 1 + +Function: Imaginary part of "csinh": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Real part of "csinh_downward": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Imaginary part of "csinh_downward": +double: 2 +float: 3 +idouble: 2 +ifloat: 3 + +Function: Real part of "csinh_towardzero": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Imaginary part of "csinh_towardzero": +double: 2 +float: 3 +idouble: 2 +ifloat: 3 + +Function: Real part of "csinh_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: Imaginary part of "csinh_upward": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Real part of "csqrt": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Imaginary part of "csqrt": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Real part of "csqrt_downward": +double: 5 +float: 4 +idouble: 5 +ifloat: 4 + +Function: Imaginary part of "csqrt_downward": +double: 4 +float: 3 +idouble: 4 +ifloat: 3 + +Function: Real part of "csqrt_towardzero": +double: 4 +float: 3 +idouble: 4 +ifloat: 3 + +Function: Imaginary part of "csqrt_towardzero": +double: 4 +float: 3 +idouble: 4 +ifloat: 3 + +Function: Real part of "csqrt_upward": +double: 5 +float: 4 +idouble: 5 +ifloat: 4 + +Function: Imaginary part of "csqrt_upward": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: Real part of "ctan": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: Imaginary part of "ctan": 
+double: 2 +float: 1 +idouble: 2 +ifloat: 1 + +Function: Real part of "ctan_downward": +double: 6 +float: 5 +idouble: 6 +ifloat: 5 + +Function: Imaginary part of "ctan_downward": +double: 2 +float: 1 +idouble: 2 +ifloat: 1 + +Function: Real part of "ctan_towardzero": +double: 5 +float: 3 +idouble: 5 +ifloat: 3 + +Function: Imaginary part of "ctan_towardzero": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Real part of "ctan_upward": +double: 2 +float: 3 +idouble: 2 +ifloat: 3 + +Function: Imaginary part of "ctan_upward": +double: 2 +float: 3 +idouble: 2 +ifloat: 3 + +Function: Real part of "ctanh": +double: 2 +float: 1 +idouble: 2 +ifloat: 1 + +Function: Imaginary part of "ctanh": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Real part of "ctanh_downward": +double: 4 +float: 1 +idouble: 4 +ifloat: 1 + +Function: Imaginary part of "ctanh_downward": +double: 6 +float: 5 +idouble: 6 +ifloat: 5 + +Function: Real part of "ctanh_towardzero": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: Imaginary part of "ctanh_towardzero": +double: 5 +float: 3 +idouble: 5 +ifloat: 3 + +Function: Real part of "ctanh_upward": +double: 2 +float: 3 +idouble: 2 +ifloat: 3 + +Function: Imaginary part of "ctanh_upward": +double: 2 +float: 3 +idouble: 2 +ifloat: 3 + +Function: "erf": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "erf_downward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "erf_towardzero": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "erf_upward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "erfc": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: "erfc_downward": +double: 5 +float: 6 +idouble: 5 +ifloat: 6 + +Function: "erfc_towardzero": +double: 3 +float: 4 +idouble: 3 +ifloat: 4 + +Function: "erfc_upward": +double: 5 +float: 6 +idouble: 5 +ifloat: 6 + +Function: "exp": +float: 1 +ifloat: 1 + +Function: "exp10": +double: 2 +idouble: 2 + +Function: "exp10_downward": +double: 2 +float: 1 +idouble: 2 +ifloat: 1 + +Function: "exp10_towardzero": +double: 2 +float: 1 +idouble: 2 +ifloat: 1 + +Function: "exp10_upward": +double: 2 +float: 1 +idouble: 2 +ifloat: 1 + +Function: "exp2": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "exp2_downward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "exp2_towardzero": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "exp2_upward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "exp_downward": +double: 1 +idouble: 1 + +Function: "exp_towardzero": +double: 1 +idouble: 1 + +Function: "exp_upward": +double: 1 +idouble: 1 + +Function: "expm1": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "expm1_downward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "expm1_towardzero": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "expm1_upward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "gamma": +double: 4 +float: 4 +idouble: 4 +ifloat: 4 + +Function: "gamma_downward": +double: 5 +float: 4 +idouble: 5 +ifloat: 4 + +Function: "gamma_towardzero": +double: 5 +float: 4 +idouble: 5 +ifloat: 4 + +Function: "gamma_upward": +double: 5 +float: 5 +idouble: 5 +ifloat: 5 + +Function: "hypot": +double: 1 +idouble: 1 + +Function: "hypot_downward": +double: 1 +idouble: 1 + +Function: "hypot_towardzero": +double: 1 +idouble: 1 + +Function: "hypot_upward": +double: 1 +idouble: 1 + +Function: "j0": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "j0_downward": +double: 2 +float: 3 +idouble: 2 +ifloat: 3 + +Function: 
"j0_towardzero": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: "j0_upward": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: "j1": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "j1_downward": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: "j1_towardzero": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: "j1_upward": +double: 3 +float: 5 +idouble: 3 +ifloat: 5 + +Function: "jn": +double: 4 +float: 4 +idouble: 4 +ifloat: 4 + +Function: "jn_downward": +double: 5 +float: 5 +idouble: 5 +ifloat: 5 + +Function: "jn_towardzero": +double: 5 +float: 5 +idouble: 5 +ifloat: 5 + +Function: "jn_upward": +double: 5 +float: 5 +idouble: 5 +ifloat: 5 + +Function: "lgamma": +double: 4 +float: 4 +idouble: 4 +ifloat: 4 + +Function: "lgamma_downward": +double: 5 +float: 4 +idouble: 5 +ifloat: 4 + +Function: "lgamma_towardzero": +double: 5 +float: 4 +idouble: 5 +ifloat: 4 + +Function: "lgamma_upward": +double: 5 +float: 5 +idouble: 5 +ifloat: 5 + +Function: "log": +float: 1 +ifloat: 1 + +Function: "log10": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "log10_downward": +double: 2 +float: 3 +idouble: 2 +ifloat: 3 + +Function: "log10_towardzero": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "log10_upward": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "log1p": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "log1p_downward": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "log1p_towardzero": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "log1p_upward": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "log2": +double: 2 +float: 1 +idouble: 2 +ifloat: 1 + +Function: "log2_downward": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: "log2_towardzero": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "log2_upward": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: "log_downward": +float: 2 +ifloat: 2 + +Function: "log_towardzero": +float: 2 +ifloat: 2 + +Function: "log_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "pow": +float: 1 +ifloat: 1 + +Function: "pow10": +double: 2 +idouble: 2 + +Function: "pow10_downward": +double: 2 +float: 1 +idouble: 2 +ifloat: 1 + +Function: "pow10_towardzero": +double: 2 +float: 1 +idouble: 2 +ifloat: 1 + +Function: "pow10_upward": +double: 2 +float: 1 +idouble: 2 +ifloat: 1 + +Function: "pow_downward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "pow_towardzero": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "pow_upward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "sin": +float: 1 +ifloat: 1 + +Function: "sin_downward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "sin_towardzero": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "sin_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "sincos": +float: 1 +ifloat: 1 + +Function: "sincos_downward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "sincos_towardzero": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "sincos_upward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: "sinh": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "sinh_downward": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: "sinh_towardzero": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "sinh_upward": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: "tan": +float: 1 +ifloat: 1 + +Function: "tan_downward": +double: 1 +float: 2 +idouble: 1 +ifloat: 2 + +Function: 
"tan_towardzero": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "tan_upward": +double: 1 +float: 1 +idouble: 1 +ifloat: 1 + +Function: "tanh": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "tanh_downward": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: "tanh_towardzero": +double: 2 +float: 2 +idouble: 2 +ifloat: 2 + +Function: "tanh_upward": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: "tgamma": +double: 5 +float: 4 +idouble: 5 +ifloat: 4 + +Function: "tgamma_downward": +double: 5 +float: 5 +idouble: 5 +ifloat: 5 + +Function: "tgamma_towardzero": +double: 5 +float: 4 +idouble: 5 +ifloat: 4 + +Function: "tgamma_upward": +double: 4 +float: 4 +idouble: 4 +ifloat: 4 + +Function: "y0": +double: 2 +float: 1 +idouble: 2 +ifloat: 1 + +Function: "y0_downward": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: "y0_towardzero": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: "y0_upward": +double: 3 +float: 4 +idouble: 3 +ifloat: 4 + +Function: "y1": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: "y1_downward": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: "y1_towardzero": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: "y1_upward": +double: 7 +float: 2 +idouble: 7 +ifloat: 2 + +Function: "yn": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: "yn_downward": +double: 3 +float: 2 +idouble: 3 +ifloat: 2 + +Function: "yn_towardzero": +double: 3 +float: 3 +idouble: 3 +ifloat: 3 + +Function: "yn_upward": +double: 4 +float: 4 +idouble: 4 +ifloat: 4 + +# end of automatic generation diff --git a/REORG.TODO/sysdeps/arm/libm-test-ulps-name b/REORG.TODO/sysdeps/arm/libm-test-ulps-name new file mode 100644 index 0000000000..74f9acfffa --- /dev/null +++ b/REORG.TODO/sysdeps/arm/libm-test-ulps-name @@ -0,0 +1 @@ +ARM diff --git a/REORG.TODO/sysdeps/arm/linkmap.h b/REORG.TODO/sysdeps/arm/linkmap.h new file mode 100644 index 0000000000..7c2b4a6da8 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/linkmap.h @@ -0,0 +1,5 @@ +struct link_map_machine + { + Elf32_Addr plt; /* Address of .plt */ + void *tlsdesc_table; /* Address of TLS descriptor hash table. */ + }; diff --git a/REORG.TODO/sysdeps/arm/machine-gmon.h b/REORG.TODO/sysdeps/arm/machine-gmon.h new file mode 100644 index 0000000000..8e9f43a662 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/machine-gmon.h @@ -0,0 +1,33 @@ +/* Machine-dependent definitions for profiling support. ARM EABI version. + Copyright (C) 2008-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> + +/* GCC for the ARM cannot compile __builtin_return_address(N) for N != 0, + so we must use an assembly stub. */ + +/* We must not pollute the global namespace. 
*/ +#define mcount_internal __mcount_internal + +extern void mcount_internal (u_long frompc, u_long selfpc) internal_function; +#define _MCOUNT_DECL(frompc, selfpc) \ + void internal_function mcount_internal (u_long frompc, u_long selfpc) + + +/* Define MCOUNT as empty since we have the implementation in another file. */ +#define MCOUNT diff --git a/REORG.TODO/sysdeps/arm/math-tests.h b/REORG.TODO/sysdeps/arm/math-tests.h new file mode 100644 index 0000000000..14f9265ad8 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/math-tests.h @@ -0,0 +1,35 @@ +/* Configuration for math tests. ARM version. + Copyright (C) 2013-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +/* On systems with VFP support, but where glibc is built for + soft-float, the libgcc functions used in libc and libm do not + support rounding modes, although fesetround succeeds, and do not + support exceptions. */ +#ifdef __SOFTFP__ +# define ROUNDING_TESTS_float(MODE) ((MODE) == FE_TONEAREST) +# define ROUNDING_TESTS_double(MODE) ((MODE) == FE_TONEAREST) +# define ROUNDING_TESTS_long_double(MODE) ((MODE) == FE_TONEAREST) +# define EXCEPTION_TESTS_float 0 +# define EXCEPTION_TESTS_double 0 +# define EXCEPTION_TESTS_long_double 0 +#endif + +/* Not all VFP implementations support trapping exceptions. */ +#define EXCEPTION_ENABLE_SUPPORTED(EXCEPT) ((EXCEPT) == 0) + +#include_next <math-tests.h> diff --git a/REORG.TODO/sysdeps/arm/math_private.h b/REORG.TODO/sysdeps/arm/math_private.h new file mode 100644 index 0000000000..d39e9ee24a --- /dev/null +++ b/REORG.TODO/sysdeps/arm/math_private.h @@ -0,0 +1,11 @@ +#ifndef ARM_MATH_PRIVATE_H +#define ARM_MATH_PRIVATE_H 1 + +/* Enable __finitel, __isinfl, and __isnanl for binary compatibility + when built without long double support. */ +#define LDBL_CLASSIFY_COMPAT 1 + +#include "fenv_private.h" +#include_next <math_private.h> + +#endif diff --git a/REORG.TODO/sysdeps/arm/memcpy.S b/REORG.TODO/sysdeps/arm/memcpy.S new file mode 100644 index 0000000000..62e48c3e4a --- /dev/null +++ b/REORG.TODO/sysdeps/arm/memcpy.S @@ -0,0 +1,320 @@ +/* Copyright (C) 2006-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + Contributed by MontaVista Software, Inc. (written by Nicolas Pitre) + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Thumb requires excessive IT insns here. */ +#define NO_THUMB +#include <sysdep.h> +#include <arm-features.h> + +/* + * Data preload for architectures that support it (ARM V5TE and above) + */ +#if (!defined (__ARM_ARCH_2__) && !defined (__ARM_ARCH_3__) \ + && !defined (__ARM_ARCH_3M__) && !defined (__ARM_ARCH_4__) \ + && !defined (__ARM_ARCH_4T__) && !defined (__ARM_ARCH_5__) \ + && !defined (__ARM_ARCH_5T__)) +#define PLD(code...) code +#else +#define PLD(code...) +#endif + +/* + * This can be used to enable code to cacheline align the source pointer. + * Experiments on tested architectures (StrongARM and XScale) didn't show + * this a worthwhile thing to do. That might be different in the future. + */ +//#define CALGN(code...) code +#define CALGN(code...) + +/* + * Endian independent macros for shifting bytes within registers. + */ +#ifndef __ARMEB__ +#define PULL lsr +#define PUSH lsl +#else +#define PULL lsl +#define PUSH lsr +#endif + + .text + .syntax unified + +/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */ + +ENTRY(memcpy) + + push {r0, r4, lr} + cfi_adjust_cfa_offset (12) + cfi_rel_offset (r4, 4) + cfi_rel_offset (lr, 8) + + cfi_remember_state + + subs r2, r2, #4 + blt 8f + ands ip, r0, #3 + PLD( pld [r1, #0] ) + bne 9f + ands ip, r1, #3 + bne 10f + +1: subs r2, r2, #(28) + push {r5 - r8} + cfi_adjust_cfa_offset (16) + cfi_rel_offset (r5, 0) + cfi_rel_offset (r6, 4) + cfi_rel_offset (r7, 8) + cfi_rel_offset (r8, 12) + blt 5f + + CALGN( ands ip, r1, #31 ) + CALGN( rsb r3, ip, #32 ) + CALGN( sbcsne r4, r3, r2 ) @ C is always set here + CALGN( bcs 2f ) + CALGN( adr r4, 6f ) + CALGN( subs r2, r2, r3 ) @ C gets set +#ifndef ARM_ALWAYS_BX + CALGN( add pc, r4, ip, lsl #(ARM_BX_ALIGN_LOG2 - 2)) +#else + CALGN( add r4, r4, ip, lsl #(ARM_BX_ALIGN_LOG2 - 2)) + CALGN( bx r4 ) +#endif + + PLD( pld [r1, #0] ) +2: PLD( subs r2, r2, #96 ) + PLD( pld [r1, #28] ) + PLD( blt 4f ) + PLD( pld [r1, #60] ) + PLD( pld [r1, #92] ) + +3: PLD( pld [r1, #124] ) +4: ldmia r1!, {r3, r4, r5, r6, r7, r8, ip, lr} + subs r2, r2, #32 + stmia r0!, {r3, r4, r5, r6, r7, r8, ip, lr} + bge 3b + PLD( cmn r2, #96 ) + PLD( bge 4b ) + +5: ands ip, r2, #28 + rsb ip, ip, #32 +#ifndef ARM_ALWAYS_BX + /* C is always clear here. */ + addne pc, pc, ip, lsl #(ARM_BX_ALIGN_LOG2 - 2) + b 7f +#else + beq 7f + push {r10} + cfi_adjust_cfa_offset (4) + cfi_rel_offset (r10, 0) +0: add r10, pc, ip, lsl #(ARM_BX_ALIGN_LOG2 - 2) + /* If alignment is not perfect, then there will be some + padding (nop) instructions between this BX and label 6. + The computation above assumed that two instructions + later is exactly the right spot. */ + add r10, #(6f - (0b + PC_OFS)) + bx r10 +#endif + .p2align ARM_BX_ALIGN_LOG2 +6: nop + .p2align ARM_BX_ALIGN_LOG2 + ldr r3, [r1], #4 + .p2align ARM_BX_ALIGN_LOG2 + ldr r4, [r1], #4 + .p2align ARM_BX_ALIGN_LOG2 + ldr r5, [r1], #4 + .p2align ARM_BX_ALIGN_LOG2 + ldr r6, [r1], #4 + .p2align ARM_BX_ALIGN_LOG2 + ldr r7, [r1], #4 + .p2align ARM_BX_ALIGN_LOG2 + ldr r8, [r1], #4 + .p2align ARM_BX_ALIGN_LOG2 + ldr lr, [r1], #4 + +#ifndef ARM_ALWAYS_BX + add pc, pc, ip, lsl #(ARM_BX_ALIGN_LOG2 - 2) + nop +#else +0: add r10, pc, ip, lsl #(ARM_BX_ALIGN_LOG2 - 2) + /* If alignment is not perfect, then there will be some + padding (nop) instructions between this BX and label 66. 
+ The computation above assumed that two instructions + later is exactly the right spot. */ + add r10, #(66f - (0b + PC_OFS)) + bx r10 +#endif + .p2align ARM_BX_ALIGN_LOG2 +66: nop + .p2align ARM_BX_ALIGN_LOG2 + str r3, [r0], #4 + .p2align ARM_BX_ALIGN_LOG2 + str r4, [r0], #4 + .p2align ARM_BX_ALIGN_LOG2 + str r5, [r0], #4 + .p2align ARM_BX_ALIGN_LOG2 + str r6, [r0], #4 + .p2align ARM_BX_ALIGN_LOG2 + str r7, [r0], #4 + .p2align ARM_BX_ALIGN_LOG2 + str r8, [r0], #4 + .p2align ARM_BX_ALIGN_LOG2 + str lr, [r0], #4 + +#ifdef ARM_ALWAYS_BX + pop {r10} + cfi_adjust_cfa_offset (-4) + cfi_restore (r10) +#endif + + CALGN( bcs 2b ) + +7: pop {r5 - r8} + cfi_adjust_cfa_offset (-16) + cfi_restore (r5) + cfi_restore (r6) + cfi_restore (r7) + cfi_restore (r8) + +8: movs r2, r2, lsl #31 + ldrbne r3, [r1], #1 + ldrbcs r4, [r1], #1 + ldrbcs ip, [r1] + strbne r3, [r0], #1 + strbcs r4, [r0], #1 + strbcs ip, [r0] + +#if ((defined (__ARM_ARCH_4T__) && defined(__THUMB_INTERWORK__)) \ + || defined (ARM_ALWAYS_BX)) + pop {r0, r4, lr} + cfi_adjust_cfa_offset (-12) + cfi_restore (r4) + cfi_restore (lr) + bx lr +#else + pop {r0, r4, pc} +#endif + + cfi_restore_state + +9: rsb ip, ip, #4 + cmp ip, #2 + ldrbgt r3, [r1], #1 + ldrbge r4, [r1], #1 + ldrb lr, [r1], #1 + strbgt r3, [r0], #1 + strbge r4, [r0], #1 + subs r2, r2, ip + strb lr, [r0], #1 + blt 8b + ands ip, r1, #3 + beq 1b + +10: bic r1, r1, #3 + cmp ip, #2 + ldr lr, [r1], #4 + beq 17f + bgt 18f + + + .macro forward_copy_shift pull push + + subs r2, r2, #28 + blt 14f + + CALGN( ands ip, r1, #31 ) + CALGN( rsb ip, ip, #32 ) + CALGN( sbcsne r4, ip, r2 ) @ C is always set here + CALGN( subcc r2, r2, ip ) + CALGN( bcc 15f ) + +11: push {r5 - r8, r10} + cfi_adjust_cfa_offset (20) + cfi_rel_offset (r5, 0) + cfi_rel_offset (r6, 4) + cfi_rel_offset (r7, 8) + cfi_rel_offset (r8, 12) + cfi_rel_offset (r10, 16) + + PLD( pld [r1, #0] ) + PLD( subs r2, r2, #96 ) + PLD( pld [r1, #28] ) + PLD( blt 13f ) + PLD( pld [r1, #60] ) + PLD( pld [r1, #92] ) + +12: PLD( pld [r1, #124] ) +13: ldmia r1!, {r4, r5, r6, r7} + mov r3, lr, PULL #\pull + subs r2, r2, #32 + ldmia r1!, {r8, r10, ip, lr} + orr r3, r3, r4, PUSH #\push + mov r4, r4, PULL #\pull + orr r4, r4, r5, PUSH #\push + mov r5, r5, PULL #\pull + orr r5, r5, r6, PUSH #\push + mov r6, r6, PULL #\pull + orr r6, r6, r7, PUSH #\push + mov r7, r7, PULL #\pull + orr r7, r7, r8, PUSH #\push + mov r8, r8, PULL #\pull + orr r8, r8, r10, PUSH #\push + mov r10, r10, PULL #\pull + orr r10, r10, ip, PUSH #\push + mov ip, ip, PULL #\pull + orr ip, ip, lr, PUSH #\push + stmia r0!, {r3, r4, r5, r6, r7, r8, r10, ip} + bge 12b + PLD( cmn r2, #96 ) + PLD( bge 13b ) + + pop {r5 - r8, r10} + cfi_adjust_cfa_offset (-20) + cfi_restore (r5) + cfi_restore (r6) + cfi_restore (r7) + cfi_restore (r8) + cfi_restore (r10) + +14: ands ip, r2, #28 + beq 16f + +15: mov r3, lr, PULL #\pull + ldr lr, [r1], #4 + subs ip, ip, #4 + orr r3, r3, lr, PUSH #\push + str r3, [r0], #4 + bgt 15b + CALGN( cmp r2, #0 ) + CALGN( bge 11b ) + +16: sub r1, r1, #(\push / 8) + b 8b + + .endm + + + forward_copy_shift pull=8 push=24 + +17: forward_copy_shift pull=16 push=16 + +18: forward_copy_shift pull=24 push=8 + +END(memcpy) +libc_hidden_builtin_def (memcpy) diff --git a/REORG.TODO/sysdeps/arm/memmove.S b/REORG.TODO/sysdeps/arm/memmove.S new file mode 100644 index 0000000000..b18aa329d3 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/memmove.S @@ -0,0 +1,336 @@ +/* Copyright (C) 2006-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + Contributed by MontaVista Software, Inc. (written by Nicolas Pitre) + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Thumb requires excessive IT insns here. */ +#define NO_THUMB +#include <sysdep.h> +#include <arm-features.h> + +/* + * Data preload for architectures that support it (ARM V5TE and above) + */ +#if (!defined (__ARM_ARCH_2__) && !defined (__ARM_ARCH_3__) \ + && !defined (__ARM_ARCH_3M__) && !defined (__ARM_ARCH_4__) \ + && !defined (__ARM_ARCH_4T__) && !defined (__ARM_ARCH_5__) \ + && !defined (__ARM_ARCH_5T__)) +#define PLD(code...) code +#else +#define PLD(code...) +#endif + +/* + * This can be used to enable code to cacheline align the source pointer. + * Experiments on tested architectures (StrongARM and XScale) didn't show + * this a worthwhile thing to do. That might be different in the future. + */ +//#define CALGN(code...) code +#define CALGN(code...) + +/* + * Endian independent macros for shifting bytes within registers. + */ +#ifndef __ARMEB__ +#define PULL lsr +#define PUSH lsl +#else +#define PULL lsl +#define PUSH lsr +#endif + + .text + .syntax unified + +/* + * Prototype: void *memmove(void *dest, const void *src, size_t n); + * + * Note: + * + * If the memory regions don't overlap, we simply branch to memcpy which is + * normally a bit faster. Otherwise the copy is done going downwards. + */ + +ENTRY(memmove) + + subs ip, r0, r1 + cmphi r2, ip +#if !IS_IN (libc) + bls memcpy +#else + bls HIDDEN_JUMPTARGET(memcpy) +#endif + + push {r0, r4, lr} + cfi_adjust_cfa_offset (12) + cfi_rel_offset (r4, 4) + cfi_rel_offset (lr, 8) + + cfi_remember_state + + add r1, r1, r2 + add r0, r0, r2 + subs r2, r2, #4 + blt 8f + ands ip, r0, #3 + PLD( pld [r1, #-4] ) + bne 9f + ands ip, r1, #3 + bne 10f + +1: subs r2, r2, #(28) + push {r5 - r8} + cfi_adjust_cfa_offset (16) + cfi_rel_offset (r5, 0) + cfi_rel_offset (r6, 4) + cfi_rel_offset (r7, 8) + cfi_rel_offset (r8, 12) + blt 5f + + CALGN( ands ip, r1, #31 ) + CALGN( sbcsne r4, ip, r2 ) @ C is always set here + CALGN( bcs 2f ) + CALGN( adr r4, 6f ) + CALGN( subs r2, r2, ip ) @ C is set here +#ifndef ARM_ALWAYS_BX + CALGN( add pc, r4, ip, lsl #(ARM_BX_ALIGN_LOG2 - 2)) +#else + CALGN( add r4, r4, ip, lsl #(ARM_BX_ALIGN_LOG2 - 2)) + CALGN( bx r4 ) +#endif + + PLD( pld [r1, #-4] ) +2: PLD( subs r2, r2, #96 ) + PLD( pld [r1, #-32] ) + PLD( blt 4f ) + PLD( pld [r1, #-64] ) + PLD( pld [r1, #-96] ) + +3: PLD( pld [r1, #-128] ) +4: ldmdb r1!, {r3, r4, r5, r6, r7, r8, ip, lr} + subs r2, r2, #32 + stmdb r0!, {r3, r4, r5, r6, r7, r8, ip, lr} + bge 3b + PLD( cmn r2, #96 ) + PLD( bge 4b ) + +5: ands ip, r2, #28 + rsb ip, ip, #32 +#ifndef ARM_ALWAYS_BX + /* C is always clear here. 
*/ + addne pc, pc, ip, lsl #(ARM_BX_ALIGN_LOG2 - 2) + b 7f +#else + beq 7f + push {r10} + cfi_adjust_cfa_offset (4) + cfi_rel_offset (r10, 0) +0: add r10, pc, ip, lsl #(ARM_BX_ALIGN_LOG2 - 2) + /* If alignment is not perfect, then there will be some + padding (nop) instructions between this BX and label 6. + The computation above assumed that two instructions + later is exactly the right spot. */ + add r10, #(6f - (0b + PC_OFS)) + bx r10 +#endif + .p2align ARM_BX_ALIGN_LOG2 +6: nop + .p2align ARM_BX_ALIGN_LOG2 + ldr r3, [r1, #-4]! + .p2align ARM_BX_ALIGN_LOG2 + ldr r4, [r1, #-4]! + .p2align ARM_BX_ALIGN_LOG2 + ldr r5, [r1, #-4]! + .p2align ARM_BX_ALIGN_LOG2 + ldr r6, [r1, #-4]! + .p2align ARM_BX_ALIGN_LOG2 + ldr r7, [r1, #-4]! + .p2align ARM_BX_ALIGN_LOG2 + ldr r8, [r1, #-4]! + .p2align ARM_BX_ALIGN_LOG2 + ldr lr, [r1, #-4]! + +#ifndef ARM_ALWAYS_BX + add pc, pc, ip, lsl #(ARM_BX_ALIGN_LOG2 - 2) + nop +#else +0: add r10, pc, ip, lsl #(ARM_BX_ALIGN_LOG2 - 2) + /* If alignment is not perfect, then there will be some + padding (nop) instructions between this BX and label 66. + The computation above assumed that two instructions + later is exactly the right spot. */ + add r10, #(66f - (0b + PC_OFS)) + bx r10 +#endif + .p2align ARM_BX_ALIGN_LOG2 +66: nop + .p2align ARM_BX_ALIGN_LOG2 + str r3, [r0, #-4]! + .p2align ARM_BX_ALIGN_LOG2 + str r4, [r0, #-4]! + .p2align ARM_BX_ALIGN_LOG2 + str r5, [r0, #-4]! + .p2align ARM_BX_ALIGN_LOG2 + str r6, [r0, #-4]! + .p2align ARM_BX_ALIGN_LOG2 + str r7, [r0, #-4]! + .p2align ARM_BX_ALIGN_LOG2 + str r8, [r0, #-4]! + .p2align ARM_BX_ALIGN_LOG2 + str lr, [r0, #-4]! + +#ifdef ARM_ALWAYS_BX + pop {r10} + cfi_adjust_cfa_offset (-4) + cfi_restore (r10) +#endif + + CALGN( bcs 2b ) + +7: pop {r5 - r8} + cfi_adjust_cfa_offset (-16) + cfi_restore (r5) + cfi_restore (r6) + cfi_restore (r7) + cfi_restore (r8) + +8: movs r2, r2, lsl #31 + ldrbne r3, [r1, #-1]! + ldrbcs r4, [r1, #-1]! + ldrbcs ip, [r1, #-1] + strbne r3, [r0, #-1]! + strbcs r4, [r0, #-1]! + strbcs ip, [r0, #-1] + +#if ((defined (__ARM_ARCH_4T__) && defined (__THUMB_INTERWORK__)) \ + || defined (ARM_ALWAYS_BX)) + pop {r0, r4, lr} + cfi_adjust_cfa_offset (-12) + cfi_restore (r4) + cfi_restore (lr) + bx lr +#else + pop {r0, r4, pc} +#endif + + cfi_restore_state + +9: cmp ip, #2 + ldrbgt r3, [r1, #-1]! + ldrbge r4, [r1, #-1]! + ldrb lr, [r1, #-1]! + strbgt r3, [r0, #-1]! + strbge r4, [r0, #-1]! + subs r2, r2, ip + strb lr, [r0, #-1]! 
+ blt 8b + ands ip, r1, #3 + beq 1b + +10: bic r1, r1, #3 + cmp ip, #2 + ldr r3, [r1, #0] + beq 17f + blt 18f + + + .macro backward_copy_shift push pull + + subs r2, r2, #28 + blt 14f + + CALGN( ands ip, r1, #31 ) + CALGN( rsb ip, ip, #32 ) + CALGN( sbcsne r4, ip, r2 ) @ C is always set here + CALGN( subcc r2, r2, ip ) + CALGN( bcc 15f ) + +11: push {r5 - r8, r10} + cfi_adjust_cfa_offset (20) + cfi_rel_offset (r5, 0) + cfi_rel_offset (r6, 4) + cfi_rel_offset (r7, 8) + cfi_rel_offset (r8, 12) + cfi_rel_offset (r10, 16) + + PLD( pld [r1, #-4] ) + PLD( subs r2, r2, #96 ) + PLD( pld [r1, #-32] ) + PLD( blt 13f ) + PLD( pld [r1, #-64] ) + PLD( pld [r1, #-96] ) + +12: PLD( pld [r1, #-128] ) +13: ldmdb r1!, {r7, r8, r10, ip} + mov lr, r3, PUSH #\push + subs r2, r2, #32 + ldmdb r1!, {r3, r4, r5, r6} + orr lr, lr, ip, PULL #\pull + mov ip, ip, PUSH #\push + orr ip, ip, r10, PULL #\pull + mov r10, r10, PUSH #\push + orr r10, r10, r8, PULL #\pull + mov r8, r8, PUSH #\push + orr r8, r8, r7, PULL #\pull + mov r7, r7, PUSH #\push + orr r7, r7, r6, PULL #\pull + mov r6, r6, PUSH #\push + orr r6, r6, r5, PULL #\pull + mov r5, r5, PUSH #\push + orr r5, r5, r4, PULL #\pull + mov r4, r4, PUSH #\push + orr r4, r4, r3, PULL #\pull + stmdb r0!, {r4 - r8, r10, ip, lr} + bge 12b + PLD( cmn r2, #96 ) + PLD( bge 13b ) + + pop {r5 - r8, r10} + cfi_adjust_cfa_offset (-20) + cfi_restore (r5) + cfi_restore (r6) + cfi_restore (r7) + cfi_restore (r8) + cfi_restore (r10) + +14: ands ip, r2, #28 + beq 16f + +15: mov lr, r3, PUSH #\push + ldr r3, [r1, #-4]! + subs ip, ip, #4 + orr lr, lr, r3, PULL #\pull + str lr, [r0, #-4]! + bgt 15b + CALGN( cmp r2, #0 ) + CALGN( bge 11b ) + +16: add r1, r1, #(\pull / 8) + b 8b + + .endm + + + backward_copy_shift push=8 pull=24 + +17: backward_copy_shift push=16 pull=16 + +18: backward_copy_shift push=24 pull=8 + + +END(memmove) +libc_hidden_builtin_def (memmove) diff --git a/REORG.TODO/sysdeps/arm/memset.S b/REORG.TODO/sysdeps/arm/memset.S new file mode 100644 index 0000000000..95946360bf --- /dev/null +++ b/REORG.TODO/sysdeps/arm/memset.S @@ -0,0 +1,69 @@ +/* Copyright (C) 1998-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Philip Blundell <philb@gnu.org> + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Thumb requires excessive IT insns here. */ +#define NO_THUMB +#include <sysdep.h> + + .text + .syntax unified + +/* void *memset (dstpp, c, len) */ + +ENTRY(memset) + mov r3, r0 + cmp r2, #8 + bcc 2f @ less than 8 bytes to move + +1: + tst r3, #3 @ aligned yet? 
+ strbne r1, [r3], #1 + subne r2, r2, #1 + bne 1b + + and r1, r1, #255 @ clear any sign bits + orr r1, r1, r1, lsl $8 + orr r1, r1, r1, lsl $16 + mov ip, r1 + +1: + subs r2, r2, #8 + stmiacs r3!, {r1, ip} @ store up to 32 bytes per loop iteration + subscs r2, r2, #8 + stmiacs r3!, {r1, ip} + subscs r2, r2, #8 + stmiacs r3!, {r1, ip} + subscs r2, r2, #8 + stmiacs r3!, {r1, ip} + bcs 1b + + and r2, r2, #7 +2: + subs r2, r2, #1 @ store up to 4 bytes per loop iteration + strbcs r1, [r3], #1 + subscs r2, r2, #1 + strbcs r1, [r3], #1 + subscs r2, r2, #1 + strbcs r1, [r3], #1 + subscs r2, r2, #1 + strbcs r1, [r3], #1 + bcs 2b + + DO_RET(lr) +END(memset) +libc_hidden_builtin_def (memset) diff --git a/REORG.TODO/sysdeps/arm/memusage.h b/REORG.TODO/sysdeps/arm/memusage.h new file mode 100644 index 0000000000..7592202fc3 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/memusage.h @@ -0,0 +1,20 @@ +/* Copyright (C) 2000-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#define GETSP() ({ register uintptr_t stack_ptr asm ("sp"); stack_ptr; }) + +#include <sysdeps/generic/memusage.h> diff --git a/REORG.TODO/sysdeps/arm/nptl-aeabi_unwind_cpp_pr1.c b/REORG.TODO/sysdeps/arm/nptl-aeabi_unwind_cpp_pr1.c new file mode 100644 index 0000000000..7b83522437 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/nptl-aeabi_unwind_cpp_pr1.c @@ -0,0 +1 @@ +#include <aeabi_unwind_cpp_pr1.c> diff --git a/REORG.TODO/sysdeps/arm/nptl/Makefile b/REORG.TODO/sysdeps/arm/nptl/Makefile new file mode 100644 index 0000000000..34eec51bc1 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/nptl/Makefile @@ -0,0 +1,33 @@ +# Copyright (C) 2005-2017 Free Software Foundation, Inc. +# This file is part of the GNU C Library. +# +# The GNU C Library is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 2.1 of the License, or (at your option) any later version. +# +# The GNU C Library is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public +# License along with the GNU C Library. If not, see +# <http://www.gnu.org/licenses/>. + +ifeq ($(subdir),csu) +gen-as-const-headers += tcb-offsets.sym +endif + +ifeq ($(subdir),nptl) +libpthread-sysdep_routines += nptl-aeabi_unwind_cpp_pr1 +libpthread-shared-only-routines += nptl-aeabi_unwind_cpp_pr1 + +# This test relies on compiling part of the binary with EH information, +# part without, and unwinding through. The .ARM.exidx tables have +# start addresses for EH regions, but no end addresses. 
Every
+# region an exception needs to propagate through must have unwind
+# information, or a previous function's unwind table may be used
+# by mistake.
+tests := $(filter-out tst-cleanupx4,$(tests))
+endif
diff --git a/REORG.TODO/sysdeps/arm/nptl/bits/pthreadtypes-arch.h b/REORG.TODO/sysdeps/arm/nptl/bits/pthreadtypes-arch.h new file mode 100644 index 0000000000..3f9eca4645 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/nptl/bits/pthreadtypes-arch.h @@ -0,0 +1,69 @@ +/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _BITS_PTHREADTYPES_ARCH_H
+#define _BITS_PTHREADTYPES_ARCH_H 1
+
+#include <endian.h>
+
+#define __SIZEOF_PTHREAD_ATTR_T 36
+#define __SIZEOF_PTHREAD_MUTEX_T 24
+#define __SIZEOF_PTHREAD_MUTEXATTR_T 4
+#define __SIZEOF_PTHREAD_COND_T 48
+#define __SIZEOF_PTHREAD_CONDATTR_T 4
+#define __SIZEOF_PTHREAD_RWLOCK_T 32
+#define __SIZEOF_PTHREAD_RWLOCKATTR_T 8
+#define __SIZEOF_PTHREAD_BARRIER_T 20
+#define __SIZEOF_PTHREAD_BARRIERATTR_T 4
+
+/* Data structure for mutex handling. */
+#define __PTHREAD_COMPAT_PADDING_MID
+#define __PTHREAD_COMPAT_PADDING_END
+#define __PTHREAD_MUTEX_LOCK_ELISION 0
+
+#define __LOCK_ALIGNMENT
+#define __ONCE_ALIGNMENT
+
+struct __pthread_rwlock_arch_t
+{
+  unsigned int __readers;
+  unsigned int __writers;
+  unsigned int __wrphase_futex;
+  unsigned int __writers_futex;
+  unsigned int __pad3;
+  unsigned int __pad4;
+#if __BYTE_ORDER == __BIG_ENDIAN
+  unsigned char __pad1;
+  unsigned char __pad2;
+  unsigned char __shared;
+  /* FLAGS must stay at this position in the structure to maintain
+     binary compatibility.  */
+  unsigned char __flags;
+#else
+  /* FLAGS must stay at this position in the structure to maintain
+     binary compatibility.  */
+  unsigned char __flags;
+  unsigned char __shared;
+  unsigned char __pad1;
+  unsigned char __pad2;
+#endif
+  int __cur_writer;
+};
+
+#define __PTHREAD_RWLOCK_ELISION_EXTRA 0
+
+#endif /* bits/pthreadtypes.h */ diff --git a/REORG.TODO/sysdeps/arm/nptl/bits/semaphore.h b/REORG.TODO/sysdeps/arm/nptl/bits/semaphore.h new file mode 100644 index 0000000000..0b0844994b --- /dev/null +++ b/REORG.TODO/sysdeps/arm/nptl/bits/semaphore.h @@ -0,0 +1,34 @@ +/* Copyright (C) 2002-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _SEMAPHORE_H +# error "Never use <bits/semaphore.h> directly; include <semaphore.h> instead." +#endif + + +#define __SIZEOF_SEM_T 16 + + +/* Value returned if `sem_open' failed. */ +#define SEM_FAILED ((sem_t *) 0) + + +typedef union +{ + char __size[__SIZEOF_SEM_T]; + long int __align; +} sem_t; diff --git a/REORG.TODO/sysdeps/arm/nptl/pthreaddef.h b/REORG.TODO/sysdeps/arm/nptl/pthreaddef.h new file mode 100644 index 0000000000..f69e0ffa65 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/nptl/pthreaddef.h @@ -0,0 +1,41 @@ +/* Copyright (C) 2002-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Default stack size. */ +#define ARCH_STACK_DEFAULT_SIZE (2 * 1024 * 1024) + +/* Required stack pointer alignment at beginning. SSE requires 16 + bytes. */ +#define STACK_ALIGN 16 + +/* Minimal stack size after allocating thread descriptor and guard size. */ +#define MINIMAL_REST_STACK 2048 + +/* Alignment requirement for TCB. */ +#define TCB_ALIGNMENT 16 + + +/* Location of current stack frame. + + __builtin_frame_address (0) returns the value of the hard frame + pointer, which will point at the location of the saved PC on the + stack. Below this in memory is the remainder of the linkage info, + occupying 12 bytes. Therefore in order to address from + CURRENT_STACK_FRAME using "struct layout", we need to have the macro + return the hard FP minus 12. Of course, this makes no sense + without the obsolete APCS stack layout... */ +#define CURRENT_STACK_FRAME (__builtin_frame_address (0) - 12) diff --git a/REORG.TODO/sysdeps/arm/nptl/tcb-offsets.sym b/REORG.TODO/sysdeps/arm/nptl/tcb-offsets.sym new file mode 100644 index 0000000000..bf9c0a1c17 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/nptl/tcb-offsets.sym @@ -0,0 +1,10 @@ +#include <sysdep.h> +#include <tls.h> + +-- + +-- Derive offsets relative to the thread register. +#define thread_offsetof(mem) (long)(offsetof(struct pthread, mem) - sizeof(struct pthread)) + +MULTIPLE_THREADS_OFFSET thread_offsetof (header.multiple_threads) +TID_OFFSET thread_offsetof (tid) diff --git a/REORG.TODO/sysdeps/arm/nptl/tls.h b/REORG.TODO/sysdeps/arm/nptl/tls.h new file mode 100644 index 0000000000..43e9164897 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/nptl/tls.h @@ -0,0 +1,127 @@ +/* Definition for thread-local data handling. NPTL/ARM version. + Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. 
+ + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _ARM_NPTL_TLS_H +#define _ARM_NPTL_TLS_H 1 + +#include <dl-sysdep.h> + +#ifndef __ASSEMBLER__ +# include <stdbool.h> +# include <stddef.h> +# include <stdint.h> +# include <dl-dtv.h> + +#else /* __ASSEMBLER__ */ +# include <tcb-offsets.h> +#endif /* __ASSEMBLER__ */ + + +#ifndef __ASSEMBLER__ + +/* The TP points to the start of the thread blocks. */ +# define TLS_DTV_AT_TP 1 +# define TLS_TCB_AT_TP 0 + +/* Get the thread descriptor definition. */ +# include <nptl/descr.h> + +typedef struct +{ + dtv_t *dtv; + void *private; +} tcbhead_t; + +/* This is the size of the initial TCB. */ +# define TLS_INIT_TCB_SIZE sizeof (tcbhead_t) + +/* Alignment requirements for the initial TCB. */ +# define TLS_INIT_TCB_ALIGN 16 + +/* This is the size of the TCB. */ +# define TLS_TCB_SIZE sizeof (tcbhead_t) + +/* This is the size we need before TCB. */ +# define TLS_PRE_TCB_SIZE sizeof (struct pthread) + +/* Alignment requirements for the TCB. */ +# define TLS_TCB_ALIGN 16 + +/* Install the dtv pointer. The pointer passed is to the element with + index -1 which contain the length. */ +# define INSTALL_DTV(tcbp, dtvp) \ + (((tcbhead_t *) (tcbp))->dtv = (dtvp) + 1) + +/* Install new dtv for current thread. */ +# define INSTALL_NEW_DTV(dtv) \ + (THREAD_DTV() = (dtv)) + +/* Return dtv of given thread descriptor. */ +# define GET_DTV(tcbp) \ + (((tcbhead_t *) (tcbp))->dtv) + +# define TLS_DEFINE_INIT_TP(tp, pd) void *tp = (pd) + 1 + +/* Return the address of the dtv for the current thread. */ +# define THREAD_DTV() \ + (((tcbhead_t *) __builtin_thread_pointer ())->dtv) + +/* Return the thread descriptor for the current thread. */ +# define THREAD_SELF \ + ((struct pthread *)__builtin_thread_pointer () - 1) + +/* Magic for libthread_db to know how to do THREAD_SELF. */ +# define DB_THREAD_SELF \ + CONST_THREAD_AREA (32, sizeof (struct pthread)) + +/* Access to data in the thread descriptor is easy. */ +#define THREAD_GETMEM(descr, member) \ + descr->member +#define THREAD_GETMEM_NC(descr, member, idx) \ + descr->member[idx] +#define THREAD_SETMEM(descr, member, value) \ + descr->member = (value) +#define THREAD_SETMEM_NC(descr, member, idx, value) \ + descr->member[idx] = (value) + +/* Get and set the global scope generation counter in struct pthread. 
*/ +#define THREAD_GSCOPE_FLAG_UNUSED 0 +#define THREAD_GSCOPE_FLAG_USED 1 +#define THREAD_GSCOPE_FLAG_WAIT 2 +#define THREAD_GSCOPE_RESET_FLAG() \ + do \ + { int __res \ + = atomic_exchange_rel (&THREAD_SELF->header.gscope_flag, \ + THREAD_GSCOPE_FLAG_UNUSED); \ + if (__res == THREAD_GSCOPE_FLAG_WAIT) \ + lll_futex_wake (&THREAD_SELF->header.gscope_flag, 1, LLL_PRIVATE); \ + } \ + while (0) +#define THREAD_GSCOPE_SET_FLAG() \ + do \ + { \ + THREAD_SELF->header.gscope_flag = THREAD_GSCOPE_FLAG_USED; \ + atomic_write_barrier (); \ + } \ + while (0) +#define THREAD_GSCOPE_WAIT() \ + GL(dl_wait_lookup_done) () + +#endif /* __ASSEMBLER__ */ + +#endif /* tls.h */ diff --git a/REORG.TODO/sysdeps/arm/preconfigure b/REORG.TODO/sysdeps/arm/preconfigure new file mode 100644 index 0000000000..33e9501c4f --- /dev/null +++ b/REORG.TODO/sysdeps/arm/preconfigure @@ -0,0 +1,55 @@ +# This file is generated from configure.ac by Autoconf. DO NOT EDIT! + # Local preconfigure fragment for sysdeps/arm + +case "$machine" in +arm*) + # If the compiler enables unwind tables by default, this causes + # problems with undefined symbols in -nostdlib link tests. To + # avoid this, add -fno-unwind-tables here and remove it in + # sysdeps/arm/configure.ac after those tests have been run. + if test "${CFLAGS+set}" != "set"; then + CFLAGS="-g -O2" + fi + CFLAGS="$CFLAGS -fno-unwind-tables" + + base_machine=arm + # Lets ask the compiler which ARM family we've got + # Unfortunately it doesn't define any flags for implementations + # that you might pass to -mcpu or -mtune + # Note if you add patterns here you must ensure that + # an appropriate directory exists in sysdeps/arm + archcppflag=`$CC $CFLAGS $CPPFLAGS -E -dM -xc /dev/null | + sed -n 's/^#define \(__ARM_ARCH_[0-9].*__\) .*$/\1/p'` + + case "x$archcppflag" in + x__ARM_ARCH_89*__) + machine=armv7 + { $as_echo "$as_me:${as_lineno-$LINENO}: Found compiler is configured for something newer than v7 - using v7" >&5 +$as_echo "$as_me: Found compiler is configured for something newer than v7 - using v7" >&6;} + ;; + + x__ARM_ARCH_7A__) + machine=armv7 + { $as_echo "$as_me:${as_lineno-$LINENO}: Found compiler is configured for $machine" >&5 +$as_echo "$as_me: Found compiler is configured for $machine" >&6;} + ;; + + x__ARM_ARCH_6T2__) + machine=armv6t2 + { $as_echo "$as_me:${as_lineno-$LINENO}: Found compiler is configured for $machine" >&5 +$as_echo "$as_me: Found compiler is configured for $machine" >&6;} + ;; + x__ARM_ARCH_6*__) + machine=armv6 + { $as_echo "$as_me:${as_lineno-$LINENO}: Found compiler is configured for $machine" >&5 +$as_echo "$as_me: Found compiler is configured for $machine" >&6;} + ;; + *) + machine=arm + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: arm/preconfigure: Did not find ARM architecture type; using default" >&5 +$as_echo "$as_me: WARNING: arm/preconfigure: Did not find ARM architecture type; using default" >&2;} + ;; + esac + + machine=arm/$machine +esac diff --git a/REORG.TODO/sysdeps/arm/preconfigure.ac b/REORG.TODO/sysdeps/arm/preconfigure.ac new file mode 100644 index 0000000000..20de5bcfea --- /dev/null +++ b/REORG.TODO/sysdeps/arm/preconfigure.ac @@ -0,0 +1,50 @@ +GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory. +# Local preconfigure fragment for sysdeps/arm + +case "$machine" in +arm*) + # If the compiler enables unwind tables by default, this causes + # problems with undefined symbols in -nostdlib link tests. 
To + # avoid this, add -fno-unwind-tables here and remove it in + # sysdeps/arm/configure.ac after those tests have been run. + if test "${CFLAGS+set}" != "set"; then + CFLAGS="-g -O2" + fi + CFLAGS="$CFLAGS -fno-unwind-tables" + + base_machine=arm + # Lets ask the compiler which ARM family we've got + # Unfortunately it doesn't define any flags for implementations + # that you might pass to -mcpu or -mtune + # Note if you add patterns here you must ensure that + # an appropriate directory exists in sysdeps/arm + archcppflag=`$CC $CFLAGS $CPPFLAGS -E -dM -xc /dev/null | + sed -n 's/^#define \(__ARM_ARCH_[0-9].*__\) .*$/\1/p'` + + case "x$archcppflag" in + x__ARM_ARCH_[89]*__) + machine=armv7 + AC_MSG_NOTICE([Found compiler is configured for something newer than v7 - using v7]) + ;; + + x__ARM_ARCH_7A__) + machine=armv7 + AC_MSG_NOTICE([Found compiler is configured for $machine]) + ;; + + x__ARM_ARCH_6T2__) + machine=armv6t2 + AC_MSG_NOTICE([Found compiler is configured for $machine]) + ;; + x__ARM_ARCH_6*__) + machine=armv6 + AC_MSG_NOTICE([Found compiler is configured for $machine]) + ;; + *) + machine=arm + AC_MSG_WARN([arm/preconfigure: Did not find ARM architecture type; using default]) + ;; + esac + + machine=arm/$machine +esac diff --git a/REORG.TODO/sysdeps/arm/pt-arm-unwind-resume.S b/REORG.TODO/sysdeps/arm/pt-arm-unwind-resume.S new file mode 100644 index 0000000000..7cb555c02b --- /dev/null +++ b/REORG.TODO/sysdeps/arm/pt-arm-unwind-resume.S @@ -0,0 +1,2 @@ +#define __libgcc_s_init pthread_cancel_init +#include <arm-unwind-resume.S> diff --git a/REORG.TODO/sysdeps/arm/rt-aeabi_unwind_cpp_pr1.c b/REORG.TODO/sysdeps/arm/rt-aeabi_unwind_cpp_pr1.c new file mode 100644 index 0000000000..7b83522437 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/rt-aeabi_unwind_cpp_pr1.c @@ -0,0 +1 @@ +#include <aeabi_unwind_cpp_pr1.c> diff --git a/REORG.TODO/sysdeps/arm/rt-arm-unwind-resume.S b/REORG.TODO/sysdeps/arm/rt-arm-unwind-resume.S new file mode 100644 index 0000000000..9144b0ce88 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/rt-arm-unwind-resume.S @@ -0,0 +1 @@ +#include <arm-unwind-resume.S> diff --git a/REORG.TODO/sysdeps/arm/rtld-global-offsets.sym b/REORG.TODO/sysdeps/arm/rtld-global-offsets.sym new file mode 100644 index 0000000000..ff4e97f2a6 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/rtld-global-offsets.sym @@ -0,0 +1,7 @@ +#define SHARED 1 + +#include <ldsodefs.h> + +#define rtld_global_ro_offsetof(mem) offsetof (struct rtld_global_ro, mem) + +RTLD_GLOBAL_RO_DL_HWCAP_OFFSET rtld_global_ro_offsetof (_dl_hwcap) diff --git a/REORG.TODO/sysdeps/arm/s_fma.c b/REORG.TODO/sysdeps/arm/s_fma.c new file mode 100644 index 0000000000..dc4e27bfc1 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/s_fma.c @@ -0,0 +1,5 @@ +#ifdef __SOFTFP__ +# include <soft-fp/fmadf4.c> +#else +# include <sysdeps/ieee754/dbl-64/s_fma.c> +#endif diff --git a/REORG.TODO/sysdeps/arm/s_fmaf.c b/REORG.TODO/sysdeps/arm/s_fmaf.c new file mode 100644 index 0000000000..550d8b85d3 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/s_fmaf.c @@ -0,0 +1,5 @@ +#ifdef __SOFTFP__ +# include <soft-fp/fmasf4.c> +#else +# include <sysdeps/ieee754/dbl-64/s_fmaf.c> +#endif diff --git a/REORG.TODO/sysdeps/arm/setfpucw.c b/REORG.TODO/sysdeps/arm/setfpucw.c new file mode 100644 index 0000000000..e6c075e620 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/setfpucw.c @@ -0,0 +1,43 @@ +/* Set the FPU control word. + Copyright (C) 1996-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <math.h>
+#include <fpu_control.h>
+#include <arm-features.h>
+
+
+void
+__setfpucw (fpu_control_t set)
+{
+  fpu_control_t fpscr, new_fpscr;
+
+  /* Do nothing if a VFP unit isn't present.  */
+  if (!ARM_HAVE_VFP)
+    return;
+
+  _FPU_GETCW (fpscr);
+
+  /* Preserve the reserved bits, and set the rest as the user
+     specified (or the default, if the user gave zero).  */
+  new_fpscr = fpscr & _FPU_RESERVED;
+  new_fpscr |= set & ~_FPU_RESERVED;
+
+  /* Write the updated FPSCR if it changed.  */
+  if (new_fpscr != fpscr)
+    _FPU_SETCW (new_fpscr);
+} diff --git a/REORG.TODO/sysdeps/arm/setjmp.S b/REORG.TODO/sysdeps/arm/setjmp.S new file mode 100644 index 0000000000..2235890936 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/setjmp.S @@ -0,0 +1,101 @@ +/* setjmp for ARM.
+   Copyright (C) 1997-2017 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library.  If not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include <sysdep.h>
+#include <stap-probe.h>
+#include <bits/setjmp.h>
+#include <rtld-global-offsets.h>
+#include <arm-features.h>
+
+ENTRY (__sigsetjmp)
+#ifdef PTR_MANGLE
+	PTR_MANGLE_LOAD (a3, ip)
+#endif
+	mov	ip, r0
+
+	/* setjmp probe expects sigsetjmp first argument (4@r0), second
+	   argument (-4@r1), and target address (4@r14), respectively.  */
+	LIBC_PROBE (setjmp, 3, 4@r0, -4@r1, 4@r14)
+
+	/* Save sp and lr */
+#ifdef PTR_MANGLE
+	mov	a4, sp
+	PTR_MANGLE2 (a4, a4, a3)
+	str	a4, [ip], #4
+	PTR_MANGLE2 (a4, lr, a3)
+	str	a4, [ip], #4
+#else
+	str	sp, [ip], #4
+	str	lr, [ip], #4
+#endif
+	/* Save registers */
+	stmia	ip!, JMP_BUF_REGLIST
+
+#if !defined ARM_ASSUME_NO_IWMMXT || defined __SOFTFP__
+# define NEED_HWCAP 1
+#endif
+
+#ifdef NEED_HWCAP
+	/* Check if we have a VFP unit.  */
+# if IS_IN (rtld)
+	LDST_PCREL (ldr, a3, a4, \
+		    C_SYMBOL_NAME(_rtld_local_ro) \
+		    + RTLD_GLOBAL_RO_DL_HWCAP_OFFSET)
+# else
+#  ifdef SHARED
+	LDR_GLOBAL (a3, a4, C_SYMBOL_NAME(_rtld_global_ro), \
+		    RTLD_GLOBAL_RO_DL_HWCAP_OFFSET)
+#  else
+	LDR_GLOBAL (a3, a4, C_SYMBOL_NAME(_dl_hwcap), 0)
+#  endif
+# endif
+#endif
+
+#ifdef __SOFTFP__
+	tst	a3, #HWCAP_ARM_VFP
+	beq	.Lno_vfp
+#endif
+
+	/* Store the VFP registers.
+	   Don't use VFP instructions directly because this code
+	   is used in non-VFP multilibs. 
*/ + /* Following instruction is vstmia ip!, {d8-d15}. */ + stc p11, cr8, [ip], #64 +.Lno_vfp: + +#ifndef ARM_ASSUME_NO_IWMMXT + tst a3, #HWCAP_ARM_IWMMXT + beq .Lno_iwmmxt + + /* Save the call-preserved iWMMXt registers. */ + /* Following instructions are wstrd wr10, [ip], #8 (etc.) */ + stcl p1, cr10, [r12], #8 + stcl p1, cr11, [r12], #8 + stcl p1, cr12, [r12], #8 + stcl p1, cr13, [r12], #8 + stcl p1, cr14, [r12], #8 + stcl p1, cr15, [r12], #8 +.Lno_iwmmxt: +#endif + + /* Make a tail call to __sigjmp_save; it takes the same args. */ + B PLTJMP(C_SYMBOL_NAME(__sigjmp_save)) + +END (__sigsetjmp) + +hidden_def (__sigsetjmp) diff --git a/REORG.TODO/sysdeps/arm/sfp-machine.h b/REORG.TODO/sysdeps/arm/sfp-machine.h new file mode 100644 index 0000000000..2e7c156294 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/sfp-machine.h @@ -0,0 +1,51 @@ +#define _FP_W_TYPE_SIZE 32 +#define _FP_W_TYPE unsigned long +#define _FP_WS_TYPE signed long +#define _FP_I_TYPE long + +#define _FP_MUL_MEAT_S(R,X,Y) \ + _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) +#define _FP_MUL_MEAT_D(R,X,Y) \ + _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) +#define _FP_MUL_MEAT_Q(R,X,Y) \ + _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) + +#define _FP_MUL_MEAT_DW_S(R,X,Y) \ + _FP_MUL_MEAT_DW_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm) +#define _FP_MUL_MEAT_DW_D(R,X,Y) \ + _FP_MUL_MEAT_DW_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm) +#define _FP_MUL_MEAT_DW_Q(R,X,Y) \ + _FP_MUL_MEAT_DW_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm) + +#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_loop(S,R,X,Y) +#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y) +#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y) + +#define _FP_NANFRAC_S _FP_QNANBIT_S +#define _FP_NANFRAC_D _FP_QNANBIT_D, 0 +#define _FP_NANFRAC_Q _FP_QNANBIT_Q, 0, 0, 0 +#define _FP_NANSIGN_S 0 +#define _FP_NANSIGN_D 0 +#define _FP_NANSIGN_Q 0 + +#define _FP_KEEPNANFRACP 1 +#define _FP_QNANNEGATEDP 0 + +/* Someone please check this. */ +#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \ + do { \ + if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs) \ + && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) \ + { \ + R##_s = Y##_s; \ + _FP_FRAC_COPY_##wc(R,Y); \ + } \ + else \ + { \ + R##_s = X##_s; \ + _FP_FRAC_COPY_##wc(R,X); \ + } \ + R##_c = FP_CLS_NAN; \ + } while (0) + +#define _FP_TININESS_AFTER_ROUNDING 0 diff --git a/REORG.TODO/sysdeps/arm/sotruss-lib.c b/REORG.TODO/sysdeps/arm/sotruss-lib.c new file mode 100644 index 0000000000..467bb26f38 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/sotruss-lib.c @@ -0,0 +1,49 @@ +/* Override generic sotruss-lib.c to define actual functions for ARM. + Copyright (C) 2012-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. 
*/ + +#define HAVE_ARCH_PLTENTER +#define HAVE_ARCH_PLTEXIT + +#include <elf/sotruss-lib.c> + +ElfW(Addr) +la_arm_gnu_pltenter (ElfW(Sym) *sym __attribute__ ((unused)), + unsigned int ndx __attribute__ ((unused)), + uintptr_t *refcook, uintptr_t *defcook, + La_arm_regs *regs, unsigned int *flags, + const char *symname, long int *framesizep) +{ + print_enter (refcook, defcook, symname, + regs->lr_reg[0], regs->lr_reg[1], regs->lr_reg[2], + *flags); + + /* No need to copy anything, we will not need the parameters in any case. */ + *framesizep = 0; + + return sym->st_value; +} + +unsigned int +la_arm_gnu_pltexit (ElfW(Sym) *sym, unsigned int ndx, uintptr_t *refcook, + uintptr_t *defcook, const struct La_arm_regs *inregs, + struct La_arm_retval *outregs, const char *symname) +{ + print_exit (refcook, defcook, symname, outregs->lrv_reg[0]); + + return 0; +} diff --git a/REORG.TODO/sysdeps/arm/stackinfo.h b/REORG.TODO/sysdeps/arm/stackinfo.h new file mode 100644 index 0000000000..8cec761ab8 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/stackinfo.h @@ -0,0 +1,33 @@ +/* Copyright (C) 2001-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* This file contains a bit of information about the stack allocation + of the processor. */ + +#ifndef _STACKINFO_H +#define _STACKINFO_H 1 + +#include <elf.h> + +/* On Arm the stack grows down. */ +#define _STACK_GROWS_DOWN 1 + +/* Default to an executable stack. PF_X can be overridden if PT_GNU_STACK is + * present, but it is presumed absent. */ +#define DEFAULT_STACK_PERMS (PF_R|PF_W|PF_X) + +#endif /* stackinfo.h */ diff --git a/REORG.TODO/sysdeps/arm/start.S b/REORG.TODO/sysdeps/arm/start.S new file mode 100644 index 0000000000..4973878f05 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/start.S @@ -0,0 +1,148 @@ +/* Startup code for ARM & ELF + Copyright (C) 1995-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + In addition to the permissions in the GNU Lesser General Public + License, the Free Software Foundation gives you unlimited + permission to link the compiled version of this file with other + programs, and to distribute those programs without any restriction + coming from the use of this file. (The GNU Lesser General Public + License restrictions do apply in other respects; for example, they + cover modification of the file, and distribution when not linked + into another program.) + + Note that people who make modified versions of this file are not + obligated to grant this special exception for their modified + versions; it is their choice whether to do so. 
The GNU Lesser + General Public License gives permission to release a modified + version without this exception; this exception also makes it + possible to release a modified version which carries forward this + exception. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* This is the canonical entry point, usually the first thing in the text + segment. + + Note that the code in the .init section has already been run. + This includes _init and _libc_init + + + At this entry point, most registers' values are unspecified, except: + + a1 Contains a function pointer to be registered with `atexit'. + This is how the dynamic linker arranges to have DT_FINI + functions called for shared libraries that have been loaded + before this code runs. + + sp The stack contains the arguments and environment: + 0(sp) argc + 4(sp) argv[0] + ... + (4*argc)(sp) NULL + (4*(argc+1))(sp) envp[0] + ... + NULL +*/ + +/* Tag_ABI_align8_preserved: This code preserves 8-byte + alignment in any callee. */ + .eabi_attribute 25, 1 +/* Tag_ABI_align8_needed: This code may require 8-byte alignment from + the caller. */ + .eabi_attribute 24, 1 + +#if defined(__thumb2__) + .thumb + .syntax unified +#endif + + .text + .globl _start + .type _start,#function +_start: + /* Protect against unhandled exceptions. */ + .fnstart + /* Clear the frame pointer and link register since this is the outermost frame. */ + mov fp, #0 + mov lr, #0 + + /* Pop argc off the stack and save a pointer to argv */ + pop { a2 } + mov a3, sp + + /* Push stack limit */ + push { a3 } + + /* Push rtld_fini */ + push { a1 } + +#ifdef SHARED + ldr sl, .L_GOT + adr a4, .L_GOT + add sl, sl, a4 + + ldr ip, .L_GOT+4 /* __libc_csu_fini */ + ldr ip, [sl, ip] + + push { ip } /* Push __libc_csu_fini */ + + ldr a4, .L_GOT+8 /* __libc_csu_init */ + ldr a4, [sl, a4] + + ldr a1, .L_GOT+12 /* main */ + ldr a1, [sl, a1] + + /* __libc_start_main (main, argc, argv, init, fini, rtld_fini, stack_end) */ + /* Let the libc call main and exit with its return code. */ + bl __libc_start_main(PLT) +#else + /* Fetch address of __libc_csu_fini */ + ldr ip, =__libc_csu_fini + + /* Push __libc_csu_fini */ + push { ip } + + /* Set up the other arguments in registers */ + ldr a1, =main + ldr a4, =__libc_csu_init + + /* __libc_start_main (main, argc, argv, init, fini, rtld_fini, stack_end) */ + /* Let the libc call main and exit with its return code. */ + bl __libc_start_main +#endif + + /* should never get here....*/ + bl abort + +#ifdef SHARED + .align 2 +.L_GOT: + .word _GLOBAL_OFFSET_TABLE_ - .L_GOT + .word __libc_csu_fini(GOT) + .word __libc_csu_init(GOT) + .word main(GOT) +#endif + + .cantunwind + .fnend + +/* Define a symbol for the first piece of initialized data. 
*/ + .data + .globl __data_start +__data_start: + .long 0 + .weak data_start + data_start = __data_start diff --git a/REORG.TODO/sysdeps/arm/static-stubs.c b/REORG.TODO/sysdeps/arm/static-stubs.c new file mode 100644 index 0000000000..d90d603a95 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/static-stubs.c @@ -0,0 +1,7 @@ +/* When building with GCC with static-only libgcc, the dummy + _Unwind_Resume from static-stubs.c needs to be used together with + the dummy __aeabi_unwind_cpp_pr* from aeabi_unwind_cpp_pr1.c + instead of the copies from libgcc. */ + +#include <elf/static-stubs.c> +#include <aeabi_unwind_cpp_pr1.c> diff --git a/REORG.TODO/sysdeps/arm/strlen.S b/REORG.TODO/sysdeps/arm/strlen.S new file mode 100644 index 0000000000..fb1589bbe6 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/strlen.S @@ -0,0 +1,77 @@ +/* Copyright (C) 1998-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Code contributed by Matthew Wilcox <willy@odie.barnet.ac.uk> + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Thumb requires excessive IT insns here. */ +#define NO_THUMB +#include <sysdep.h> + +/* size_t strlen(const char *S) + * entry: r0 -> string + * exit: r0 = len + */ + + .syntax unified + .text + +ENTRY(strlen) + bic r1, r0, $3 @ addr of word containing first byte + ldr r2, [r1], $4 @ get the first word + ands r3, r0, $3 @ how many bytes are duff? + rsb r0, r3, $0 @ get - that number into counter. + beq Laligned @ skip into main check routine if no + @ more +#ifdef __ARMEB__ + orr r2, r2, $0xff000000 @ set this byte to non-zero + subs r3, r3, $1 @ any more to do? + orrgt r2, r2, $0x00ff0000 @ if so, set this byte + subs r3, r3, $1 @ more? + orrgt r2, r2, $0x0000ff00 @ then set. +#else + orr r2, r2, $0x000000ff @ set this byte to non-zero + subs r3, r3, $1 @ any more to do? + orrgt r2, r2, $0x0000ff00 @ if so, set this byte + subs r3, r3, $1 @ more? + orrgt r2, r2, $0x00ff0000 @ then set. +#endif +Laligned: @ here, we have a word in r2. Does it + tst r2, $0x000000ff @ contain any zeroes? 
+ tstne r2, $0x0000ff00 @ + tstne r2, $0x00ff0000 @ + tstne r2, $0xff000000 @ + addne r0, r0, $4 @ if not, the string is 4 bytes longer + ldrne r2, [r1], $4 @ and we continue to the next word + bne Laligned @ +Llastword: @ drop through to here once we find a +#ifdef __ARMEB__ + tst r2, $0xff000000 @ word that has a zero byte in it + addne r0, r0, $1 @ + tstne r2, $0x00ff0000 @ and add up to 3 bytes on to it + addne r0, r0, $1 @ + tstne r2, $0x0000ff00 @ (if first three all non-zero, 4th + addne r0, r0, $1 @ must be zero) +#else + tst r2, $0x000000ff @ word that has a zero byte in it + addne r0, r0, $1 @ + tstne r2, $0x0000ff00 @ and add up to 3 bytes on to it + addne r0, r0, $1 @ + tstne r2, $0x00ff0000 @ (if first three all non-zero, 4th + addne r0, r0, $1 @ must be zero) +#endif + DO_RET(lr) +END(strlen) +libc_hidden_builtin_def (strlen) diff --git a/REORG.TODO/sysdeps/arm/sub_n.S b/REORG.TODO/sysdeps/arm/sub_n.S new file mode 100644 index 0000000000..8eafa41e64 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/sub_n.S @@ -0,0 +1,2 @@ +#define USE_AS_SUB_N +#include "add_n.S" diff --git a/REORG.TODO/sysdeps/arm/submul_1.S b/REORG.TODO/sysdeps/arm/submul_1.S new file mode 100644 index 0000000000..24d39d93b8 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/submul_1.S @@ -0,0 +1,68 @@ +/* mpn_submul_1 -- multiply and subtract bignums. + Copyright (C) 2013-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdep.h> + + .syntax unified + .text + +@ cycles/limb +@ StrongArm ? +@ Cortex-A8 ? +@ Cortex-A9 ? +@ Cortex-A15 4 + +/* mp_limb_t mpn_submul_1(res_ptr, src1_ptr, size, s2_limb) */ + +ENTRY (__mpn_submul_1) + push { r4, r5, r6, r7 } + cfi_adjust_cfa_offset (16) + cfi_rel_offset (r4, 0) + cfi_rel_offset (r5, 4) + cfi_rel_offset (r6, 8) + cfi_rel_offset (r7, 12) + + ldr r6, [r1], #4 + ldr r7, [r0] + mov r4, #0 /* init carry in */ + b 1f +0: + ldr r6, [r1], #4 /* load next ul */ + adds r5, r5, r4 /* (lpl, c) = lpl + cl */ + adc r4, ip, #0 /* cl = hpl + c */ + subs r5, r7, r5 /* (lpl, !c) = rl - lpl */ + ldr r7, [r0, #4] /* load next rl */ + it cc + addcc r4, r4, #1 /* cl += !c */ + str r5, [r0], #4 +1: + umull r5, ip, r6, r3 /* (hpl, lpl) = ul * vl */ + subs r2, r2, #1 + bne 0b + + adds r5, r5, r4 /* (lpl, c) = lpl + cl */ + adc r4, ip, #0 /* cl = hpl + c */ + subs r5, r7, r5 /* (lpl, !c) = rl - lpl */ + str r5, [r0], #4 + it cc + addcc r4, r4, #1 /* cl += !c */ + mov r0, r4 /* return carry */ + + pop { r4, r5, r6, r7 } + DO_RET (lr) +END (__mpn_submul_1) diff --git a/REORG.TODO/sysdeps/arm/sys/ucontext.h b/REORG.TODO/sysdeps/arm/sys/ucontext.h new file mode 100644 index 0000000000..722300a7f1 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/sys/ucontext.h @@ -0,0 +1,111 @@ +/* Copyright (C) 1998-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* System V/ARM ABI compliant context switching support. */ + +#ifndef _SYS_UCONTEXT_H +#define _SYS_UCONTEXT_H 1 + +#include <features.h> + +#include <bits/types/sigset_t.h> +#include <bits/sigcontext.h> +#include <bits/types/stack_t.h> + + +typedef int greg_t; + +/* Number of general registers. */ +#define __NGREG 16 +#ifdef __USE_MISC +# define NGREG __NGREG +#endif + +/* Container for all general registers. */ +typedef greg_t gregset_t[__NGREG]; + +#ifdef __USE_MISC +/* Number of each register is the `gregset_t' array. */ +enum +{ + R0 = 0, +# define R0 R0 + R1 = 1, +# define R1 R1 + R2 = 2, +# define R2 R2 + R3 = 3, +# define R3 R3 + R4 = 4, +# define R4 R4 + R5 = 5, +# define R5 R5 + R6 = 6, +# define R6 R6 + R7 = 7, +# define R7 R7 + R8 = 8, +# define R8 R8 + R9 = 9, +# define R9 R9 + R10 = 10, +# define R10 R10 + R11 = 11, +# define R11 R11 + R12 = 12, +# define R12 R12 + R13 = 13, +# define R13 R13 + R14 = 14, +# define R14 R14 + R15 = 15, +# define R15 R15 +}; +#endif + +#ifdef __USE_MISC +# define __ctx(fld) fld +#else +# define __ctx(fld) __ ## fld +#endif + +/* Structure to describe FPU registers. */ +typedef struct fpregset + { + } fpregset_t; + +/* Context to describe whole processor state. */ +typedef struct + { + gregset_t __ctx(gregs); + fpregset_t __ctx(fpregs); + } mcontext_t; + +#undef __ctx + +/* Userlevel context. */ +typedef struct ucontext + { + unsigned long int uc_flags; + struct ucontext *uc_link; + sigset_t uc_sigmask; + stack_t uc_stack; + mcontext_t uc_mcontext; + long int uc_filler[5]; + } ucontext_t; + +#endif /* sys/ucontext.h */ diff --git a/REORG.TODO/sysdeps/arm/sysdep.h b/REORG.TODO/sysdeps/arm/sysdep.h new file mode 100644 index 0000000000..6d60c34df1 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/sysdep.h @@ -0,0 +1,339 @@ +/* Assembler macros for ARM. + Copyright (C) 1997-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <sysdeps/generic/sysdep.h> +#include <features.h> + +#ifndef __ASSEMBLER__ +# include <stdint.h> +#else +# include <arm-features.h> +#endif + +/* The __ARM_ARCH define is provided by gcc 4.8. Construct it otherwise. 
*/ +#ifndef __ARM_ARCH +# ifdef __ARM_ARCH_2__ +# define __ARM_ARCH 2 +# elif defined (__ARM_ARCH_3__) || defined (__ARM_ARCH_3M__) +# define __ARM_ARCH 3 +# elif defined (__ARM_ARCH_4__) || defined (__ARM_ARCH_4T__) +# define __ARM_ARCH 4 +# elif defined (__ARM_ARCH_5__) || defined (__ARM_ARCH_5E__) \ + || defined(__ARM_ARCH_5T__) || defined(__ARM_ARCH_5TE__) \ + || defined(__ARM_ARCH_5TEJ__) +# define __ARM_ARCH 5 +# elif defined (__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) \ + || defined (__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) \ + || defined (__ARM_ARCH_6K__) || defined(__ARM_ARCH_6T2__) +# define __ARM_ARCH 6 +# elif defined (__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) \ + || defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) \ + || defined(__ARM_ARCH_7EM__) +# define __ARM_ARCH 7 +# else +# error unknown arm architecture +# endif +#endif + +#if __ARM_ARCH > 4 || defined (__ARM_ARCH_4T__) +# define ARCH_HAS_BX +#endif +#if __ARM_ARCH > 4 +# define ARCH_HAS_BLX +#endif +#if __ARM_ARCH > 6 || defined (__ARM_ARCH_6K__) || defined (__ARM_ARCH_6ZK__) +# define ARCH_HAS_HARD_TP +#endif +#if __ARM_ARCH > 6 || defined (__ARM_ARCH_6T2__) +# define ARCH_HAS_T2 +#endif + +#ifdef __ASSEMBLER__ + +/* Syntactic details of assembler. */ + +#define ALIGNARG(log2) log2 +#define ASM_SIZE_DIRECTIVE(name) .size name,.-name + +#define PLTJMP(_x) _x##(PLT) + +#ifdef ARCH_HAS_BX +# define BX(R) bx R +# define BXC(C, R) bx##C R +# ifdef ARCH_HAS_BLX +# define BLX(R) blx R +# else +# define BLX(R) mov lr, pc; bx R +# endif +#else +# define BX(R) mov pc, R +# define BXC(C, R) mov##C pc, R +# define BLX(R) mov lr, pc; mov pc, R +#endif + +#define DO_RET(R) BX(R) +#define RETINSTR(C, R) BXC(C, R) + +/* Define an entry point visible from C. */ +#define ENTRY(name) \ + .globl C_SYMBOL_NAME(name); \ + .type C_SYMBOL_NAME(name),%function; \ + .align ALIGNARG(4); \ + C_LABEL(name) \ + CFI_SECTIONS; \ + cfi_startproc; \ + CALL_MCOUNT + +#define CFI_SECTIONS \ + .cfi_sections .debug_frame + +#undef END +#define END(name) \ + cfi_endproc; \ + ASM_SIZE_DIRECTIVE(name) + +/* If compiled for profiling, call `mcount' at the start of each function. */ +#ifdef PROF +/* Call __gnu_mcount_nc (GCC >= 4.4). */ +#define CALL_MCOUNT \ + push {lr}; \ + cfi_adjust_cfa_offset (4); \ + cfi_rel_offset (lr, 0); \ + bl PLTJMP(mcount); \ + cfi_adjust_cfa_offset (-4); \ + cfi_restore (lr) +#else +#define CALL_MCOUNT /* Do nothing. */ +#endif + +/* Since C identifiers are not normally prefixed with an underscore + on this system, the asm identifier `syscall_error' intrudes on the + C name space. Make sure we use an innocuous name. */ +#define syscall_error __syscall_error +#define mcount __gnu_mcount_nc + +/* Tag_ABI_align8_preserved: This code preserves 8-byte + alignment in any callee. */ + .eabi_attribute 25, 1 +/* Tag_ABI_align8_needed: This code may require 8-byte alignment from + the caller. */ + .eabi_attribute 24, 1 + +/* The thumb2 encoding is reasonably complete. Unless suppressed, use it. */ + .syntax unified +# if defined(__thumb2__) && !defined(NO_THUMB) + .thumb +#else +# undef __thumb__ +# undef __thumb2__ + .arm +# endif + +/* Load or store to/from address X + Y into/from R, (maybe) using T. + X or Y can use T freely; T can be R if OP is a load. The first + version eschews the two-register addressing mode, while the + second version uses it. 
*/ +# define LDST_INDEXED_NOINDEX(OP, R, T, X, Y) \ + add T, X, Y; \ + OP R, [T] +# define LDST_INDEXED_INDEX(OP, R, X, Y) \ + OP R, [X, Y] + +# ifdef ARM_NO_INDEX_REGISTER +/* We're never using the two-register addressing mode, so this + always uses an intermediate add. */ +# define LDST_INDEXED(OP, R, T, X, Y) LDST_INDEXED_NOINDEX (OP, R, T, X, Y) +# define LDST_PC_INDEXED(OP, R, T, X) LDST_INDEXED_NOINDEX (OP, R, T, pc, X) +# else +/* The two-register addressing mode is OK, except on Thumb with pc. */ +# define LDST_INDEXED(OP, R, T, X, Y) LDST_INDEXED_INDEX (OP, R, X, Y) +# ifdef __thumb2__ +# define LDST_PC_INDEXED(OP, R, T, X) LDST_INDEXED_NOINDEX (OP, R, T, pc, X) +# else +# define LDST_PC_INDEXED(OP, R, T, X) LDST_INDEXED_INDEX (OP, R, pc, X) +# endif +# endif + +/* Load or store to/from a pc-relative EXPR into/from R, using T. */ +# ifdef __thumb2__ +# define LDST_PCREL(OP, R, T, EXPR) \ + ldr T, 98f; \ + .subsection 2; \ +98: .word EXPR - 99f - PC_OFS; \ + .previous; \ +99: add T, T, pc; \ + OP R, [T] +# elif defined (ARCH_HAS_T2) && ARM_PCREL_MOVW_OK +# define LDST_PCREL(OP, R, T, EXPR) \ + movw T, #:lower16:EXPR - 99f - PC_OFS; \ + movt T, #:upper16:EXPR - 99f - PC_OFS; \ +99: LDST_PC_INDEXED (OP, R, T, T) +# else +# define LDST_PCREL(OP, R, T, EXPR) \ + ldr T, 98f; \ + .subsection 2; \ +98: .word EXPR - 99f - PC_OFS; \ + .previous; \ +99: OP R, [pc, T] +# endif + +/* Load from a global SYMBOL + CONSTANT into R, using T. */ +# if defined (ARCH_HAS_T2) && !defined (PIC) +# define LDR_GLOBAL(R, T, SYMBOL, CONSTANT) \ + movw T, #:lower16:SYMBOL; \ + movt T, #:upper16:SYMBOL; \ + ldr R, [T, $CONSTANT] +# elif defined (ARCH_HAS_T2) && defined (PIC) && ARM_PCREL_MOVW_OK +# define LDR_GLOBAL(R, T, SYMBOL, CONSTANT) \ + movw R, #:lower16:_GLOBAL_OFFSET_TABLE_ - 97f - PC_OFS; \ + movw T, #:lower16:99f - 98f - PC_OFS; \ + movt R, #:upper16:_GLOBAL_OFFSET_TABLE_ - 97f - PC_OFS; \ + movt T, #:upper16:99f - 98f - PC_OFS; \ + .pushsection .rodata.cst4, "aM", %progbits, 4; \ + .balign 4; \ +99: .word SYMBOL##(GOT); \ + .popsection; \ +97: add R, R, pc; \ +98: LDST_PC_INDEXED (ldr, T, T, T); \ + LDST_INDEXED (ldr, R, T, R, T); \ + ldr R, [R, $CONSTANT] +# else +# define LDR_GLOBAL(R, T, SYMBOL, CONSTANT) \ + ldr T, 99f; \ + ldr R, 100f; \ +98: add T, T, pc; \ + ldr T, [T, R]; \ + .subsection 2; \ +99: .word _GLOBAL_OFFSET_TABLE_ - 98b - PC_OFS; \ +100: .word SYMBOL##(GOT); \ + .previous; \ + ldr R, [T, $CONSTANT] +# endif + +/* This is the same as LDR_GLOBAL, but for a SYMBOL that is known to + be in the same linked object (as for one with hidden visibility). + We can avoid the GOT indirection in the PIC case. For the pure + static case, LDR_GLOBAL is already optimal. */ +# ifdef PIC +# define LDR_HIDDEN(R, T, SYMBOL, CONSTANT) \ + LDST_PCREL (ldr, R, T, SYMBOL + CONSTANT) +# else +# define LDR_HIDDEN(R, T, SYMBOL, CONSTANT) \ + LDR_GLOBAL (R, T, SYMBOL, CONSTANT) +# endif + +/* Cope with negative memory offsets, which thumb can't encode. + Use NEGOFF_ADJ_BASE to (conditionally) alter the base register, + and then NEGOFF_OFF1 to use 0 for thumb and the offset for arm, + or NEGOFF_OFF2 to use A-B for thumb and A for arm. 
*/ +# ifdef __thumb2__ +# define NEGOFF_ADJ_BASE(R, OFF) add R, R, $OFF +# define NEGOFF_ADJ_BASE2(D, S, OFF) add D, S, $OFF +# define NEGOFF_OFF1(R, OFF) [R] +# define NEGOFF_OFF2(R, OFFA, OFFB) [R, $((OFFA) - (OFFB))] +# else +# define NEGOFF_ADJ_BASE(R, OFF) +# define NEGOFF_ADJ_BASE2(D, S, OFF) mov D, S +# define NEGOFF_OFF1(R, OFF) [R, $OFF] +# define NEGOFF_OFF2(R, OFFA, OFFB) [R, $OFFA] +# endif + +/* Helper to get the TLS base pointer. The interface is that TMP is a + register that may be used to hold the LR, if necessary. TMP may be + LR itself to indicate that LR need not be saved. The base pointer + is returned in R0. Only R0 and TMP are modified. */ + +# ifdef ARCH_HAS_HARD_TP +/* If the cpu has cp15 available, use it. */ +# define GET_TLS(TMP) mrc p15, 0, r0, c13, c0, 3 +# else +/* At this generic level we have no tricks to pull. Call the ABI routine. */ +# define GET_TLS(TMP) \ + push { r1, r2, r3, lr }; \ + cfi_remember_state; \ + cfi_adjust_cfa_offset (16); \ + cfi_rel_offset (r1, 0); \ + cfi_rel_offset (r2, 4); \ + cfi_rel_offset (r3, 8); \ + cfi_rel_offset (lr, 12); \ + bl __aeabi_read_tp; \ + pop { r1, r2, r3, lr }; \ + cfi_restore_state +# endif /* ARCH_HAS_HARD_TP */ + +/* These are the directives used for EABI unwind info. + Wrap them in macros so another configuration's sysdep.h + file can define them away if it doesn't use EABI unwind info. */ +# define eabi_fnstart .fnstart +# define eabi_fnend .fnend +# define eabi_save(...) .save __VA_ARGS__ +# define eabi_cantunwind .cantunwind +# define eabi_pad(n) .pad n + +#endif /* __ASSEMBLER__ */ + +/* This number is the offset from the pc at the current location. */ +#ifdef __thumb__ +# define PC_OFS 4 +#else +# define PC_OFS 8 +#endif + +/* Pointer mangling support. */ +#if (IS_IN (rtld) || \ + (!defined SHARED && (IS_IN (libc) || IS_IN (libpthread)))) +# ifdef __ASSEMBLER__ +# define PTR_MANGLE_LOAD(guard, tmp) \ + LDR_HIDDEN (guard, tmp, C_SYMBOL_NAME(__pointer_chk_guard_local), 0) +# define PTR_MANGLE(dst, src, guard, tmp) \ + PTR_MANGLE_LOAD(guard, tmp); \ + PTR_MANGLE2(dst, src, guard) +/* Use PTR_MANGLE2 for efficiency if guard is already loaded. */ +# define PTR_MANGLE2(dst, src, guard) \ + eor dst, src, guard +# define PTR_DEMANGLE(dst, src, guard, tmp) \ + PTR_MANGLE (dst, src, guard, tmp) +# define PTR_DEMANGLE2(dst, src, guard) \ + PTR_MANGLE2 (dst, src, guard) +# else +extern uintptr_t __pointer_chk_guard_local attribute_relro attribute_hidden; +# define PTR_MANGLE(var) \ + (var) = (__typeof (var)) ((uintptr_t) (var) ^ __pointer_chk_guard_local) +# define PTR_DEMANGLE(var) PTR_MANGLE (var) +# endif +#else +# ifdef __ASSEMBLER__ +# define PTR_MANGLE_LOAD(guard, tmp) \ + LDR_GLOBAL (guard, tmp, C_SYMBOL_NAME(__pointer_chk_guard), 0); +# define PTR_MANGLE(dst, src, guard, tmp) \ + PTR_MANGLE_LOAD(guard, tmp); \ + PTR_MANGLE2(dst, src, guard) +/* Use PTR_MANGLE2 for efficiency if guard is already loaded. 
*/ +# define PTR_MANGLE2(dst, src, guard) \ + eor dst, src, guard +# define PTR_DEMANGLE(dst, src, guard, tmp) \ + PTR_MANGLE (dst, src, guard, tmp) +# define PTR_DEMANGLE2(dst, src, guard) \ + PTR_MANGLE2 (dst, src, guard) +# else +extern uintptr_t __pointer_chk_guard attribute_relro; +# define PTR_MANGLE(var) \ + (var) = (__typeof (var)) ((uintptr_t) (var) ^ __pointer_chk_guard) +# define PTR_DEMANGLE(var) PTR_MANGLE (var) +# endif +#endif diff --git a/REORG.TODO/sysdeps/arm/test-fpucw.c b/REORG.TODO/sysdeps/arm/test-fpucw.c new file mode 100644 index 0000000000..9fc721c7f4 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/test-fpucw.c @@ -0,0 +1,5 @@ +/* Defining _LIBC_TEST stops fpu_control.h from defining the + hard-float versions of macros (for use with dynamic VFP detection) + when compiling for soft-float. */ +#define _LIBC_TEST +#include <math/test-fpucw.c> diff --git a/REORG.TODO/sysdeps/arm/tls-macros.h b/REORG.TODO/sysdeps/arm/tls-macros.h new file mode 100644 index 0000000000..25cd84931b --- /dev/null +++ b/REORG.TODO/sysdeps/arm/tls-macros.h @@ -0,0 +1,72 @@ +#include <sysdep.h> /* For ARCH_HAS_T2. */ + +#ifdef __thumb2__ +# define ARM_PC_OFFSET "4" +#else +# define ARM_PC_OFFSET "8" +#endif + +/* Returns the address of data containing ".word SYMBOL(RELOC)". */ +#if defined (ARCH_HAS_T2) && !defined (PIC) +# define GET_SPECIAL_RELOC(symbol, reloc) \ + ({ \ + int *__##symbol##_rodata; \ + asm ("movw %0, #:lower16:1f\n" \ + "movt %0, #:upper16:1f\n" \ + ".pushsection .rodata.cst4, \"aM\", %%progbits, 4\n" \ + ".balign 4\n" \ + "1: .word " #symbol "(" #reloc ")\n" \ + ".popsection" \ + : "=r" (__##symbol##_rodata)); \ + __##symbol##_rodata; \ + }) +#elif defined (ARCH_HAS_T2) && defined (PIC) && ARM_PCREL_MOVW_OK +# define GET_SPECIAL_RELOC(symbol, reloc) \ + ({ \ + int *__##symbol##_rodata; \ + asm ("movw %0, #:lower16:1f - 2f - " ARM_PC_OFFSET "\n" \ + "movt %0, #:upper16:1f - 2f - " ARM_PC_OFFSET "\n" \ + ".pushsection .rodata.cst4, \"aM\", %%progbits, 4\n" \ + ".balign 4\n" \ + "1: .word " #symbol "(" #reloc ")\n" \ + ".popsection\n" \ + "2: add %0, %0, pc" \ + : "=r" (__##symbol##_rodata)); \ + __##symbol##_rodata; \ + }) +#else +# define GET_SPECIAL_RELOC(symbol, reloc) \ + ({ \ + int *__##symbol##_rodata; \ + asm ("adr %0, 1f\n" \ + "b 2f\n" \ + ".balign 4\n" \ + "1: .word " #symbol "(" #reloc ")\n" \ + "2:" \ + : "=r" (__##symbol##_rodata)); \ + __##symbol##_rodata; \ + }) +#endif + +/* Returns the pointer value (SYMBOL(RELOC) + pc - PC_OFS). */ +#define GET_SPECIAL_PCREL(symbol, reloc) \ + ({ \ + int *__##symbol##_rodata = GET_SPECIAL_RELOC (symbol, reloc); \ + (void *) ((int) __##symbol##_rodata + *__##symbol##_rodata); \ + }) + +#define TLS_LE(x) \ + (__builtin_thread_pointer () + *GET_SPECIAL_RELOC (x, tpoff)) + +#define TLS_IE(x) \ + ((int *) (__builtin_thread_pointer () \ + + *(int *) GET_SPECIAL_PCREL (x, gottpoff))) + +extern void *__tls_get_addr (void *); + +#define TLS_LD(x) \ + ((int *) (__tls_get_addr (GET_SPECIAL_PCREL (x, tlsldm)) \ + + *GET_SPECIAL_RELOC (x, tlsldo))) + +#define TLS_GD(x) \ + ((int *) __tls_get_addr (GET_SPECIAL_PCREL (x, tlsgd))) diff --git a/REORG.TODO/sysdeps/arm/tlsdesc.c b/REORG.TODO/sysdeps/arm/tlsdesc.c new file mode 100644 index 0000000000..7ec5fef688 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/tlsdesc.c @@ -0,0 +1,160 @@ +/* Manage TLS descriptors. ARM version. + Copyright (C) 2005-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#include <link.h> +#include <ldsodefs.h> +#include <elf/dynamic-link.h> +#include <tls.h> +#include <dl-tlsdesc.h> +#include <dl-unmap-segments.h> +#include <tlsdeschtab.h> + +/* This function is used to lazily resolve TLS_DESC REL relocations + Besides the TLS descriptor itself, we get the module's got address + as the second parameter. */ + +void +attribute_hidden +_dl_tlsdesc_lazy_resolver_fixup (struct tlsdesc volatile *td, + Elf32_Addr *got) +{ + struct link_map *l = (struct link_map *)got[1]; + lookup_t result; + unsigned long value; + + if (_dl_tlsdesc_resolve_early_return_p + (td, (void*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_PLT)]) + l->l_addr))) + return; + + if (td->argument.value & 0x80000000) + { + /* A global symbol, this is the symbol index. */ + /* The code below was borrowed from _dl_fixup(). */ + const Elf_Symndx symndx = td->argument.value ^ 0x80000000; + const ElfW(Sym) *const symtab + = (const void *) D_PTR (l, l_info[DT_SYMTAB]); + const char *strtab = (const void *) D_PTR (l, l_info[DT_STRTAB]); + const ElfW(Sym) *sym = &symtab[symndx]; + + /* Look up the target symbol. If the normal lookup rules are not + used don't look in the global scope. */ + if (ELFW(ST_BIND) (sym->st_info) != STB_LOCAL + && __builtin_expect (ELFW(ST_VISIBILITY) (sym->st_other), 0) == 0) + { + const struct r_found_version *version = NULL; + + if (l->l_info[VERSYMIDX (DT_VERSYM)] != NULL) + { + const ElfW(Half) *vernum = + (const void *) D_PTR (l, l_info[VERSYMIDX (DT_VERSYM)]); + ElfW(Half) ndx = vernum[symndx] & 0x7fff; + version = &l->l_versions[ndx]; + if (version->hash == 0) + version = NULL; + } + + result = _dl_lookup_symbol_x + (strtab + sym->st_name, l, &sym, + l->l_scope, version, ELF_RTYPE_CLASS_PLT, + DL_LOOKUP_ADD_DEPENDENCY, NULL); + if (sym) + value = sym->st_value; + else + { + td->entry = _dl_tlsdesc_undefweak; + goto done; + } + } + else + { + /* We already found the symbol. The module (and therefore its load + address) is also known. */ + result = l; + value = sym->st_value; + } + } + else + { + /* A local symbol, this is the offset within our tls section. + */ + value = td->argument.value; + result = l; + } + +#ifndef SHARED + CHECK_STATIC_TLS (l, result); +#else + if (!TRY_STATIC_TLS (l, result)) + { + td->argument.pointer = _dl_make_tlsdesc_dynamic (result, value); + td->entry = _dl_tlsdesc_dynamic; + } + else +#endif + { + td->argument.value = value + result->l_tls_offset; + td->entry = _dl_tlsdesc_return; + } + + done: + _dl_tlsdesc_wake_up_held_fixups (); +} + +/* This function is used to avoid busy waiting for other threads to + complete the lazy relocation. Once another thread wins the race to + relocate a TLS descriptor, it sets the descriptor up such that this + function is called to wait until the resolver releases the + lock. 
*/ + +void +attribute_hidden +_dl_tlsdesc_resolve_hold_fixup (struct tlsdesc volatile *td, + void *caller) +{ + /* Maybe we're lucky and can return early. */ + if (caller != td->entry) + return; + + /* Locking here will stop execution until the running resolver runs + _dl_tlsdesc_wake_up_held_fixups(), releasing the lock. + + FIXME: We'd be better off waiting on a condition variable, such + that we didn't have to hold the lock throughout the relocation + processing. */ + __rtld_lock_lock_recursive (GL(dl_load_lock)); + __rtld_lock_unlock_recursive (GL(dl_load_lock)); +} + +/* Unmap the dynamic object, but also release its TLS descriptor table + if there is one. */ + +void +internal_function +_dl_unmap (struct link_map *map) +{ + _dl_unmap_segments (map); + +#ifdef SHARED + /* _dl_unmap is only called for dlopen()ed libraries, for which + calling free() is safe, or before we've completed the initial + relocation, in which case calling free() is probably pointless, + but still safe. */ + if (map->l_mach.tlsdesc_table) + htab_delete (map->l_mach.tlsdesc_table); +#endif +} diff --git a/REORG.TODO/sysdeps/arm/tlsdesc.sym b/REORG.TODO/sysdeps/arm/tlsdesc.sym new file mode 100644 index 0000000000..3f3a13e2c4 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/tlsdesc.sym @@ -0,0 +1,15 @@ +#include <stddef.h> +#include <sysdep.h> +#include <tls.h> +#include <link.h> +#include <dl-tlsdesc.h> + +-- + +-- Abuse tls.h macros to derive offsets relative to the thread register. + +TLSDESC_ARG offsetof(struct tlsdesc, argument.pointer) + +TLSDESC_GEN_COUNT offsetof(struct tlsdesc_dynamic_arg, gen_count) +TLSDESC_MODID offsetof(struct tlsdesc_dynamic_arg, tlsinfo.ti_module) +TLSDESC_MODOFF offsetof(struct tlsdesc_dynamic_arg, tlsinfo.ti_offset) diff --git a/REORG.TODO/sysdeps/arm/tst-armtlsdescextlazy.c b/REORG.TODO/sysdeps/arm/tst-armtlsdescextlazy.c new file mode 100644 index 0000000000..36ae9994f7 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/tst-armtlsdescextlazy.c @@ -0,0 +1 @@ +#include "tst-armtlsdescloc.c" diff --git a/REORG.TODO/sysdeps/arm/tst-armtlsdescextlazymod.c b/REORG.TODO/sysdeps/arm/tst-armtlsdescextlazymod.c new file mode 100644 index 0000000000..2cb8f8c853 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/tst-armtlsdescextlazymod.c @@ -0,0 +1 @@ +#include "tst-armtlsdesclocmod.c" diff --git a/REORG.TODO/sysdeps/arm/tst-armtlsdescextnow.c b/REORG.TODO/sysdeps/arm/tst-armtlsdescextnow.c new file mode 100644 index 0000000000..36ae9994f7 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/tst-armtlsdescextnow.c @@ -0,0 +1 @@ +#include "tst-armtlsdescloc.c" diff --git a/REORG.TODO/sysdeps/arm/tst-armtlsdescextnowmod.c b/REORG.TODO/sysdeps/arm/tst-armtlsdescextnowmod.c new file mode 100644 index 0000000000..2cb8f8c853 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/tst-armtlsdescextnowmod.c @@ -0,0 +1 @@ +#include "tst-armtlsdesclocmod.c" diff --git a/REORG.TODO/sysdeps/arm/tst-armtlsdescloc.c b/REORG.TODO/sysdeps/arm/tst-armtlsdescloc.c new file mode 100644 index 0000000000..0abb5930c5 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/tst-armtlsdescloc.c @@ -0,0 +1,28 @@ +/* ARM immediate binding GNU TLS descriptor relocation test. + Copyright (C) 2014-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. 
+ + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +int getfoo (void); + +int +do_test (void) +{ + return getfoo (); +} + +#define TEST_FUNCTION do_test () +#include "../test-skeleton.c" diff --git a/REORG.TODO/sysdeps/arm/tst-armtlsdesclocmod.c b/REORG.TODO/sysdeps/arm/tst-armtlsdesclocmod.c new file mode 100644 index 0000000000..4f0be0b389 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/tst-armtlsdesclocmod.c @@ -0,0 +1,44 @@ +/* DSO used for ARM immediate binding GNU TLS descriptor relocation test. + Copyright (C) 2014-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +static int __thread bar = 1; +static int __thread foo; + +int +getfoo (void) +{ + return foo; +} + +void +setfoo (int i) +{ + foo = 1; +} + +int +getbar (void) +{ + return bar; +} + +void +setbar (int i) +{ + bar = 1; +} diff --git a/REORG.TODO/sysdeps/arm/tst-audit.h b/REORG.TODO/sysdeps/arm/tst-audit.h new file mode 100644 index 0000000000..e6cc5a6ef3 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/tst-audit.h @@ -0,0 +1,25 @@ +/* Definitions for testing PLT entry/exit auditing. ARM version. + + Copyright (C) 2005-2017 Free Software Foundation, Inc. + + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +#define pltenter la_arm_gnu_pltenter +#define pltexit la_arm_gnu_pltexit +#define La_regs La_arm_regs +#define La_retval La_arm_retval +#define int_retval lrv_reg[0] diff --git a/REORG.TODO/sysdeps/arm/unwind-dw2-fde-glibc.c b/REORG.TODO/sysdeps/arm/unwind-dw2-fde-glibc.c new file mode 100644 index 0000000000..bc3cd1fc1f --- /dev/null +++ b/REORG.TODO/sysdeps/arm/unwind-dw2-fde-glibc.c @@ -0,0 +1,79 @@ +/* Dummy exception handling and frame unwind runtime interface routines. + Copyright (C) 2004-2017 Free Software Foundation, Inc. + + This file is part of the GNU C Library. 
+ + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 2.1 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library. If not, see + <http://www.gnu.org/licenses/>. */ + +/* ARM uses setjmp-longjmp exceptions. However, previous versions of + GNU libc exported some DWARF-2 exception handling support routines. + They are not necessary, but older (or broken) configurations of GCC + will do so. Even though all references to these are weak, because + they refer to versioned symbols, they must be provided. */ + +#include <stdlib.h> +#include <unwind.h> +#include <unwind-dw2-fde.h> + +/* These may be called from startup code, but don't need to do + anything. */ + +void __register_frame_info_bases (void *a1, struct object *a2, + void *a3, void *a4) +{ +} + +void __register_frame_info (void *a1, struct object *a2) +{ +} + +void __register_frame (void *a1) +{ +} + +void __register_frame_info_table_bases (void *a1, struct object *a2, + void *a3, void *a4) +{ +} + +void __register_frame_info_table (void *a1, struct object *a2) +{ +} + +void __register_frame_table (void *a1) +{ +} + +void *__deregister_frame_info (void *a1) +{ + return NULL; +} + +void *__deregister_frame_info_bases (void *a1) +{ + return NULL; +} + +void __deregister_frame (void *a1) +{ +} + +/* This should not be called. */ + +fde * +_Unwind_Find_FDE (void *a1, struct dwarf_eh_bases *a2) +{ + abort (); +} diff --git a/REORG.TODO/sysdeps/arm/unwind-pe.c b/REORG.TODO/sysdeps/arm/unwind-pe.c new file mode 100644 index 0000000000..710cecca97 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/unwind-pe.c @@ -0,0 +1 @@ +/* Empty */ diff --git a/REORG.TODO/sysdeps/arm/unwind-resume.h b/REORG.TODO/sysdeps/arm/unwind-resume.h new file mode 100644 index 0000000000..221f10135d --- /dev/null +++ b/REORG.TODO/sysdeps/arm/unwind-resume.h @@ -0,0 +1,33 @@ +/* Definitions for unwind-resume.c. ARM (EABI) version. + Copyright (C) 2015-2017 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public License as + published by the Free Software Foundation; either version 2.1 of the + License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; see the file COPYING.LIB. If + not, see <http://www.gnu.org/licenses/>. */ + +/* The EABI personality routine has a different signature than the + canonical one. These macros tell sysdeps/gnu/unwind*.c how to + define __gcc_personality_v0. 
*/ +#define PERSONALITY_PROTO \ + (_Unwind_State state, \ + struct _Unwind_Exception *ue_header, \ + struct _Unwind_Context *context) +#define PERSONALITY_ARGS \ + (state, ue_header, context) + +/* It's vitally important that _Unwind_Resume not have a stack frame; the + ARM unwinder relies on register state at entrance. So we write this in + assembly (see arm-unwind-resume.S). This macro tells the generic code + not to provide the generic C definition. */ +#define HAVE_ARCH_UNWIND_RESUME 1 diff --git a/REORG.TODO/sysdeps/arm/unwind.h b/REORG.TODO/sysdeps/arm/unwind.h new file mode 100644 index 0000000000..08b6f38022 --- /dev/null +++ b/REORG.TODO/sysdeps/arm/unwind.h @@ -0,0 +1,278 @@ +/* Header file for the ARM EABI unwinder + Copyright (C) 2003-2017 Free Software Foundation, Inc. + Contributed by Paul Brook + + This file is free software; you can redistribute it and/or modify it + under the terms of the GNU General Public License as published by the + Free Software Foundation; either version 2, or (at your option) any + later version. + + In addition to the permissions in the GNU General Public License, the + Free Software Foundation gives you unlimited permission to link the + compiled version of this file into combinations with other programs, + and to distribute those combinations without any restriction coming + from the use of this file. (The General Public License restrictions + do apply in other respects; for example, they cover modification of + the file, and distribution when not linked into a combine + executable.) + + This file is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see + <http://www.gnu.org/licenses/>. */ + +/* Language-independent unwinder header public defines. This contains both + ABI defined objects, and GNU support routines. */ + +#ifndef UNWIND_ARM_H +#define UNWIND_ARM_H + +#define __ARM_EABI_UNWINDER__ 1 + +#ifdef __cplusplus +extern "C" { +#endif + typedef unsigned _Unwind_Word __attribute__((__mode__(__word__))); + typedef signed _Unwind_Sword __attribute__((__mode__(__word__))); + typedef unsigned _Unwind_Ptr __attribute__((__mode__(__pointer__))); + typedef unsigned _Unwind_Internal_Ptr __attribute__((__mode__(__pointer__))); + typedef _Unwind_Word _uw; + typedef unsigned _uw64 __attribute__((mode(__DI__))); + typedef unsigned _uw16 __attribute__((mode(__HI__))); + typedef unsigned _uw8 __attribute__((mode(__QI__))); + + typedef enum + { + _URC_OK = 0, /* operation completed successfully */ + _URC_FOREIGN_EXCEPTION_CAUGHT = 1, + _URC_END_OF_STACK = 5, + _URC_HANDLER_FOUND = 6, + _URC_INSTALL_CONTEXT = 7, + _URC_CONTINUE_UNWIND = 8, + _URC_FAILURE = 9 /* unspecified failure of some kind */ + } + _Unwind_Reason_Code; + + typedef enum + { + _US_VIRTUAL_UNWIND_FRAME = 0, + _US_UNWIND_FRAME_STARTING = 1, + _US_UNWIND_FRAME_RESUME = 2, + _US_ACTION_MASK = 3, + _US_FORCE_UNWIND = 8, + _US_END_OF_STACK = 16 + } + _Unwind_State; + + /* Provided only for for compatibility with existing code. 
*/ + typedef int _Unwind_Action; +#define _UA_SEARCH_PHASE 1 +#define _UA_CLEANUP_PHASE 2 +#define _UA_HANDLER_FRAME 4 +#define _UA_FORCE_UNWIND 8 +#define _UA_END_OF_STACK 16 +#define _URC_NO_REASON _URC_OK + + typedef struct _Unwind_Control_Block _Unwind_Control_Block; + typedef struct _Unwind_Context _Unwind_Context; + typedef _uw _Unwind_EHT_Header; + + + /* UCB: */ + + struct _Unwind_Control_Block + { +#ifdef _LIBC + /* For the benefit of code which assumes this is a scalar. All + glibc ever does is clear it. */ + _uw64 exception_class; +#else + char exception_class[8]; +#endif + void (*exception_cleanup)(_Unwind_Reason_Code, _Unwind_Control_Block *); + /* Unwinder cache, private fields for the unwinder's use */ + struct + { + _uw reserved1; /* Forced unwind stop fn, 0 if not forced */ + _uw reserved2; /* Personality routine address */ + _uw reserved3; /* Saved callsite address */ + _uw reserved4; /* Forced unwind stop arg */ + _uw reserved5; + } + unwinder_cache; + /* Propagation barrier cache (valid after phase 1): */ + struct + { + _uw sp; + _uw bitpattern[5]; + } + barrier_cache; + /* Cleanup cache (preserved over cleanup): */ + struct + { + _uw bitpattern[4]; + } + cleanup_cache; + /* Pr cache (for pr's benefit): */ + struct + { + _uw fnstart; /* function start address */ + _Unwind_EHT_Header *ehtp; /* pointer to EHT entry header word */ + _uw additional; /* additional data */ + _uw reserved1; + } + pr_cache; + long long int :0; /* Force alignment to 8-byte boundary */ + }; + + /* Virtual Register Set*/ + + typedef enum + { + _UVRSC_CORE = 0, /* integer register */ + _UVRSC_VFP = 1, /* vfp */ + _UVRSC_FPA = 2, /* fpa */ + _UVRSC_WMMXD = 3, /* Intel WMMX data register */ + _UVRSC_WMMXC = 4 /* Intel WMMX control register */ + } + _Unwind_VRS_RegClass; + + typedef enum + { + _UVRSD_UINT32 = 0, + _UVRSD_VFPX = 1, + _UVRSD_FPAX = 2, + _UVRSD_UINT64 = 3, + _UVRSD_FLOAT = 4, + _UVRSD_DOUBLE = 5 + } + _Unwind_VRS_DataRepresentation; + + typedef enum + { + _UVRSR_OK = 0, + _UVRSR_NOT_IMPLEMENTED = 1, + _UVRSR_FAILED = 2 + } + _Unwind_VRS_Result; + + /* Frame unwinding state. */ + typedef struct + { + /* The current word (bytes packed msb first). */ + _uw data; + /* Pointer to the next word of data. */ + _uw *next; + /* The number of bytes left in this word. */ + _uw8 bytes_left; + /* The number of words pointed to by ptr. */ + _uw8 words_left; + } + __gnu_unwind_state; + + typedef _Unwind_Reason_Code (*personality_routine) (_Unwind_State, + _Unwind_Control_Block *, _Unwind_Context *); + + _Unwind_VRS_Result _Unwind_VRS_Set(_Unwind_Context *, _Unwind_VRS_RegClass, + _uw, _Unwind_VRS_DataRepresentation, + void *); + + _Unwind_VRS_Result _Unwind_VRS_Get(_Unwind_Context *, _Unwind_VRS_RegClass, + _uw, _Unwind_VRS_DataRepresentation, + void *); + + _Unwind_VRS_Result _Unwind_VRS_Pop(_Unwind_Context *, _Unwind_VRS_RegClass, + _uw, _Unwind_VRS_DataRepresentation); + + + /* Support functions for the PR. */ +#define _Unwind_Exception _Unwind_Control_Block + typedef char _Unwind_Exception_Class[8]; + + void * _Unwind_GetLanguageSpecificData (_Unwind_Context *); + _Unwind_Ptr _Unwind_GetRegionStart (_Unwind_Context *); + + /* These two should never be used. 
*/ + _Unwind_Ptr _Unwind_GetDataRelBase (_Unwind_Context *); + _Unwind_Ptr _Unwind_GetTextRelBase (_Unwind_Context *); + + /* Interface functions: */ + _Unwind_Reason_Code _Unwind_RaiseException(_Unwind_Control_Block *ucbp); + void __attribute__((noreturn)) _Unwind_Resume(_Unwind_Control_Block *ucbp); + _Unwind_Reason_Code _Unwind_Resume_or_Rethrow (_Unwind_Control_Block *ucbp); + + typedef _Unwind_Reason_Code (*_Unwind_Stop_Fn) + (int, _Unwind_Action, _Unwind_Exception_Class, + _Unwind_Control_Block *, struct _Unwind_Context *, void *); + _Unwind_Reason_Code _Unwind_ForcedUnwind (_Unwind_Control_Block *, + _Unwind_Stop_Fn, void *); + _Unwind_Word _Unwind_GetCFA (struct _Unwind_Context *); + void _Unwind_Complete(_Unwind_Control_Block *ucbp); + void _Unwind_DeleteException (_Unwind_Exception *); + + _Unwind_Reason_Code __gnu_unwind_frame (_Unwind_Control_Block *, + _Unwind_Context *); + _Unwind_Reason_Code __gnu_unwind_execute (_Unwind_Context *, + __gnu_unwind_state *); + + /* Decode an R_ARM_TARGET2 relocation. */ + static inline _Unwind_Word + _Unwind_decode_target2 (_Unwind_Word ptr) + { + _Unwind_Word tmp; + + tmp = *(_Unwind_Word *) ptr; + /* Zero values are always NULL. */ + if (!tmp) + return 0; + +#if defined(linux) || defined(__NetBSD__) + /* Pc-relative indirect. */ + tmp += ptr; + tmp = *(_Unwind_Word *) tmp; +#elif defined(__symbian__) + /* Absolute pointer. Nothing more to do. */ +#else + /* Pc-relative pointer. */ + tmp += ptr; +#endif + return tmp; + } + + static inline _Unwind_Word + _Unwind_GetGR (_Unwind_Context *context, int regno) + { + _uw val; + _Unwind_VRS_Get (context, _UVRSC_CORE, regno, _UVRSD_UINT32, &val); + return val; + } + + /* Return the address of the instruction, not the actual IP value. */ +#define _Unwind_GetIP(context) \ + (_Unwind_GetGR (context, 15) & ~(_Unwind_Word)1) + + static inline void + _Unwind_SetGR (_Unwind_Context *context, int regno, _Unwind_Word val) + { + _Unwind_VRS_Set (context, _UVRSC_CORE, regno, _UVRSD_UINT32, &val); + } + + /* The dwarf unwinder doesn't understand arm/thumb state. We assume the + landing pad uses the same instruction set as the call site. */ +#define _Unwind_SetIP(context, val) \ + _Unwind_SetGR (context, 15, val | (_Unwind_GetGR (context, 15) & 1)) + +typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn) + (struct _Unwind_Context *, void *); + +extern _Unwind_Reason_Code _Unwind_Backtrace (_Unwind_Trace_Fn, void *); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* defined UNWIND_ARM_H */ |
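
The GET_TLS helper defined in sysdeps/arm/sysdep.h above returns the TLS base pointer in r0, either by reading the cp15 thread-ID register directly or by falling back to the __aeabi_read_tp ABI routine. At the C level the same value is what GCC's thread-pointer builtin yields; a minimal sketch (illustrative only, the function name is made up):

  /* Return the same thread pointer that GET_TLS leaves in r0, via
     whichever mechanism the compiler is configured to use.  */
  void *
  demo_thread_pointer (void)
  {
    return __builtin_thread_pointer ();
  }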
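
The pointer mangling macros in sysdeps/arm/sysdep.h above protect stored code pointers by XORing them with a per-process guard (held in __pointer_chk_guard, or __pointer_chk_guard_local inside ld.so and the static libc); the PTR_MANGLE2/PTR_DEMANGLE2 assembly forms simply reuse a guard value already loaded into a register. A stand-alone sketch of the C-level scheme, with a made-up guard standing in for the real one:

  #include <assert.h>
  #include <stdint.h>

  static uintptr_t demo_guard = 0x5bd1e995;   /* stand-in for the real guard */

  #define DEMO_PTR_MANGLE(var) \
    ((var) = (__typeof (var)) ((uintptr_t) (var) ^ demo_guard))
  #define DEMO_PTR_DEMANGLE(var) DEMO_PTR_MANGLE (var)   /* XOR is its own inverse */

  int
  main (void)
  {
    int x = 42;
    int *p = &x;
    int *stored = p;
    DEMO_PTR_MANGLE (stored);     /* the value kept in memory is obfuscated */
    assert (stored != p);
    DEMO_PTR_DEMANGLE (stored);   /* recover the usable pointer */
    assert (stored == p && *stored == 42);
    return 0;
  }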
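
The GET_SPECIAL_RELOC/GET_SPECIAL_PCREL helpers in sysdeps/arm/tls-macros.h above materialize relocated words in .rodata (ARM_PC_OFFSET encodes the same 8/4 byte pc bias as PC_OFS in sysdep.h), and TLS_LE/TLS_IE/TLS_LD/TLS_GD build the four TLS access models on top of them. A hedged sketch of how a module might use two of them for a thread-local variable it defines itself, assuming it is compiled inside the glibc test harness where <tls-macros.h> is on the include path (the names tlsvar and bump_tlsvar are made up):

  #include <tls-macros.h>

  __thread int tlsvar;

  int
  bump_tlsvar (void)
  {
    int *gd = TLS_GD (tlsvar);   /* general-dynamic: via __tls_get_addr */
    int *ld = TLS_LD (tlsvar);   /* local-dynamic: module base plus tlsldo offset */
    /* Both expressions name the current thread's copy of tlsvar.  */
    return ++*gd + (gd == ld);
  }

Which of the four models is legal for a given variable depends on where it is defined and how the object is linked; glibc's elf/tst-tls* tests exercise the combinations through these macros.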
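
In _dl_tlsdesc_lazy_resolver_fixup (sysdeps/arm/tlsdesc.c above), bit 31 of the descriptor's argument.value selects between the two cases the resolver handles: when set, the remaining bits are a dynamic symbol table index that still needs _dl_lookup_symbol_x; when clear, the value is already an offset into the module's TLS block. A small illustrative decoder of just that convention (the struct and function names are made up):

  #include <stdbool.h>
  #include <stdint.h>

  struct demo_tlsdesc_arg { uint32_t value; };

  /* Return true if ARG holds a symbol index (lookup still required),
     false if it already holds a TLS offset; either way store the
     payload in *OUT.  */
  static bool
  demo_decode (struct demo_tlsdesc_arg arg, uint32_t *out)
  {
    if (arg.value & 0x80000000u)
      {
        *out = arg.value ^ 0x80000000u;   /* dynamic symbol table index */
        return true;
      }
    *out = arg.value;                     /* offset within the TLS block */
    return false;
  }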
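
sysdeps/arm/tst-audit.h above only maps the generic names used by glibc's PLT-auditing test to the ARM entry points la_arm_gnu_pltenter/la_arm_gnu_pltexit and the La_arm_regs/La_arm_retval register images. For orientation, a minimal LD_AUDIT module skeleton showing just the architecture-independent hooks (the ARM PLT hooks are omitted because their exact prototypes live in <bits/link.h>; this is a sketch, not the test's own code):

  #define _GNU_SOURCE
  #include <link.h>
  #include <stdint.h>
  #include <stdio.h>

  unsigned int
  la_version (unsigned int version)
  {
    return LAV_CURRENT;   /* accept the dynamic linker's audit interface */
  }

  unsigned int
  la_objopen (struct link_map *map, Lmid_t lmid, uintptr_t *cookie)
  {
    fprintf (stderr, "auditing: %s\n", map->l_name);
    return LA_FLG_BINDTO | LA_FLG_BINDFROM;   /* audit bindings in both directions */
  }

Such a module is built with -fPIC -shared and activated by naming the resulting shared object in LD_AUDIT.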
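
With the PERSONALITY_PROTO and PERSONALITY_ARGS definitions from sysdeps/arm/unwind-resume.h above, the generic sysdeps/gnu unwind-resume code gives __gcc_personality_v0 the three-argument EABI shape instead of the canonical DWARF one. Roughly, the expanded prototype looks like this (sketch of the declaration only; the real definition forwards to the personality routine it finds in libgcc_s at runtime):

  _Unwind_Reason_Code
  __gcc_personality_v0 (_Unwind_State state,
                        struct _Unwind_Exception *ue_header,
                        struct _Unwind_Context *context);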
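
Most of sysdeps/arm/unwind.h above is declarations, but the inline helpers at the end show the pattern consumers follow: core registers are accessed through _Unwind_VRS_Get/_Unwind_VRS_Set, and the Thumb bit of r15 is masked off or preserved when it is treated as the IP. A hedged sketch of a caller walking its own stack through the declared interface (requires unwind tables, e.g. compiling with -funwind-tables; the function names are made up):

  #include <stdio.h>
  #include <unwind.h>

  static _Unwind_Reason_Code
  trace_one (struct _Unwind_Context *ctx, void *arg)
  {
    int *depth = arg;
    /* _Unwind_GetIP masks off the Thumb bit of r15, as defined above.  */
    fprintf (stderr, "#%d  pc=%#lx\n", (*depth)++,
             (unsigned long) _Unwind_GetIP (ctx));
    return _URC_NO_REASON;   /* aliased to _URC_OK above: keep unwinding */
  }

  void
  demo_backtrace (void)
  {
    int depth = 0;
    _Unwind_Backtrace (trace_one, &depth);
  }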