Diffstat (limited to 'sysdeps/powerpc')
22 files changed, 292 insertions, 62 deletions
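Two things are going on in this patch. First, configure (below) probes whether gas understands the R_PPC_REL16 relocations and, if so, defines HAVE_ASM_PPC_REL16; everywhere that powerpc32 glibc used to find its GOT by branching to the magic symbol 4 bytes before _GLOBAL_OFFSET_TABLE_, it can then instead read the PC with bcl and add a PC-relative offset. Second, the dynamic linker learns about the new-style non-executable PLT, where .plt is an array of addresses rather than executable stubs. A minimal standalone sketch of the new GOT sequence, assuming a powerpc32 target, -fPIC, and an R_PPC_REL16-capable binutils (the function name is illustrative, not part of the patch):

#include <stdio.h>

/* Compute the GOT address the way the patch's new ppc_got() does.
   "=b" requests a base register (not r0); the bcl clobbers lr.  */
static void *
got_address (void)
{
  void *got;
  asm ("bcl 20,31,1f\n"
       "1:	mflr %0\n"
       "	addis %0,%0,_GLOBAL_OFFSET_TABLE_-1b@ha\n"
       "	addi %0,%0,_GLOBAL_OFFSET_TABLE_-1b@l\n"
       : "=b" (got) : : "lr");
  return got;
}

int
main (void)
{
  printf ("GOT is at %p\n", got_address ());
  return 0;
}

The bcl 20,31 form branching to the immediately following instruction is special-cased by PowerPC processors: it is not pushed on the link-register return-prediction stack, so reading the PC this way avoids the mispredicted return that the old bl-to-GOT trick caused.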
diff --git a/sysdeps/powerpc/powerpc32/configure b/sysdeps/powerpc/powerpc32/configure
new file mode 100644
index 0000000000..9ebac38b57
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/configure
@@ -0,0 +1,33 @@
+# This file is generated from configure.in by Autoconf.  DO NOT EDIT!
+ # Local configure fragment for sysdeps/powerpc/powerpc32.
+
+# See whether gas has R_PPC_REL16 relocs.
+echo "$as_me:$LINENO: checking for R_PPC_REL16 gas support" >&5
+echo $ECHO_N "checking for R_PPC_REL16 gas support... $ECHO_C" >&6
+if test "${libc_cv_ppc_rel16+set}" = set; then
+  echo $ECHO_N "(cached) $ECHO_C" >&6
+else
+  cat > conftest.s <<\EOF
+	.text
+	addis 11,30,_GLOBAL_OFFSET_TABLE_-.@ha
+EOF
+if { ac_try='${CC-cc} -c $CFLAGS conftest.s 1>&5'
+  { (eval echo "$as_me:$LINENO: \"$ac_try\"") >&5
+  (eval $ac_try) 2>&5
+  ac_status=$?
+  echo "$as_me:$LINENO: \$? = $ac_status" >&5
+  (exit $ac_status); }; }; then
+  libc_cv_ppc_rel16=yes
+else
+  libc_cv_ppc_rel16=no
+fi
+rm -f conftest*
+fi
+echo "$as_me:$LINENO: result: $libc_cv_ppc_rel16" >&5
+echo "${ECHO_T}$libc_cv_ppc_rel16" >&6
+if test $libc_cv_ppc_rel16 = yes; then
+  cat >>confdefs.h <<\_ACEOF
+#define HAVE_ASM_PPC_REL16 1
+_ACEOF
+
+fi
diff --git a/sysdeps/powerpc/powerpc32/configure.in b/sysdeps/powerpc/powerpc32/configure.in
new file mode 100644
index 0000000000..6d2c41cb9a
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/configure.in
@@ -0,0 +1,18 @@
+GLIBC_PROVIDES dnl See aclocal.m4 in the top level source directory.
+# Local configure fragment for sysdeps/powerpc/powerpc32.
+
+# See whether gas has R_PPC_REL16 relocs.
+AC_CACHE_CHECK(for R_PPC_REL16 gas support, libc_cv_ppc_rel16, [dnl
+cat > conftest.s <<\EOF
+	.text
+	addis 11,30,_GLOBAL_OFFSET_TABLE_-.@ha
+EOF
+if AC_TRY_COMMAND(${CC-cc} -c $CFLAGS conftest.s 1>&AS_MESSAGE_LOG_FD); then
+  libc_cv_ppc_rel16=yes
+else
+  libc_cv_ppc_rel16=no
+fi
+rm -f conftest*])
+if test $libc_cv_ppc_rel16 = yes; then
+  AC_DEFINE(HAVE_ASM_PPC_REL16)
+fi
diff --git a/sysdeps/powerpc/powerpc32/dl-dtprocnum.h b/sysdeps/powerpc/powerpc32/dl-dtprocnum.h
new file mode 100644
index 0000000000..7fe2be7939
--- /dev/null
+++ b/sysdeps/powerpc/powerpc32/dl-dtprocnum.h
@@ -0,0 +1,3 @@
+/* Number of extra dynamic section entries for this architecture.  By
+   default there are none.  */
+#define DT_THISPROCNUM	DT_PPC_NUM
diff --git a/sysdeps/powerpc/powerpc32/dl-machine.h b/sysdeps/powerpc/powerpc32/dl-machine.h
index 545c19b300..496fa71ecc 100644
--- a/sysdeps/powerpc/powerpc32/dl-machine.h
+++ b/sysdeps/powerpc/powerpc32/dl-machine.h
@@ -25,6 +25,10 @@
 #include <assert.h>
 #include <dl-tls.h>
 
+/* Translate a processor specific dynamic tag to the index
+   in l_info array.  */
+#define DT_PPC(x) (DT_PPC_##x - DT_LOPROC + DT_NUM)
+
 /* Return nonzero iff ELF header is compatible with the running host.  */
 static inline int
 elf_machine_matches_host (const Elf32_Ehdr *ehdr)
@@ -32,24 +36,38 @@ elf_machine_matches_host (const Elf32_Ehdr *ehdr)
   return ehdr->e_machine == EM_PPC;
 }
 
+/* Return the value of the GOT pointer.  */
+static inline Elf32_Addr * __attribute__ ((const))
+ppc_got (void)
+{
+  Elf32_Addr *got;
+#ifdef HAVE_ASM_PPC_REL16
+  asm ("bcl 20,31,1f\n"
+       "1:	mflr %0\n"
+       "	addis %0,%0,_GLOBAL_OFFSET_TABLE_-1b@ha\n"
+       "	addi %0,%0,_GLOBAL_OFFSET_TABLE_-1b@l\n"
+       : "=b" (got) : : "lr");
+#else
+  asm (" bl _GLOBAL_OFFSET_TABLE_-4@local"
+       : "=l" (got));
+#endif
+  return got;
+}
 /* Return the link-time address of _DYNAMIC, stored as
    the first value in the GOT. */
-static inline Elf32_Addr
+static inline Elf32_Addr __attribute__ ((const))
 elf_machine_dynamic (void)
 {
-  Elf32_Addr *got;
-  asm (" bl _GLOBAL_OFFSET_TABLE_-4@local"
-       : "=l"(got));
-  return *got;
+  return *ppc_got ();
 }
 
 /* Return the run-time load address of the shared object.  */
-static inline Elf32_Addr
+static inline Elf32_Addr __attribute__ ((const))
 elf_machine_load_address (void)
 {
-  unsigned int *got;
-  unsigned int *branchaddr;
+  Elf32_Addr *branchaddr;
+  Elf32_Addr runtime_dynamic;
 
   /* This is much harder than you'd expect.  Possibly I'm missing something.
      The 'obvious' way:
@@ -80,19 +98,17 @@ elf_machine_load_address (void)
      the address ourselves.  That gives us the following code: */
 
   /* Get address of the 'b _DYNAMIC@local'...  */
-  asm ("bl 0f ;"
+  asm ("bcl 20,31,0f;"
        "b _DYNAMIC@local;"
        "0:"
-       : "=l"(branchaddr));
-
-  /* ... and the address of the GOT.  */
-  asm (" bl _GLOBAL_OFFSET_TABLE_-4@local"
-       : "=l"(got));
+       : "=l" (branchaddr));
 
   /* So now work out the difference between where the branch actually points,
      and the offset of that location in memory from the start of the file.  */
-  return ((Elf32_Addr)branchaddr - *got
-	  + ((int)(*branchaddr << 6 & 0xffffff00) >> 6));
+  runtime_dynamic = ((Elf32_Addr) branchaddr
+		     + ((Elf32_Sword) (*branchaddr << 6 & 0xffffff00) >> 6));
+
+  return runtime_dynamic - elf_machine_dynamic ();
 }
 
 #define ELF_MACHINE_BEFORE_RTLD_RELOC(dynamic_info) /* nothing */
@@ -144,13 +160,69 @@ __elf_preferred_address(struct link_map *loader, size_t maplength,
 /* The PowerPC never uses REL relocations.  */
 #define ELF_MACHINE_NO_REL 1
 
-/* Set up the loaded object described by L so its unrelocated PLT
+/* Set up the loaded object described by MAP so its unrelocated PLT
    entries will jump to the on-demand fixup code in dl-runtime.c.
    Also install a small trampoline to be used by entries that have
    been relocated to an address too far away for a single branch.  */
 extern int __elf_machine_runtime_setup (struct link_map *map,
 					int lazy, int profile);
-#define elf_machine_runtime_setup __elf_machine_runtime_setup
+
+static inline int
+elf_machine_runtime_setup (struct link_map *map,
+			   int lazy, int profile)
+{
+  if (map->l_info[DT_JMPREL] == 0)
+    return lazy;
+
+  if (map->l_info[DT_PPC(GOT)] == 0)
+    /* Handle old style PLT.  */
+    return __elf_machine_runtime_setup (map, lazy, profile);
+
+  /* New style non-exec PLT consisting of an array of addresses.  */
+  map->l_info[DT_PPC(GOT)]->d_un.d_ptr += map->l_addr;
+  if (lazy)
+    {
+      Elf32_Addr *plt, *got, glink;
+      Elf32_Word num_plt_entries;
+      void (*dlrr) (void);
+      extern void _dl_runtime_resolve (void);
+      extern void _dl_prof_resolve (void);
+
+      if (__builtin_expect (!profile, 1))
+	dlrr = _dl_runtime_resolve;
+      else
+	{
+	  if (GLRO(dl_profile) != NULL
+	      && _dl_name_match_p (GLRO(dl_profile), map))
+	    GL(dl_profile_map) = map;
+	  dlrr = _dl_prof_resolve;
+	}
+      got = (Elf32_Addr *) map->l_info[DT_PPC(GOT)]->d_un.d_ptr;
+      glink = got[1];
+      got[1] = (Elf32_Addr) dlrr;
+      got[2] = (Elf32_Addr) map;
+
+      /* Relocate everything in .plt by the load address offset.  */
+      plt = (Elf32_Addr *) D_PTR (map, l_info[DT_PLTGOT]);
+      num_plt_entries = (map->l_info[DT_PLTRELSZ]->d_un.d_val
+			 / sizeof (Elf32_Rela));
+
+      /* If a library is prelinked but we have to relocate anyway,
+	 we have to be able to undo the prelinking of .plt section.
+	 The prelinker saved us at got[1] address of .glink
+	 section's start.  */
+      if (glink)
+	{
+	  glink += map->l_addr;
+	  while (num_plt_entries-- != 0)
+	    *plt++ = glink, glink += 4;
+	}
+      else
+	while (num_plt_entries-- != 0)
+	  *plt++ += map->l_addr;
+    }
+  return lazy;
+}
 
 /* Change the PLT entry whose reloc is 'reloc' to call the actual routine.  */
 extern Elf32_Addr __elf_machine_fixup_plt (struct link_map *map,
@@ -163,7 +235,12 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
 		       const Elf32_Rela *reloc,
 		       Elf32_Addr *reloc_addr, Elf64_Addr finaladdr)
 {
-  return __elf_machine_fixup_plt (map, reloc, reloc_addr, finaladdr);
+  if (map->l_info[DT_PPC(GOT)] == 0)
+    /* Handle old style PLT.  */
+    return __elf_machine_fixup_plt (map, reloc, reloc_addr, finaladdr);
+
+  *reloc_addr = finaladdr;
+  return finaladdr;
 }
 
 /* Return the final value of a plt relocation.  */
@@ -286,11 +363,16 @@ elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
       break;
 #endif /* USE_TLS etc. */
 
-#ifdef RESOLVE_CONFLICT_FIND_MAP
     case R_PPC_JMP_SLOT:
+#ifdef RESOLVE_CONFLICT_FIND_MAP
      RESOLVE_CONFLICT_FIND_MAP (map, reloc_addr);
-      /* FALLTHROUGH */
 #endif
+      if (map->l_info[DT_PPC(GOT)] != 0)
+	{
+	  *reloc_addr = value;
+	  break;
+	}
+      /* FALLTHROUGH */
 
     default:
      __process_machine_rela (map, reloc, sym_map, sym, refsym,
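The reworked elf_machine_load_address above no longer needs the GOT value in a register: it reads the address of a planted `b _DYNAMIC@local` instruction, decodes that branch's displacement to find the run-time _DYNAMIC, and subtracts the link-time value that elf_machine_dynamic reads from GOT[0]. The decode expression `(Elf32_Sword) (*branchaddr << 6 & 0xffffff00) >> 6` unpacks a PowerPC I-form branch, which is 6 opcode bits, a signed 24-bit word-aligned displacement LI, and the AA/LK bits: shifting left 6 discards the opcode, the mask clears the shifted AA/LK bits, and the arithmetic right shift sign-extends LI back into a byte offset. A hedged sketch with a hypothetical instruction word (assumes arithmetic right shift of negative values, as GCC provides):

#include <stdio.h>
#include <stdint.h>

static int32_t
branch_displacement (uint32_t insn)
{
  /* insn << 6 puts LI in the top 24 bits; the mask drops AA/LK;
     the signed shift right sign-extends.  */
  return (int32_t) (insn << 6 & 0xffffff00) >> 6;
}

int
main (void)
{
  /* 0x4bffffd0 encodes "b .-0x30": opcode 18, LI = -0x30 >> 2, AA = LK = 0.  */
  printf ("displacement = %ld\n", (long) branch_displacement (0x4bffffd0));
  return 0;
}

This prints -48; adding it to the branch instruction's own address yields the run-time _DYNAMIC, and the difference from the link-time value is the load bias l_addr.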
diff --git a/sysdeps/powerpc/powerpc32/dl-start.S b/sysdeps/powerpc/powerpc32/dl-start.S
index d72202d4a4..e1f7f6e24a 100644
--- a/sysdeps/powerpc/powerpc32/dl-start.S
+++ b/sysdeps/powerpc/powerpc32/dl-start.S
@@ -47,8 +47,15 @@ ENTRY(_dl_start_user)
    passed by value!).  */
 
 /* Put our GOT pointer in r31, */
+#ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	r31
+	addis	r31,r31,_GLOBAL_OFFSET_TABLE_-1b@ha
+	addi	r31,r31,_GLOBAL_OFFSET_TABLE_-1b@l
+#else
 	bl	_GLOBAL_OFFSET_TABLE_-4@local
 	mflr	r31
+#endif
 /* the address of _start in r30, */
 	mr	r30,r3
 /* &_dl_argc in 29, &_dl_argv in 27, and _dl_loaded in 28.  */
diff --git a/sysdeps/powerpc/powerpc32/elf/start.S b/sysdeps/powerpc/powerpc32/elf/start.S
index 7827357a6c..bafd2ae001 100644
--- a/sysdeps/powerpc/powerpc32/elf/start.S
+++ b/sysdeps/powerpc/powerpc32/elf/start.S
@@ -52,7 +52,7 @@ L(start_addresses):
 	ASM_SIZE_DIRECTIVE(L(start_addresses))
 
 	.section ".text"
-#ifdef PIC
+#if defined PIC && !defined HAVE_ASM_PPC_REL16
 L(start_addressesp):
 	.long	L(start_addresses)-L(branch)
 #endif
@@ -73,11 +73,19 @@ L(branch):
 	mtlr	r0
 	stw	r0,0(r1)
 /* Set r13 to point at the 'small data area', and put the address of
-   start_addresses in r8...  */
+   start_addresses in r8.  Also load the GOT pointer so that new PLT
+   calls work, like the one to __libc_start_main.  */
 #ifdef PIC
+# ifdef HAVE_ASM_PPC_REL16
+	addis	r30,r13,_GLOBAL_OFFSET_TABLE_-L(branch)@ha
+	addis	r8,r13,L(start_addresses)-L(branch)@ha
+	addi	r30,r30,_GLOBAL_OFFSET_TABLE_-L(branch)@l
+	lwzu	r13,L(start_addresses)-L(branch)@l(r8)
+# else
 	lwz	r8,L(start_addressesp)-L(branch)(r13)
 	add	r8,r13,r8
 	lwz	r13,0(r8)
+# endif
 #else
 	lis	r8,L(start_addresses)@ha
 	lwzu	r13,L(start_addresses)@l(r8)
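Both startup paths above now materialize 32-bit PC-relative offsets with addis/addi (or addis/lwzu) pairs against the @ha and @l halves that the R_PPC_REL16 relocations fill in. @ha is "high adjusted" rather than plain high: since the 16-bit @l half is sign-extended when added, the high half must be biased by 0x8000 so the pair recombines exactly. A small sketch of the arithmetic, with an arbitrary offset:

#include <stdio.h>
#include <stdint.h>

/* ha/lo mirror the @ha and @l operators: lo() is the sign-extended
   low 16 bits, ha() the compensated high 16 bits.  */
static uint32_t ha (uint32_t x) { return (x + 0x8000) >> 16; }
static int32_t  lo (uint32_t x) { return (int16_t) (x & 0xffff); }

int
main (void)
{
  uint32_t off = 0x12348765;	/* arbitrary example displacement */
  uint32_t rebuilt = (ha (off) << 16) + (uint32_t) lo (off);
  printf ("0x%08x == 0x%08x\n", off, rebuilt);
  return 0;
}

The same identity is what lets the fpu routines below fold the @l half straight into a load's displacement field, as in lfs fp13,.LC0-1b@l(r9).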
diff --git a/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S b/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
index 6dfe6a67d5..73cc8181f9 100644
--- a/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
+++ b/sysdeps/powerpc/powerpc32/fpu/__longjmp-common.S
@@ -34,8 +34,15 @@ ENTRY (BP_SYM (__longjmp))
 #ifndef __NO_VMX__
 # ifdef PIC
 	mflr	r6
+#  ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	r5
+	addis	r5,r5,_GLOBAL_OFFSET_TABLE_-1b@ha
+	addi	r5,r5,_GLOBAL_OFFSET_TABLE_-1b@l
+#  else
 	bl	_GLOBAL_OFFSET_TABLE_@local-4
 	mflr	r5
+#  endif
 #  ifdef SHARED
 	lwz	r5,_rtld_global_ro@got(r5)
 	mtlr	r6
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_ceil.S b/sysdeps/powerpc/powerpc32/fpu/s_ceil.S
index 7924e34648..13afba88f0 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_ceil.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_ceil.S
@@ -29,11 +29,19 @@ ENTRY (__ceil)
 	mffs	fp11		/* Save current FPU rounding mode.  */
 #ifdef SHARED
 	mflr	r11
+# ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	r9
+	addis	r9,r9,.LC0-1b@ha
+	mtlr	r11
+	lfs	fp13,.LC0-1b@l(r9)
+# else
 	bl	_GLOBAL_OFFSET_TABLE_@local-4
 	mflr	r10
 	lwz	r9,.LC0@got(10)
 	mtlr	r11
 	lfs	fp13,0(r9)
+# endif
 #else
 	lis	r9,.LC0@ha
 	lfs	fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S b/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S
index 9315d8d2df..f8ca1de08c 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_ceilf.S
@@ -20,7 +20,7 @@
 #include <sysdep.h>
 
 	.section .rodata.cst4,"aM",@progbits,4
-	.align 2
+	.align	2
 .LC0:	/* 2**23 */
 	.long 0x4b000000
 
@@ -29,11 +29,19 @@ ENTRY (__ceilf)
 	mffs	fp11		/* Save current FPU rounding mode.  */
 #ifdef SHARED
 	mflr	r11
+# ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	r9
+	addis	r9,r9,.LC0-1b@ha
+	mtlr	r11
+	lfs	fp13,.LC0-1b@l(r9)
+# else
 	bl	_GLOBAL_OFFSET_TABLE_@local-4
 	mflr	r10
 	lwz	r9,.LC0@got(10)
 	mtlr	r11
 	lfs	fp13,0(r9)
+# endif
 #else
 	lis	r9,.LC0@ha
 	lfs	fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_floor.S b/sysdeps/powerpc/powerpc32/fpu/s_floor.S
index c8f59c24a6..5dfe8f2d9a 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_floor.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_floor.S
@@ -29,11 +29,19 @@ ENTRY (__floor)
 	mffs	fp11		/* Save current FPU rounding mode.  */
 #ifdef SHARED
 	mflr	r11
+# ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	r9
+	addis	r9,r9,.LC0-1b@ha
+	mtlr	r11
+	lfs	fp13,.LC0-1b@l(r9)
+# else
 	bl	_GLOBAL_OFFSET_TABLE_@local-4
 	mflr	r10
 	lwz	r9,.LC0@got(10)
 	mtlr	r11
 	lfs	fp13,0(r9)
+# endif
 #else
 	lis	r9,.LC0@ha
 	lfs	fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_floorf.S b/sysdeps/powerpc/powerpc32/fpu/s_floorf.S
index 8ee0644ac9..31b71ad229 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_floorf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_floorf.S
@@ -20,7 +20,7 @@
 #include <sysdep.h>
 
 	.section .rodata.cst4,"aM",@progbits,4
-	.align 2
+	.align	2
 .LC0:	/* 2**23 */
 	.long 0x4b000000
 
@@ -29,11 +29,19 @@ ENTRY (__floorf)
 	mffs	fp11		/* Save current FPU rounding mode.  */
 #ifdef SHARED
 	mflr	r11
+# ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	r9
+	addis	r9,r9,.LC0-1b@ha
+	mtlr	r11
+	lfs	fp13,.LC0-1b@l(r9)
+# else
 	bl	_GLOBAL_OFFSET_TABLE_@local-4
 	mflr	r10
 	lwz	r9,.LC0@got(10)
 	mtlr	r11
 	lfs	fp13,0(r9)
+# endif
 #else
 	lis	r9,.LC0@ha
 	lfs	fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_lround.S b/sysdeps/powerpc/powerpc32/fpu/s_lround.S
index 72fd49ba46..a85743164c 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_lround.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_lround.S
@@ -41,9 +41,16 @@ ENTRY (__lround)
 
 #ifdef SHARED
 	mflr	r11
+# ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	r9
+	addis	r9,r9,.LC0-1b@ha
+	addi	r9,r9,.LC0-1b@l
+# else
 	bl	_GLOBAL_OFFSET_TABLE_@local-4
 	mflr	r10
 	lwz	r9,.LC0@got(10)
+# endif
 	mtlr	r11
 	lfs	fp12,0(r9)
 #else
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_rint.S b/sysdeps/powerpc/powerpc32/fpu/s_rint.S
index 4abdcedfe8..1cfcd78b5c 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_rint.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_rint.S
@@ -31,11 +31,19 @@ ENTRY (__rint)
 
 #ifdef SHARED
 	mflr	r11
+# ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	r9
+	addis	r9,r9,.LC0-1b@ha
+	mtlr	r11
+	lfs	fp13,.LC0-1b@l(r9)
+# else
 	bl	_GLOBAL_OFFSET_TABLE_@local-4
 	mflr	r10
 	lwz	r9,.LC0@got(10)
 	mtlr	r11
 	lfs	fp13,0(r9)
+# endif
 #else
 	lis	r9,.LC0@ha
 	lfs	fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_rintf.S b/sysdeps/powerpc/powerpc32/fpu/s_rintf.S
index d02bd066b8..93c02667fe 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_rintf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_rintf.S
@@ -20,7 +20,7 @@
 #include <sysdep.h>
 
 	.section .rodata.cst4,"aM",@progbits,4
-	.align 2
+	.align	2
 .LC0:	/* 2**23 */
 	.long 0x4b000000
 
@@ -28,11 +28,19 @@ ENTRY (__rintf)
 
 #ifdef SHARED
 	mflr	r11
+# ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	r9
+	addis	r9,r9,.LC0-1b@ha
+	mtlr	r11
+	lfs	fp13,.LC0-1b@l(r9)
+# else
 	bl	_GLOBAL_OFFSET_TABLE_@local-4
 	mflr	r10
 	lwz	r9,.LC0@got(10)
 	mtlr	r11
 	lfs	fp13,0(r9)
+# endif
 #else
 	lis	r9,.LC0@ha
 	lfs	fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_round.S b/sysdeps/powerpc/powerpc32/fpu/s_round.S
index 96fc2984fd..53b45916d1 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_round.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_round.S
@@ -41,9 +41,16 @@ ENTRY (__round)
 	mffs	fp11		/* Save current FPU rounding mode.  */
 #ifdef SHARED
 	mflr	r11
+# ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	r9
+	addis	r9,r9,.LC0-1b@ha
+	addi	r9,r9,.LC0-1b@l
+# else
 	bl	_GLOBAL_OFFSET_TABLE_@local-4
 	mflr	r10
 	lwz	r9,.LC0@got(10)
+# endif
 	mtlr	r11
 	lfs	fp13,0(r9)
 #else
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_roundf.S b/sysdeps/powerpc/powerpc32/fpu/s_roundf.S
index 87965dea80..39ba08655a 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_roundf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_roundf.S
@@ -41,9 +41,16 @@ ENTRY (__roundf )
 	mffs	fp11		/* Save current FPU rounding mode.  */
 #ifdef SHARED
 	mflr	r11
+# ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	r9
+	addis	r9,r9,.LC0-1b@ha
+	addi	r9,r9,.LC0-1b@l
+# else
 	bl	_GLOBAL_OFFSET_TABLE_@local-4
 	mflr	r10
 	lwz	r9,.LC0@got(10)
+# endif
 	mtlr	r11
 	lfs	fp13,0(r9)
 #else
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_trunc.S b/sysdeps/powerpc/powerpc32/fpu/s_trunc.S
index 7a3e705a81..827e8cb940 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_trunc.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_trunc.S
@@ -36,11 +36,19 @@ ENTRY (__trunc)
 	mffs	fp11		/* Save current FPU rounding mode.  */
 #ifdef SHARED
 	mflr	r11
+# ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	r9
+	addis	r9,r9,.LC0-1b@ha
+	mtlr	r11
+	lfs	fp13,.LC0-1b@l(r9)
+# else
 	bl	_GLOBAL_OFFSET_TABLE_@local-4
 	mflr	r10
 	lwz	r9,.LC0@got(10)
 	mtlr	r11
 	lfs	fp13,0(r9)
+# endif
 #else
 	lis	r9,.LC0@ha
 	lfs	fp13,.LC0@l(r9)
diff --git a/sysdeps/powerpc/powerpc32/fpu/s_truncf.S b/sysdeps/powerpc/powerpc32/fpu/s_truncf.S
index 5275c69d29..55e7a74b41 100644
--- a/sysdeps/powerpc/powerpc32/fpu/s_truncf.S
+++ b/sysdeps/powerpc/powerpc32/fpu/s_truncf.S
@@ -20,7 +20,7 @@
 #include <sysdep.h>
 
 	.section .rodata.cst4,"aM",@progbits,4
-	.align 2
+	.align	2
 .LC0:	/* 2**23 */
 	.long 0x4b000000
 
@@ -36,11 +36,19 @@ ENTRY (__truncf)
 	mffs	fp11		/* Save current FPU rounding mode.  */
 #ifdef SHARED
 	mflr	r11
+# ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	r9
+	addis	r9,r9,.LC0-1b@ha
+	mtlr	r11
+	lfs	fp13,.LC0-1b@l(r9)
+# else
 	bl	_GLOBAL_OFFSET_TABLE_@local-4
 	mflr	r10
 	lwz	r9,.LC0@got(10)
 	mtlr	r11
 	lfs	fp13,0(r9)
+# endif
 #else
 	lis	r9,.LC0@ha
 	lfs	fp13,.LC0@l(r9)
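Every one of these fpu routines only changes how the address of .LC0 is formed under SHARED; the constant itself (2**23 = 0x4b000000 for the float variants, 2**52 for the double ones) is the engine of the round-to-integral trick they are built on: once a float's magnitude reaches 2**23 its significand holds no fraction bits, so adding and then subtracting the constant leaves x rounded to an integer in the FPU's current rounding mode, which is why mffs saves that mode first. An illustrative C reduction of the identity, not the glibc implementation (which handles signs, NaNs, and mode switching in assembly):

#include <stdio.h>

static float
round_to_integral (float x)
{
  const float two23 = 8388608.0f;	/* 2**23, the .LC0 payload */
  if (x >= 0.0f && x < two23)
    {
      volatile float t = x + two23;	/* volatile keeps the rounding step */
      return t - two23;
    }
  /* |x| >= 2**23 is already integral; negatives need a mirrored path.  */
  return x;
}

int
main (void)
{
  /* Under the default round-to-nearest-even mode this prints 2.5 -> 2.  */
  printf ("%g -> %g\n", 2.5, (double) round_to_integral (2.5f));
  return 0;
}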
diff --git a/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S b/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
index be2cf4d69d..cf3f215f2d 100644
--- a/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
+++ b/sysdeps/powerpc/powerpc32/fpu/setjmp-common.S
@@ -76,8 +76,15 @@ ENTRY (BP_SYM (__sigsetjmp))
 #ifndef __NO_VMX__
 # ifdef PIC
 	mflr	r6
+#  ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	r5
+	addis	r5,r5,_GLOBAL_OFFSET_TABLE_-1b@ha
+	addi	r5,r5,_GLOBAL_OFFSET_TABLE_-1b@l
+#  else
 	bl	_GLOBAL_OFFSET_TABLE_@local-4
 	mflr	r5
+#  endif
 #  ifdef SHARED
 	lwz	r5,_rtld_global_ro@got(r5)
 	mtlr	r6
diff --git a/sysdeps/powerpc/powerpc32/memset.S b/sysdeps/powerpc/powerpc32/memset.S
index 4c0edc8e45..f09c294674 100644
--- a/sysdeps/powerpc/powerpc32/memset.S
+++ b/sysdeps/powerpc/powerpc32/memset.S
@@ -264,10 +264,17 @@ L(checklinesize):
 	beq	L(medium)
 /* Establishes GOT addressability so we can load __cache_line_size
    from static.  This value was set from the aux vector during startup.  */
+# ifdef HAVE_ASM_PPC_REL16
+	bcl	20,31,1f
+1:	mflr	rGOT
+	addis	rGOT,rGOT,__cache_line_size-1b@ha
+	lwz	rCLS,__cache_line_size-1b@l(rGOT)
+# else
 	bl	_GLOBAL_OFFSET_TABLE_@local-4
 	mflr	rGOT
 	lwz	rGOT,__cache_line_size@got(rGOT)
 	lwz	rCLS,0(rGOT)
+# endif
 	mtlr	rTMP
 #else
 /* Load __cache_line_size from static.  This value was set from the
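The memset hunk keeps reading __cache_line_size, a glibc-internal variable that startup code filled in from the ELF auxiliary vector, so the dcbz block-zeroing loop can match the real cache geometry; only the GOT access that fetches it changes. Outside the library the same auxv entry can be inspected with getauxval, assuming a glibc new enough to provide it (the value is simply 0 where the kernel does not supply AT_DCACHEBSIZE):

#include <stdio.h>
#include <sys/auxv.h>

int
main (void)
{
  unsigned long line = getauxval (AT_DCACHEBSIZE);
  printf ("data cache block size: %lu bytes\n", line);
  return 0;
}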
diff --git a/sysdeps/powerpc/powerpc32/ppc-mcount.S b/sysdeps/powerpc/powerpc32/ppc-mcount.S
index 314c8ee703..c1a08d379c 100644
--- a/sysdeps/powerpc/powerpc32/ppc-mcount.S
+++ b/sysdeps/powerpc/powerpc32/ppc-mcount.S
@@ -1,5 +1,5 @@
 /* PowerPC-specific implementation of profiling support.
-   Copyright (C) 1997, 1999 Free Software Foundation, Inc.
+   Copyright (C) 1997, 1999, 2005 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -24,25 +24,19 @@
 
 #include <sysdep.h>
 
-/* We do profiling as described in the SYSV ELF ABI, _mcount is called
-   with the address of a data word in r0 (that is different for every
-   routine, initialised to 0, and otherwise unused).  The caller has put
-   the address the caller will return to in the usual place on the stack,
-   4(r1).  _mcount is responsible for ensuring that when it returns no
-   argument-passing registers are disturbed, and that the LR is set back
-   to (what the caller sees as) 4(r1).
+/* We do profiling as described in the SYSV ELF ABI, except that glibc
+   _mcount manages its own counters.  The caller has put the address the
+   caller will return to in the usual place on the stack, 4(r1).  _mcount
+   is responsible for ensuring that when it returns no argument-passing
+   registers are disturbed, and that the LR is set back to (what the
+   caller sees as) 4(r1).
 
    This is intended so that the following code can be inserted at the
    front of any routine without changing the routine:
 
-	.data
-	.align	2
-0:	.long	0
-	.previous
 	mflr	r0
-	lis	r11,0b@ha
 	stw	r0,4(r1)
-	addi	r0,r11,0b@l
 	bl	_mcount
 */
diff --git a/sysdeps/powerpc/powerpc32/sysdep.h b/sysdeps/powerpc/powerpc32/sysdep.h
index 775073f325..552f595a10 100644
--- a/sysdeps/powerpc/powerpc32/sysdep.h
+++ b/sysdeps/powerpc/powerpc32/sysdep.h
@@ -29,31 +29,10 @@
 /* The mcount code relies on a the return address being on the stack
    to locate our caller and so it can restore it; so store one just
    for its benefit.  */
-# ifdef PIC
-#  define CALL_MCOUNT \
-	.pushsection; \
-	.section ".data"; \
-	.align ALIGNARG(2); \
-0:.long 0; \
-	.previous; \
-	mflr	r0; \
-	stw	r0,4(r1); \
-	bl	_GLOBAL_OFFSET_TABLE_@local-4; \
-	mflr	r11; \
-	lwz	r0,0b@got(r11); \
-	bl	JUMPTARGET(_mcount);
-# else /* PIC */
-#  define CALL_MCOUNT \
-	.section ".data"; \
-	.align ALIGNARG(2); \
-0:.long 0; \
-	.previous; \
+# define CALL_MCOUNT \
 	mflr	r0; \
-	lis	r11,0b@ha; \
 	stw	r0,4(r1); \
-	addi	r0,r11,0b@l; \
 	bl	JUMPTARGET(_mcount);
-# endif /* PIC */
 #else  /* PROF */
 # define CALL_MCOUNT		/* Do nothing.  */
 #endif /* PROF */
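Tying the PLT changes together: in the new elf_machine_runtime_setup earlier in this patch, lazy binding stores the resolver in got[1] and the object's link_map in got[2], then makes every word of the .plt array point at consecutive 4-byte stubs in .glink. When the object was prelinked, got[1] initially holds the link-time .glink address saved by the prelinker and the entries must be regenerated from it; otherwise each entry is simply rebased by l_addr. The loop lifted into standalone C, with hypothetical values:

#include <stdio.h>
#include <stdint.h>

/* Same two paths as the patch's lazy-PLT loop.  */
static void
setup_plt_lazy (uint32_t *plt, uint32_t num_entries,
		uint32_t glink, uint32_t l_addr)
{
  if (glink)			/* undo prelinking: regenerate entries */
    {
      glink += l_addr;
      while (num_entries-- != 0)
	*plt++ = glink, glink += 4;
    }
  else				/* plain case: rebase by the load offset */
    while (num_entries-- != 0)
      *plt++ += l_addr;
}

int
main (void)
{
  uint32_t plt[3] = { 0, 0, 0 };
  setup_plt_lazy (plt, 3, 0x1500, 0x10000);	/* prelinked case */
  for (int i = 0; i < 3; i++)
    printf ("plt[%d] = 0x%x\n", i, (unsigned) plt[i]);
  return 0;
}

With lazy binding off, the array is filled by R_PPC_JMP_SLOT relocations instead, which is why elf_machine_rela now just stores the resolved value when DT_PPC(GOT) is present.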