Diffstat (limited to 'sysdeps')
-rw-r--r--  sysdeps/alpha/dl-machine.h          | 43
-rw-r--r--  sysdeps/arm/bits/link.h             |  4
-rw-r--r--  sysdeps/arm/dl-machine.h            | 90
-rw-r--r--  sysdeps/generic/ldsodefs.h          |  8
-rw-r--r--  sysdeps/i386/bits/link.h            |  5
-rw-r--r--  sysdeps/i386/dl-machine.h           | 82
-rw-r--r--  sysdeps/powerpc/dl-machine.h        |  6
-rw-r--r--  sysdeps/s390/s390-32/bits/link.h    |  5
-rw-r--r--  sysdeps/s390/s390-32/dl-machine.h   | 17
-rw-r--r--  sysdeps/s390/s390-64/bits/link.h    |  5
-rw-r--r--  sysdeps/s390/s390-64/dl-machine.h   | 17
-rw-r--r--  sysdeps/sh/bits/link.h              |  5
-rw-r--r--  sysdeps/sh/dl-machine.h             | 17
-rw-r--r--  sysdeps/sparc/sparc32/dl-machine.h  | 99
-rw-r--r--  sysdeps/sparc/sparc64/dl-machine.h  | 66
-rw-r--r--  sysdeps/x86_64/bits/link.h          |  5
-rw-r--r--  sysdeps/x86_64/dl-machine.h         | 17
17 files changed, 457 insertions, 34 deletions
diff --git a/sysdeps/alpha/dl-machine.h b/sysdeps/alpha/dl-machine.h
index a039f245db..c93da661bf 100644
--- a/sysdeps/alpha/dl-machine.h
+++ b/sysdeps/alpha/dl-machine.h
@@ -122,8 +122,30 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
*(Elf64_Addr *)(plt + 24) = (Elf64_Addr) l;
/* If the first instruction of the plt entry is not
- "br $28, plt0", we cannot do lazy relocation. */
- lazy = (*(unsigned int *)(plt + 32) == 0xc39ffff7);
+ "br $28, plt0", we have to reinitialize .plt for lazy relocation. */
+ if (*(unsigned int *)(plt + 32) != 0xc39ffff7)
+ {
+ unsigned int val = 0xc39ffff7;
+ unsigned int *slot, *end;
+ const Elf64_Rela *rela = D_PTR (l, l_info[DT_JMPREL]);
+ Elf64_Addr l_addr = l->l_addr;
+
+ /* br t12,.+4; ldq t12,12(t12); nop; jmp t12,(t12),.+4 */
+ *(unsigned long *)plt = 0xa77b000cc3600000;
+ *(unsigned long *)(plt + 8) = 0x6b7b000047ff041f;
+ slot = (unsigned int *)(plt + 32);
+ end = (unsigned int *)(plt + 32
+ + l->l_info[DT_PLTRELSZ]->d_un.d_val / 2);
+ while (slot < end)
+ {
+ /* br at,.plt+0 */
+ *slot = val;
+ *(Elf64_Addr *) rela->r_offset = (Elf64_Addr) slot - l_addr;
+ val -= 3;
+ slot += 3;
+ ++rela;
+ }
+ }
}
return lazy;
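
A standalone sketch (not part of the patch) of why the loop above starts from 0xc39ffff7 and subtracts 3 per slot: the low 21 bits of an Alpha "br" are a signed word displacement from PC + 4, and each 12-byte (3-instruction) PLT slot sits 3 words further from .plt+0 than the previous one. The base address below is made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Compute the target of an Alpha "br" instruction located at PC.  */
static uint64_t
br_target (uint64_t pc, uint32_t insn)
{
  int32_t disp = insn & 0x1fffff;	/* 21-bit word displacement */
  if (disp & 0x100000)			/* sign-extend */
    disp -= 0x200000;
  return pc + 4 + 4 * (int64_t) disp;
}

int
main (void)
{
  uint64_t plt = 0x10000;		/* hypothetical .plt address */
  uint32_t val = 0xc39ffff7;		/* "br $at, .plt+0" for slot 0 */
  for (int i = 0; i < 4; i++, val -= 3)
    printf ("slot %d at 0x%llx branches to 0x%llx\n", i,
	    (unsigned long long) (plt + 32 + 12 * i),
	    (unsigned long long) br_target (plt + 32 + 12 * i, val));
  return 0;
}

Every slot prints the same target, .plt+0, which is what the "br $28, plt0" convention requires.
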
@@ -520,8 +542,23 @@ elf_machine_rela (struct link_map *map,
if (r_type == R_ALPHA_GLOB_DAT)
*reloc_addr = sym_value;
- else if (r_type == R_ALPHA_JMP_SLOT)
+#ifdef RESOLVE_CONFLICT_FIND_MAP
+ /* In .gnu.conflict section, R_ALPHA_JMP_SLOT relocations have
+ R_ALPHA_JMP_SLOT in lower 8 bits and the remaining 24 bits
+ are .rela.plt index. */
+ else if ((r_type & 0xff) == R_ALPHA_JMP_SLOT)
+ {
+ /* elf_machine_fixup_plt needs the map reloc_addr points into,
+ while in _dl_resolve_conflicts map is _dl_loaded. */
+ RESOLVE_CONFLICT_FIND_MAP (map, reloc_addr);
+ reloc = ((const Elf64_Rela *) D_PTR (map, l_info[DT_JMPREL]))
+ + (r_type >> 8);
+ elf_machine_fixup_plt (map, 0, reloc, reloc_addr, sym_value);
+ }
+#else
+ else if (r_type == R_ALPHA_JMP_SLOT)
elf_machine_fixup_plt (map, 0, reloc, reloc_addr, sym_value);
+#endif
#ifndef RTLD_BOOTSTRAP
else if (r_type == R_ALPHA_REFQUAD)
{
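
A standalone sketch (not part of the patch) of the encoding described in the comment above: in .gnu.conflict, the relocation type field carries R_ALPHA_JMP_SLOT in its low 8 bits and the .rela.plt index in the bits above it. The index value is made up; R_ALPHA_JMP_SLOT comes from <elf.h>.

#include <elf.h>
#include <stdio.h>

int
main (void)
{
  /* Build a conflict r_type for .rela.plt entry 123 (made-up index).  */
  unsigned int r_type = (123u << 8) | R_ALPHA_JMP_SLOT;

  if ((r_type & 0xff) == R_ALPHA_JMP_SLOT)
    printf ("JMP_SLOT conflict for .rela.plt entry %u\n", r_type >> 8);
  return 0;
}
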
diff --git a/sysdeps/arm/bits/link.h b/sysdeps/arm/bits/link.h
new file mode 100644
index 0000000000..648976d7d2
--- /dev/null
+++ b/sysdeps/arm/bits/link.h
@@ -0,0 +1,4 @@
+struct link_map_machine
+ {
+ Elf32_Addr plt; /* Address of .plt */
+ };
diff --git a/sysdeps/arm/dl-machine.h b/sysdeps/arm/dl-machine.h
index 2d802b7e9b..cda424757b 100644
--- a/sysdeps/arm/dl-machine.h
+++ b/sysdeps/arm/dl-machine.h
@@ -92,6 +92,11 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
index into the .got section, load ip with &_GLOBAL_OFFSET_TABLE_[3],
and then jump to _GLOBAL_OFFSET_TABLE[2]. */
got = (Elf32_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
+ /* If a library is prelinked but we have to relocate anyway,
+ we have to be able to undo the prelinking of .got.plt.
+ The prelinker saved the address of .plt here for us. */
+ if (got[1])
+ l->l_mach.plt = got[1] + l->l_addr;
got[1] = (Elf32_Addr) l; /* Identify this shared object. */
/* The got[2] entry contains the address of a function which gets
@@ -334,8 +339,9 @@ _dl_start_user:
/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries. */
#define ELF_MACHINE_JMP_SLOT R_ARM_JUMP_SLOT
-/* The ARM never uses Elf32_Rela relocations. */
-#define ELF_MACHINE_NO_RELA 1
+/* ARM never uses Elf32_Rela relocations for the dynamic linker.
+ Prelinked libraries may use Elf32_Rela though. */
+#define ELF_MACHINE_PLT_REL 1
/* We define an initialization function. This is called very early in
_dl_sysdep_start. */
@@ -371,6 +377,12 @@ elf_machine_plt_value (struct link_map *map, const Elf32_Rel *reloc,
#ifdef RESOLVE
+/* ARM never uses Elf32_Rela relocations for the dynamic linker.
+ Prelinked libraries may use Elf32_Rela though. */
+#ifdef RTLD_BOOTSTRAP
+#define ELF_MACHINE_NO_RELA 1
+#endif
+
extern char **_dl_argv;
/* Deal with an out-of-range PC24 reloc. */
@@ -517,6 +529,64 @@ elf_machine_rel (struct link_map *map, const Elf32_Rel *reloc,
}
}
+#ifndef RTLD_BOOTSTRAP
+static inline void
+elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
+ const Elf32_Sym *sym, const struct r_found_version *version,
+ Elf32_Addr *const reloc_addr)
+{
+ const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
+
+ if (__builtin_expect (r_type == R_ARM_RELATIVE, 0))
+ *reloc_addr = map->l_addr + reloc->r_addend;
+#ifndef RTLD_BOOTSTRAP
+ else if (__builtin_expect (r_type == R_ARM_NONE, 0))
+ return;
+#endif
+ else
+ {
+ const Elf32_Sym *const refsym = sym;
+ Elf32_Addr value = RESOLVE (&sym, version, r_type);
+ if (sym)
+ value += sym->st_value;
+
+ switch (r_type)
+ {
+ case R_ARM_GLOB_DAT:
+ case R_ARM_JUMP_SLOT:
+ case R_ARM_ABS32:
+ *reloc_addr = value + reloc->r_addend;
+ break;
+ case R_ARM_PC24:
+ {
+ Elf32_Addr newvalue, topbits;
+
+ newvalue = value + reloc->r_addend - (Elf32_Addr)reloc_addr;
+ topbits = newvalue & 0xfe000000;
+ if (topbits != 0xfe000000 && topbits != 0x00000000)
+ {
+ newvalue = fix_bad_pc24(reloc_addr, value)
+ - (Elf32_Addr)reloc_addr + reloc->r_addend;
+ topbits = newvalue & 0xfe000000;
+ if (topbits != 0xfe000000 && topbits != 0x00000000)
+ {
+ _dl_signal_error (0, map->l_name, NULL,
+ "R_ARM_PC24 relocation out of range");
+ }
+ }
+ newvalue >>= 2;
+ value = (*reloc_addr & 0xff000000) | (newvalue & 0x00ffffff);
+ *reloc_addr = value;
+ }
+ break;
+ default:
+ _dl_reloc_bad_type (map, r_type, 0);
+ break;
+ }
+ }
+}
+#endif
+
static inline void
elf_machine_rel_relative (Elf32_Addr l_addr, const Elf32_Rel *reloc,
Elf32_Addr *const reloc_addr)
@@ -524,6 +594,15 @@ elf_machine_rel_relative (Elf32_Addr l_addr, const Elf32_Rel *reloc,
*reloc_addr += l_addr;
}
+#ifndef RTLD_BOOTSTRAP
+static inline void
+elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
+ Elf32_Addr *const reloc_addr)
+{
+ *reloc_addr = l_addr + reloc->r_addend;
+}
+#endif
+
static inline void
elf_machine_lazy_rel (struct link_map *map,
Elf32_Addr l_addr, const Elf32_Rel *reloc)
@@ -532,7 +611,12 @@ elf_machine_lazy_rel (struct link_map *map,
const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
/* Check for unexpected PLT reloc type. */
if (__builtin_expect (r_type == R_ARM_JUMP_SLOT, 1))
- *reloc_addr += l_addr;
+ {
+ if (__builtin_expect (map->l_mach.plt, 0) == 0)
+ *reloc_addr += l_addr;
+ else
+ *reloc_addr = map->l_mach.plt;
+ }
else
_dl_reloc_bad_type (map, r_type, 1);
}
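
A standalone sketch (not part of the patch), with made-up addresses, of what the new ARM elf_machine_lazy_rel above does. Because the code stores l_mach.plt into the slot with no per-entry offset, it relies on every unprelinked ARM .got.plt entry initially pointing at the start of .plt; undoing prelinking therefore just writes the saved, relocated .plt address back, while a zero got[1] (no prelink data) falls back to the usual "+= l_addr".

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the l_mach part of struct link_map used in the patch.  */
struct fake_link_map_machine { uint32_t plt; };

static void
lazy_fixup (const struct fake_link_map_machine *mach,
	    uint32_t l_addr, uint32_t *reloc_addr)
{
  if (mach->plt == 0)
    *reloc_addr += l_addr;	/* not prelinked: just apply the load bias */
  else
    *reloc_addr = mach->plt;	/* prelinked: point back at .plt[0] */
}

int
main (void)
{
  uint32_t slot = 0x00a4c2f0;	/* made-up prelink-resolved value */
  struct fake_link_map_machine mach = { 0x400082f4 };	/* saved .plt + bias */
  lazy_fixup (&mach, 0x40000000, &slot);
  printf ("slot restored to 0x%x\n", (unsigned int) slot);
  return 0;
}
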
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index 8c2f160160..837d32d0ca 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -207,6 +207,8 @@ extern const char *_dl_profile;
extern struct link_map *_dl_profile_map;
/* Filename of the output file. */
extern const char *_dl_profile_output;
+/* Map of shared object to be prelink traced. */
+extern struct link_map *_dl_trace_prelink_map;
/* If nonzero the appropriate debug information is printed. */
extern int _dl_debug_mask;
@@ -220,6 +222,7 @@ extern int _dl_debug_mask;
#define DL_DEBUG_STATISTICS (1 << 7)
/* This one is used only internally. */
#define DL_DEBUG_HELP (1 << 8)
+#define DL_DEBUG_PRELINK (1 << 9)
/* Expect cache ID. */
extern int _dl_correct_cache_id;
@@ -435,6 +438,11 @@ extern void _dl_reloc_bad_type (struct link_map *map,
unsigned int type, int plt)
internal_function __attribute__ ((__noreturn__));
+/* Resolve conflicts if prelinking. */
+extern void _dl_resolve_conflicts (struct link_map *l,
+ ElfW(Rela) *conflict,
+ ElfW(Rela) *conflictend);
+
/* Check the version dependencies of all objects available through
MAP. If VERBOSE print some more diagnostics. */
extern int _dl_check_all_versions (struct link_map *map, int verbose,
diff --git a/sysdeps/i386/bits/link.h b/sysdeps/i386/bits/link.h
new file mode 100644
index 0000000000..3be9b7eae8
--- /dev/null
+++ b/sysdeps/i386/bits/link.h
@@ -0,0 +1,5 @@
+struct link_map_machine
+ {
+ Elf32_Addr plt; /* Address of .plt + 0x16 */
+ Elf32_Addr gotplt; /* Address of .got + 0x0c */
+ };
diff --git a/sysdeps/i386/dl-machine.h b/sysdeps/i386/dl-machine.h
index 2f7f96d487..b86f11724b 100644
--- a/sysdeps/i386/dl-machine.h
+++ b/sysdeps/i386/dl-machine.h
@@ -87,6 +87,14 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
offset into the .rel.plt section, push _GLOBAL_OFFSET_TABLE_[1],
and then jump to _GLOBAL_OFFSET_TABLE[2]. */
got = (Elf32_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
+ /* If a library is prelinked but we have to relocate anyway,
+ we have to be able to undo the prelinking of .got.plt.
+ The prelinker saved the address of .plt + 0x16 here for us. */
+ if (got[1])
+ {
+ l->l_mach.plt = got[1] + l->l_addr;
+ l->l_mach.gotplt = (Elf32_Addr) &got[3];
+ }
got[1] = (Elf32_Addr) l; /* Identify this shared object. */
/* The got[2] entry contains the address of a function which gets
@@ -258,8 +266,9 @@ _dl_start_user:\n\
/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries. */
#define ELF_MACHINE_JMP_SLOT R_386_JMP_SLOT
-/* The i386 never uses Elf32_Rela relocations. */
-#define ELF_MACHINE_NO_RELA 1
+/* The i386 never uses Elf32_Rela relocations for the dynamic linker.
+ Prelinked libraries may use Elf32_Rela though. */
+#define ELF_MACHINE_PLT_REL 1
/* We define an initialization function. This is called very early in
_dl_sysdep_start. */
@@ -295,6 +304,12 @@ elf_machine_plt_value (struct link_map *map, const Elf32_Rel *reloc,
#ifdef RESOLVE
+/* The i386 never uses Elf32_Rela relocations for the dynamic linker.
+ Prelinked libraries may use Elf32_Rela though. */
+#ifdef RTLD_BOOTSTRAP
+#define ELF_MACHINE_NO_RELA 1
+#endif
+
/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
MAP is the object containing the reloc. */
@@ -378,6 +393,41 @@ elf_machine_rel (struct link_map *map, const Elf32_Rel *reloc,
}
}
+#ifndef RTLD_BOOTSTRAP
+static inline void
+elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
+ const Elf32_Sym *sym, const struct r_found_version *version,
+ Elf32_Addr *const reloc_addr)
+{
+ if (ELF32_R_TYPE (reloc->r_info) == R_386_RELATIVE)
+ *reloc_addr = map->l_addr + reloc->r_addend;
+ else if (ELF32_R_TYPE (reloc->r_info) != R_386_NONE)
+ {
+/* const Elf32_Sym *const refsym = sym; */
+ Elf32_Addr value = RESOLVE (&sym, version, ELF32_R_TYPE (reloc->r_info));
+ if (sym)
+ value += sym->st_value;
+
+ switch (ELF32_R_TYPE (reloc->r_info))
+ {
+ case R_386_GLOB_DAT:
+ case R_386_JMP_SLOT:
+ case R_386_32:
+ *reloc_addr = value + reloc->r_addend;
+ break;
+ case R_386_PC32:
+ *reloc_addr = (value + reloc->r_addend - (Elf32_Addr) reloc_addr);
+ break;
+ default:
+ /* We add these checks in the version to relocate ld.so only
+ if we are still debugging. */
+ _dl_reloc_bad_type (map, ELFW(R_TYPE) (reloc->r_info), 0);
+ break;
+ }
+ }
+}
+#endif
+
static inline void
elf_machine_rel_relative (Elf32_Addr l_addr, const Elf32_Rel *reloc,
Elf32_Addr *const reloc_addr)
@@ -386,6 +436,15 @@ elf_machine_rel_relative (Elf32_Addr l_addr, const Elf32_Rel *reloc,
*reloc_addr += l_addr;
}
+#ifndef RTLD_BOOTSTRAP
+static inline void
+elf_machine_rela_relative (Elf32_Addr l_addr, const Elf32_Rela *reloc,
+ Elf32_Addr *const reloc_addr)
+{
+ *reloc_addr = l_addr + reloc->r_addend;
+}
+#endif
+
static inline void
elf_machine_lazy_rel (struct link_map *map,
Elf32_Addr l_addr, const Elf32_Rel *reloc)
@@ -394,9 +453,26 @@ elf_machine_lazy_rel (struct link_map *map,
const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
/* Check for unexpected PLT reloc type. */
if (__builtin_expect (r_type == R_386_JMP_SLOT, 1))
- *reloc_addr += l_addr;
+ {
+ if (__builtin_expect (map->l_mach.plt, 0) == 0)
+ *reloc_addr += l_addr;
+ else
+ *reloc_addr =
+ map->l_mach.plt
+ + (((Elf32_Addr) reloc_addr) - map->l_mach.gotplt) * 4;
+ }
else
_dl_reloc_bad_type (map, r_type, 1);
}
+#ifndef RTLD_BOOTSTRAP
+
+static inline void
+elf_machine_lazy_rela (struct link_map *map,
+ Elf32_Addr l_addr, const Elf32_Rela *reloc)
+{
+}
+
+#endif
+
#endif /* RESOLVE */
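
A standalone sketch (not part of the patch), with made-up addresses, of the arithmetic in the i386 elf_machine_lazy_rel above. A .got.plt slot is 4 bytes and a PLT entry is 16 bytes on i386, so the slot's offset from &got[3] is multiplied by 4 and added to the saved .plt + 0x16 (the address of the push instruction in the first real PLT entry). The later s390-32, s390-64, SH and x86-64 hunks use the same scheme with factors 8, 4, 7 and 2, matching their PLT-entry/GOT-slot size ratios.

#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint32_t l_addr   = 0x40000000;		/* made-up load bias */
  uint32_t plt      = 0x08048300;		/* made-up link-time .plt */
  uint32_t gotplt   = 0x08049ffc;		/* made-up &got[3] */
  uint32_t mach_plt = plt + 0x16 + l_addr;	/* what got[1] + l_addr gives */

  for (unsigned int i = 0; i < 3; i++)
    {
      uint32_t reloc_addr = gotplt + 4 * i;	/* address of slot i */
      uint32_t restored = mach_plt + (reloc_addr - gotplt) * 4;
      printf ("slot %u -> push of PLT entry %u at 0x%x\n",
	      i, i + 1, (unsigned int) restored);
    }
  return 0;
}
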
diff --git a/sysdeps/powerpc/dl-machine.h b/sysdeps/powerpc/dl-machine.h
index e8b5446875..35b7e55e99 100644
--- a/sysdeps/powerpc/dl-machine.h
+++ b/sysdeps/powerpc/dl-machine.h
@@ -347,6 +347,7 @@ elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
Elf32_Word loadbase, finaladdr;
const int rinfo = ELF32_R_TYPE (reloc->r_info);
+#ifndef RESOLVE_CONFLICT_FIND_MAP
if (__builtin_expect (rinfo == R_PPC_NONE, 0))
return;
@@ -375,6 +376,11 @@ elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
finaladdr = (loadbase + (Elf32_Word) (char *) sym->st_value
+ reloc->r_addend);
}
+#else
+ finaladdr = reloc->r_addend;
+ if (rinfo == R_PPC_JMP_SLOT)
+ RESOLVE_CONFLICT_FIND_MAP (map, reloc_addr);
+#endif
/* A small amount of code is duplicated here for speed. In libc,
more than 90% of the relocs are R_PPC_RELATIVE; in the X11 shared
diff --git a/sysdeps/s390/s390-32/bits/link.h b/sysdeps/s390/s390-32/bits/link.h
new file mode 100644
index 0000000000..962cf56851
--- /dev/null
+++ b/sysdeps/s390/s390-32/bits/link.h
@@ -0,0 +1,5 @@
+struct link_map_machine
+ {
+ Elf32_Addr plt; /* Address of .plt + 0x2c */
+ Elf32_Addr gotplt; /* Address of .got + 0x0c */
+ };
diff --git a/sysdeps/s390/s390-32/dl-machine.h b/sysdeps/s390/s390-32/dl-machine.h
index f72651fba0..fc80877428 100644
--- a/sysdeps/s390/s390-32/dl-machine.h
+++ b/sysdeps/s390/s390-32/dl-machine.h
@@ -92,6 +92,14 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
and then jump to _GLOBAL_OFFSET_TABLE[2]. */
Elf32_Addr *got;
got = (Elf32_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
+ /* If a library is prelinked but we have to relocate anyway,
+ we have to be able to undo the prelinking of .got.plt.
+ The prelinker saved the address of .plt + 0x2c here for us. */
+ if (got[1])
+ {
+ l->l_mach.plt = got[1] + l->l_addr;
+ l->l_mach.gotplt = (Elf32_Addr) &got[3];
+ }
got[1] = (Elf32_Addr) l; /* Identify this shared object. */
/* The got[2] entry contains the address of a function which gets
@@ -454,7 +462,14 @@ elf_machine_lazy_rel (struct link_map *map,
const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
/* Check for unexpected PLT reloc type. */
if (__builtin_expect (r_type == R_390_JMP_SLOT, 1))
- *reloc_addr += l_addr;
+ {
+ if (__builtin_expect (map->l_mach.plt, 0) == 0)
+ *reloc_addr += l_addr;
+ else
+ *reloc_addr =
+ map->l_mach.plt
+ + (((Elf32_Addr) reloc_addr) - map->l_mach.gotplt) * 8;
+ }
else
_dl_reloc_bad_type (map, r_type, 1);
}
diff --git a/sysdeps/s390/s390-64/bits/link.h b/sysdeps/s390/s390-64/bits/link.h
new file mode 100644
index 0000000000..34add4ffaa
--- /dev/null
+++ b/sysdeps/s390/s390-64/bits/link.h
@@ -0,0 +1,5 @@
+struct link_map_machine
+ {
+ Elf64_Addr plt; /* Address of .plt + 0x2e */
+ Elf64_Addr gotplt; /* Address of .got + 0x18 */
+ };
diff --git a/sysdeps/s390/s390-64/dl-machine.h b/sysdeps/s390/s390-64/dl-machine.h
index e77017ab1a..4d4c344ea0 100644
--- a/sysdeps/s390/s390-64/dl-machine.h
+++ b/sysdeps/s390/s390-64/dl-machine.h
@@ -85,6 +85,14 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
and then jump to _GLOBAL_OFFSET_TABLE[2]. */
Elf64_Addr *got;
got = (Elf64_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
+ /* If a library is prelinked but we have to relocate anyway,
+ we have to be able to undo the prelinking of .got.plt.
+ The prelinker saved the address of .plt + 0x2e here for us. */
+ if (got[1])
+ {
+ l->l_mach.plt = got[1] + l->l_addr;
+ l->l_mach.gotplt = (Elf64_Addr) &got[3];
+ }
got[1] = (Elf64_Addr) l; /* Identify this shared object. */
/* The got[2] entry contains the address of a function which gets
@@ -434,7 +442,14 @@ elf_machine_lazy_rel (struct link_map *map,
const unsigned int r_type = ELF32_R_TYPE (reloc->r_info);
/* Check for unexpected PLT reloc type. */
if (__builtin_expect (r_type == R_390_JMP_SLOT, 1))
- *reloc_addr += l_addr;
+ {
+ if (__builtin_expect (map->l_mach.plt, 0) == 0)
+ *reloc_addr += l_addr;
+ else
+ *reloc_addr =
+ map->l_mach.plt
+ + (((Elf64_Addr) reloc_addr) - map->l_mach.gotplt) * 4;
+ }
else
_dl_reloc_bad_type (map, r_type, 1);
}
diff --git a/sysdeps/sh/bits/link.h b/sysdeps/sh/bits/link.h
new file mode 100644
index 0000000000..bb2fbb5f16
--- /dev/null
+++ b/sysdeps/sh/bits/link.h
@@ -0,0 +1,5 @@
+struct link_map_machine
+ {
+ Elf32_Addr plt; /* Address of .plt + 36 */
+ Elf32_Addr gotplt; /* Address of .got + 0x0c */
+ };
diff --git a/sysdeps/sh/dl-machine.h b/sysdeps/sh/dl-machine.h
index dc53c652d0..b303756e44 100644
--- a/sysdeps/sh/dl-machine.h
+++ b/sysdeps/sh/dl-machine.h
@@ -85,6 +85,14 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
offset into the .rela.plt section and _GLOBAL_OFFSET_TABLE_[1],
and then jump to _GLOBAL_OFFSET_TABLE[2]. */
got = (Elf32_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
+ /* If a library is prelinked but we have to relocate anyway,
+ we have to be able to undo the prelinking of .got.plt.
+ The prelinker saved the address of .plt + 36 here for us. */
+ if (got[1])
+ {
+ l->l_mach.plt = got[1] + l->l_addr;
+ l->l_mach.gotplt = (Elf32_Addr) &got[3];
+ }
got[1] = (Elf32_Addr) l; /* Identify this shared object. */
/* The got[2] entry contains the address of a function which gets
@@ -582,7 +590,14 @@ elf_machine_lazy_rel (struct link_map *map,
Elf32_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
/* Check for unexpected PLT reloc type. */
if (ELF32_R_TYPE (reloc->r_info) == R_SH_JMP_SLOT)
- *reloc_addr += l_addr;
+ {
+ if (__builtin_expect (map->l_mach.plt, 0) == 0)
+ *reloc_addr += l_addr;
+ else
+ *reloc_addr =
+ map->l_mach.plt
+ + (((Elf32_Addr) reloc_addr) - map->l_mach.gotplt) * 7;
+ }
else
_dl_reloc_bad_type (map, ELF32_R_TYPE (reloc->r_info), 1);
}
diff --git a/sysdeps/sparc/sparc32/dl-machine.h b/sysdeps/sparc/sparc32/dl-machine.h
index d98848b5dd..19a3897edb 100644
--- a/sysdeps/sparc/sparc32/dl-machine.h
+++ b/sysdeps/sparc/sparc32/dl-machine.h
@@ -23,6 +23,10 @@
#include <sys/param.h>
#include <ldsodefs.h>
+#ifndef VALIDX
+# define VALIDX(tag) (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
+ + DT_EXTRANUM + DT_VALTAGIDX (tag))
+#endif
/* Some SPARC opcodes we need to use for self-modifying code. */
#define OPCODE_NOP 0x01000000 /* nop */
@@ -30,6 +34,7 @@
#define OPCODE_SETHI_G1 0x03000000 /* sethi ?, %g1; add value>>10 */
#define OPCODE_JMP_G1 0x81c06000 /* jmp %g1+?; add lo 10 bits of value */
#define OPCODE_SAVE_SP 0x9de3bfa8 /* save %sp, -(16+6)*4, %sp */
+#define OPCODE_BA 0x30800000 /* b,a ?; add PC-rel word address */
/* Protect some broken versions of gcc from misinterpreting weak addresses. */
#define WEAKADDR(x) ({ __typeof(x) *_px = &x; \
@@ -139,6 +144,37 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
plt[1] = OPCODE_CALL | ((rfunc - (Elf32_Addr) &plt[1]) >> 2);
plt[2] = OPCODE_NOP; /* Fill call delay slot. */
plt[3] = (Elf32_Addr) l;
+ if (__builtin_expect (l->l_info[VALIDX(DT_GNU_PRELINKED)] != NULL, 0)
+ || __builtin_expect (l->l_info [VALIDX (DT_GNU_LIBLISTSZ)] != NULL, 0))
+ {
+ /* Need to reinitialize .plt to undo prelinking. */
+ unsigned long *hwcap;
+ int do_flush;
+ Elf32_Rela *rela = (Elf32_Rela *) D_PTR (l, l_info[DT_JMPREL]);
+ Elf32_Rela *relaend
+ = (Elf32_Rela *) ((char *) rela
+ + l->l_info[DT_PLTRELSZ]->d_un.d_val);
+ weak_extern (_dl_hwcap);
+ hwcap = WEAKADDR(_dl_hwcap);
+ do_flush = (!hwcap || (*hwcap & HWCAP_SPARC_FLUSH));
+
+ /* prelink must ensure there are no R_SPARC_NONE relocs left
+ in .rela.plt. */
+ while (rela < relaend)
+ {
+ *(unsigned int *) rela->r_offset
+ = OPCODE_SETHI_G1 | (rela->r_offset - (Elf32_Addr) plt);
+ *(unsigned int *) (rela->r_offset + 4)
+ = OPCODE_BA | ((((Elf32_Addr) plt
+ - rela->r_offset - 4) >> 2) & 0x3fffff);
+ if (do_flush)
+ {
+ __asm __volatile ("flush %0" : : "r"(rela->r_offset));
+ __asm __volatile ("flush %0+4" : : "r"(rela->r_offset));
+ }
+ ++rela;
+ }
+ }
}
return lazy;
@@ -292,10 +328,10 @@ _dl_start_user:
.previous");
static inline Elf32_Addr
-elf_machine_fixup_plt (struct link_map *map, lookup_t t,
- const Elf32_Rela *reloc,
- Elf32_Addr *reloc_addr, Elf32_Addr value)
+sparc_fixup_plt (const Elf32_Rela *reloc, Elf32_Addr *reloc_addr,
+ Elf32_Addr value, int t)
{
+ Elf32_Sword disp = value - (Elf32_Addr) reloc_addr;
#ifndef RTLD_BOOTSTRAP
/* Note that we don't mask the hwcap here, as the flush is essential to
functionality on those cpu's that implement it. */
@@ -309,23 +345,44 @@ elf_machine_fixup_plt (struct link_map *map, lookup_t t,
ld.so will not execute corrupt PLT entry instructions. */
const int do_flush = 1;
#endif
+
+ if (0 && disp >= -0x800000 && disp < 0x800000)
+ {
+ /* Don't need to worry about thread safety. We're writing just one
+ instruction. */
- /* For thread safety, write the instructions from the bottom and
- flush before we overwrite the critical "b,a". This of course
- need not be done during bootstrapping, since there are no threads.
- But we also can't tell if we _can_ use flush, so don't. */
-
- reloc_addr[2] = OPCODE_JMP_G1 | (value & 0x3ff);
- if (do_flush)
- __asm __volatile ("flush %0+8" : : "r"(reloc_addr));
-
- reloc_addr[1] = OPCODE_SETHI_G1 | (value >> 10);
- if (do_flush)
- __asm __volatile ("flush %0+4" : : "r"(reloc_addr));
+ reloc_addr[0] = OPCODE_BA | ((disp >> 2) & 0x3fffff);
+ if (do_flush)
+ __asm __volatile ("flush %0" : : "r"(reloc_addr));
+ }
+ else
+ {
+ /* For thread safety, write the instructions from the bottom and
+ flush before we overwrite the critical "b,a". This of course
+ need not be done during bootstrapping, since there are no threads.
+ But we also can't tell if we _can_ use flush, so don't. */
+
+ reloc_addr += t;
+ reloc_addr[1] = OPCODE_JMP_G1 | (value & 0x3ff);
+ if (do_flush)
+ __asm __volatile ("flush %0+4" : : "r"(reloc_addr));
+
+ reloc_addr[0] = OPCODE_SETHI_G1 | (value >> 10);
+ if (do_flush)
+ __asm __volatile ("flush %0" : : "r"(reloc_addr));
+ }
return value;
}
+static inline Elf32_Addr
+elf_machine_fixup_plt (struct link_map *map, lookup_t t,
+ const Elf32_Rela *reloc,
+ Elf32_Addr *reloc_addr, Elf32_Addr value)
+{
+ return sparc_fixup_plt (reloc, reloc_addr, value, 1);
+}
+
/* Return the final value of a plt relocation. */
static inline Elf32_Addr
elf_machine_plt_value (struct link_map *map, const Elf32_Rela *reloc,
@@ -366,10 +423,11 @@ elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
else
#endif
{
-#ifndef RTLD_BOOTSTRAP
+#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
const Elf32_Sym *const refsym = sym;
#endif
Elf32_Addr value;
+#ifndef RESOLVE_CONFLICT_FIND_MAP
if (sym->st_shndx != SHN_UNDEF &&
ELF32_ST_BIND (sym->st_info) == STB_LOCAL)
value = map->l_addr;
@@ -379,11 +437,14 @@ elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
if (sym)
value += sym->st_value;
}
+#else
+ value = 0;
+#endif
value += reloc->r_addend; /* Assume copy relocs have zero addend. */
switch (r_type)
{
-#ifndef RTLD_BOOTSTRAP
+#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
case R_SPARC_COPY:
if (sym == NULL)
/* This can happen in trace mode if an object could not be
@@ -410,7 +471,9 @@ elf_machine_rela (struct link_map *map, const Elf32_Rela *reloc,
*reloc_addr = value;
break;
case R_SPARC_JMP_SLOT:
- elf_machine_fixup_plt(map, 0, reloc, reloc_addr, value);
+ /* At this point we don't need to bother with thread safety,
+ so we can optimize the first instruction of .plt out. */
+ sparc_fixup_plt (reloc, reloc_addr, value, 0);
break;
#ifndef RTLD_BOOTSTRAP
case R_SPARC_8:
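
A standalone sketch (not part of the patch) of how the two instructions written by sparc_fixup_plt above encode the resolved address: "sethi %hi(value), %g1" carries the top 22 bits and "jmp %g1 + %lo(value)" the low 10 bits, so the pair reaches any 32-bit target. The opcode constants are the ones defined near the top of the sparc32 file; the target address is made up. Writing (and flushing) the jmp before the sethi, as the patch does, keeps a concurrently executing thread from ever seeing a half-patched entry.

#include <stdint.h>
#include <stdio.h>

#define OPCODE_SETHI_G1 0x03000000u	/* sethi ?, %g1 */
#define OPCODE_JMP_G1   0x81c06000u	/* jmp %g1 + ?   */

int
main (void)
{
  uint32_t value = 0x400123a8;				/* made-up target */
  uint32_t sethi = OPCODE_SETHI_G1 | (value >> 10);	/* top 22 bits */
  uint32_t jmp   = OPCODE_JMP_G1 | (value & 0x3ff);	/* low 10 bits */

  /* Reassemble the target the way the CPU sees it.  */
  uint32_t target = ((sethi & 0x3fffff) << 10) | (jmp & 0x3ff);
  printf ("sethi=0x%x jmp=0x%x -> target 0x%x\n",
	  (unsigned int) sethi, (unsigned int) jmp, (unsigned int) target);
  return 0;
}
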
diff --git a/sysdeps/sparc/sparc64/dl-machine.h b/sysdeps/sparc/sparc64/dl-machine.h
index 9d2f2187ae..913f98a5e1 100644
--- a/sysdeps/sparc/sparc64/dl-machine.h
+++ b/sysdeps/sparc/sparc64/dl-machine.h
@@ -24,6 +24,11 @@
#include <ldsodefs.h>
#include <sysdep.h>
+#ifndef VALIDX
+# define VALIDX(tag) (DT_NUM + DT_THISPROCNUM + DT_VERSIONTAGNUM \
+ + DT_EXTRANUM + DT_VALTAGIDX (tag))
+#endif
+
#define ELF64_R_TYPE_ID(info) ((info) & 0xff)
#define ELF64_R_TYPE_DATA(info) ((info) >> 8)
@@ -147,7 +152,7 @@ sparc64_fixup_plt (struct link_map *map, const Elf64_Rela *reloc,
insns[1] = 0x40000000 | (displacement >> 2);
__asm __volatile ("flush %0 + 4" : : "r" (insns));
- insns[t] = 0x8210000f;
+ insns[0] = 0x8210000f;
__asm __volatile ("flush %0" : : "r" (insns));
}
/* Worst case, ho hum... */
@@ -251,10 +256,11 @@ elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
else
#endif
{
-#ifndef RTLD_BOOTSTRAP
+#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
const Elf64_Sym *const refsym = sym;
#endif
Elf64_Addr value;
+#ifndef RESOLVE_CONFLICT_FIND_MAP
if (sym->st_shndx != SHN_UNDEF &&
ELF64_ST_BIND (sym->st_info) == STB_LOCAL)
value = map->l_addr;
@@ -264,11 +270,14 @@ elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
if (sym)
value += sym->st_value;
}
+#else
+ value = 0;
+#endif
value += reloc->r_addend; /* Assume copy relocs have zero addend. */
switch (r_type)
{
-#ifndef RTLD_BOOTSTRAP
+#if !defined RTLD_BOOTSTRAP && !defined RESOLVE_CONFLICT_FIND_MAP
case R_SPARC_COPY:
if (sym == NULL)
/* This can happen in trace mode if an object could not be
@@ -371,8 +380,18 @@ elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
break;
#endif
case R_SPARC_JMP_SLOT:
+#ifdef RESOLVE_CONFLICT_FIND_MAP
+ /* R_SPARC_JMP_SLOT conflicts against .plt[32768+]
+ relocs should be turned into R_SPARC_64 relocs
+ in .gnu.conflict section.
+ r_addend non-zero does not mean it is a .plt[32768+]
+ reloc, instead it is the actual address of the function
+ to call. */
+ sparc64_fixup_plt (NULL, reloc, reloc_addr, value, 0, 0);
+#else
sparc64_fixup_plt (map, reloc, reloc_addr, value,
reloc->r_addend, 0);
+#endif
break;
#ifndef RTLD_BOOTSTRAP
case R_SPARC_UA16:
@@ -536,6 +555,47 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
/* Now put the magic cookie at the beginning of .PLT2
Entry .PLT3 is unused by this implementation. */
*((struct link_map **)(&plt[16 + 0])) = l;
+
+ if (__builtin_expect (l->l_info[VALIDX(DT_GNU_PRELINKED)] != NULL, 0)
+ || __builtin_expect (l->l_info [VALIDX (DT_GNU_LIBLISTSZ)] != NULL, 0))
+ {
+ /* Need to reinitialize .plt to undo prelinking. */
+ Elf64_Rela *rela = (Elf64_Rela *) D_PTR (l, l_info[DT_JMPREL]);
+ Elf64_Rela *relaend
+ = (Elf64_Rela *) ((char *) rela
+ + l->l_info[DT_PLTRELSZ]->d_un.d_val);
+
+ /* prelink must ensure there are no R_SPARC_NONE relocs left
+ in .rela.plt. */
+ while (rela < relaend)
+ {
+ if (__builtin_expect (rela->r_addend, 0) != 0)
+ {
+ Elf64_Addr slot = ((rela->r_offset + 0x400
+ - (Elf64_Addr) plt)
+ / 0x1400) * 0x1400
+ + (Elf64_Addr) plt - 0x400;
+ /* ldx [%o7 + X], %g1 */
+ unsigned int first_ldx = *(unsigned int *)(slot + 12);
+ Elf64_Addr ptr = slot + (first_ldx & 0xfff) + 4;
+
+ *(Elf64_Addr *) rela->r_offset
+ = (Elf64_Addr) plt
+ - (slot + ((rela->r_offset - ptr) / 8) * 24 + 4);
+ ++rela;
+ continue;
+ }
+
+ *(unsigned int *) rela->r_offset
+ = 0x03000000 | (rela->r_offset - (Elf64_Addr) plt);
+ *(unsigned int *) (rela->r_offset + 4)
+ = 0x30680000 | ((((Elf64_Addr) plt + 32
+ - rela->r_offset - 4) >> 2) & 0x7ffff);
+ __asm __volatile ("flush %0" : : "r" (rela->r_offset));
+ __asm __volatile ("flush %0+4" : : "r" (rela->r_offset));
+ ++rela;
+ }
+ }
}
return lazy;
diff --git a/sysdeps/x86_64/bits/link.h b/sysdeps/x86_64/bits/link.h
new file mode 100644
index 0000000000..21c294a73c
--- /dev/null
+++ b/sysdeps/x86_64/bits/link.h
@@ -0,0 +1,5 @@
+struct link_map_machine
+ {
+ Elf64_Addr plt; /* Address of .plt + 0x16 */
+ Elf64_Addr gotplt; /* Address of .got + 0x18 */
+ };
diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
index 268c2c31d6..955239dd27 100644
--- a/sysdeps/x86_64/dl-machine.h
+++ b/sysdeps/x86_64/dl-machine.h
@@ -76,6 +76,14 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
offset into the .rel.plt section, push _GLOBAL_OFFSET_TABLE_[1],
and then jump to _GLOBAL_OFFSET_TABLE[2]. */
got = (Elf64_Addr *) D_PTR (l, l_info[DT_PLTGOT]);
+ /* If a library is prelinked but we have to relocate anyway,
+ we have to be able to undo the prelinking of .got.plt.
+ The prelinker saved the address of .plt + 0x16 here for us. */
+ if (got[1])
+ {
+ l->l_mach.plt = got[1] + l->l_addr;
+ l->l_mach.gotplt = (Elf64_Addr) &got[3];
+ }
got[1] = (Elf64_Addr) l; /* Identify this shared object. */
/* The got[2] entry contains the address of a function which gets
@@ -409,7 +417,14 @@ elf_machine_lazy_rel (struct link_map *map,
/* Check for unexpected PLT reloc type. */
if (__builtin_expect (r_type == R_X86_64_JUMP_SLOT, 1))
- *reloc_addr += l_addr;
+ {
+ if (__builtin_expect (map->l_mach.plt, 0) == 0)
+ *reloc_addr += l_addr;
+ else
+ *reloc_addr =
+ map->l_mach.plt
+ + (((Elf64_Addr) reloc_addr) - map->l_mach.gotplt) * 2;
+ }
else
_dl_reloc_bad_type (map, r_type, 1);
}