author    H.J. Lu <hjl.tools@gmail.com>  2011-11-21 15:59:59 -0800
committer H.J. Lu <hjl.tools@gmail.com>  2011-11-23 09:19:28 -0800
commit    d6fc55a36c2d1645c55d6d62b353aee14b1612f3 (patch)
tree      56d329298f8cb6e3945686986a69786365be7d3e
parent    f6d3197799e76ef22ddb1407a49ab280ee2ef639 (diff)
download  glibc-d6fc55a36c2d1645c55d6d62b353aee14b1612f3.tar.gz
Add x32 support to sysdeps/x86_64/dl-machine.h.
-rw-r--r--  ChangeLog.x32                  9
-rw-r--r--  sysdeps/x86_64/dl-machine.h  111
2 files changed, 77 insertions, 43 deletions
diff --git a/ChangeLog.x32 b/ChangeLog.x32
index b231a36150..ad30899fd1 100644
--- a/ChangeLog.x32
+++ b/ChangeLog.x32
@@ -1,5 +1,14 @@
2011-11-21 H.J. Lu <hongjiu.lu@intel.com>
+ * sysdeps/x86_64/dl-machine.h: Replace Elf64_XXX with ElfW(XXX).
+ Replace ELF64_R_TYPE with ELF32_R_TYPE.
+ (elf_machine_load_address): Use ASM_ADDR.
+ (elf_machine_rela): Handle R_X86_64_RELATIVE64 for x32. For x32,
+ sign extend relocation result to 64bit for R_X86_64_DTPOFF64
+ and R_X86_64_TPOFF64, and don't process R_X86_64_64.
+
+2011-11-21 H.J. Lu <hongjiu.lu@intel.com>
+
* sysdeps/x86_64/dl-irel.h: Replace Elf64_XXX with ElfW(XXX).
Replace ELF64_R_TYPE with ELF32_R_TYPE.
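Both ChangeLog entries describe the same mechanical conversion: the ElfW() macro from <link.h> picks the Elf32_* or Elf64_* type matching the native ELF class, so this one source file can serve both the LP64 ABI and the x32 (ELFCLASS32) ABI. The relevant definitions are essentially:

/* <link.h>: ElfW(Addr) expands to Elf32_Addr on x32 and to
   Elf64_Addr on LP64, driven by __ELF_NATIVE_CLASS.  */
#define ElfW(type)      _ElfW (Elf, __ELF_NATIVE_CLASS, type)
#define _ElfW(e,w,t)    _ElfW_1 (e, w, _##t)
#define _ElfW_1(e,w,t)  e##w##t

/* <elf.h>: how r_info splits into symbol index and relocation type.  */
#define ELF32_R_TYPE(val)  ((val) & 0xff)
#define ELF64_R_TYPE(i)    ((i) & 0xffffffff)

Switching to ELF32_R_TYPE is safe on both ABIs because every R_X86_64_* relocation number fits in eight bits: on LP64 the two macros extract the same value from r_info, and on x32 only ELF32_R_TYPE matches the Elf32_Rela layout.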
diff --git a/sysdeps/x86_64/dl-machine.h b/sysdeps/x86_64/dl-machine.h
index f615e9591f..74a8109365 100644
--- a/sysdeps/x86_64/dl-machine.h
+++ b/sysdeps/x86_64/dl-machine.h
@@ -30,7 +30,7 @@
/* Return nonzero iff ELF header is compatible with the running host. */
static inline int __attribute__ ((unused))
-elf_machine_matches_host (const Elf64_Ehdr *ehdr)
+elf_machine_matches_host (const ElfW(Ehdr) *ehdr)
{
return ehdr->e_machine == EM_X86_64;
}
@@ -39,24 +39,24 @@ elf_machine_matches_host (const Elf64_Ehdr *ehdr)
/* Return the link-time address of _DYNAMIC. Conveniently, this is the
first element of the GOT. This must be inlined in a function which
uses global data. */
-static inline Elf64_Addr __attribute__ ((unused))
+static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_dynamic (void)
{
- Elf64_Addr addr;
+ ElfW(Addr) addr;
/* This works because we have our GOT address available in the small PIC
model. */
- addr = (Elf64_Addr) &_DYNAMIC;
+ addr = (ElfW(Addr)) &_DYNAMIC;
return addr;
}
/* Return the run-time load address of the shared object. */
-static inline Elf64_Addr __attribute__ ((unused))
+static inline ElfW(Addr) __attribute__ ((unused))
elf_machine_load_address (void)
{
- Elf64_Addr addr;
+ ElfW(Addr) addr;
/* The easy way is just the same as on x86:
leaq _dl_start, %0
@@ -73,10 +73,10 @@ elf_machine_load_address (void)
load offset which is zero if the binary was loaded at the address
it is prelinked for. */
- asm ("leaq _dl_start(%%rip), %0\n\t"
- "subq 1f(%%rip), %0\n\t"
+ asm ("lea _dl_start(%%rip), %0\n\t"
+ "sub 1f(%%rip), %0\n\t"
".section\t.data.rel.ro\n"
- "1:\t.quad _dl_start\n\t"
+ "1:\t" ASM_ADDR " _dl_start\n\t"
".previous\n\t"
: "=r" (addr) : : "cc");
@@ -90,8 +90,8 @@ static inline int __attribute__ ((unused, always_inline))
elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
{
Elf64_Addr *got;
- extern void _dl_runtime_resolve (Elf64_Word) attribute_hidden;
- extern void _dl_runtime_profile (Elf64_Word) attribute_hidden;
+ extern void _dl_runtime_resolve (ElfW(Word)) attribute_hidden;
+ extern void _dl_runtime_profile (ElfW(Word)) attribute_hidden;
if (l->l_info[DT_JMPREL] && lazy)
{
@@ -106,9 +106,10 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
if (got[1])
{
l->l_mach.plt = got[1] + l->l_addr;
- l->l_mach.gotplt = (Elf64_Addr) &got[3];
+ l->l_mach.gotplt = (ElfW(Addr)) &got[3];
}
- got[1] = (Elf64_Addr) l; /* Identify this shared object. */
+ /* Identify this shared object. */
+ *(ElfW(Addr) *) (got + 1) = (ElfW(Addr)) l;
/* The got[2] entry contains the address of a function which gets
called to get the address of a so far unresolved function and
@@ -118,7 +119,7 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
end in this function. */
if (__builtin_expect (profile, 0))
{
- got[2] = (Elf64_Addr) &_dl_runtime_profile;
+ *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_profile;
if (GLRO(dl_profile) != NULL
&& _dl_name_match_p (GLRO(dl_profile), l))
@@ -129,12 +130,12 @@ elf_machine_runtime_setup (struct link_map *l, int lazy, int profile)
else
/* This function will get called to fix up the GOT entry indicated by
the offset on the stack, and then jump to the resolved address. */
- got[2] = (Elf64_Addr) &_dl_runtime_resolve;
+ *(ElfW(Addr) *) (got + 2) = (ElfW(Addr)) &_dl_runtime_resolve;
}
if (l->l_info[ADDRIDX (DT_TLSDESC_GOT)] && lazy)
- *(Elf64_Addr*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr)
- = (Elf64_Addr) &_dl_tlsdesc_resolve_rela;
+ *(ElfW(Addr)*)(D_PTR (l, l_info[ADDRIDX (DT_TLSDESC_GOT)]) + l->l_addr)
+ = (ElfW(Addr)) &_dl_tlsdesc_resolve_rela;
return lazy;
}
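Note the asymmetry the patch keeps in elf_machine_runtime_setup: got itself stays Elf64_Addr *, since x32 code runs in 64-bit mode and the PLT's pushq/jmp sequences read full 8-byte GOT slots, while stores now go through *(ElfW(Addr) *), writing only the low 4 bytes of a slot on x32. On a little-endian machine that low half sits at the slot's base address, so a 32-bit store yields a correctly zero-extended 64-bit slot, provided the upper half is already zero. A standalone sketch (not from the patch) of that layout:

#include <assert.h>
#include <stdint.h>
#include <string.h>

int main (void)
{
  uint64_t slot = 0;             /* one 8-byte GOT slot, upper half zero */
  uint32_t ptr32 = 0x00401000;   /* a 4-byte x32 pointer value */

  /* Store through the low half of the slot, as
     *(ElfW(Addr) *) (got + 1) does on x32.  */
  memcpy (&slot, &ptr32, sizeof ptr32);

  /* The CPU reading the whole 8-byte slot sees the zero-extended
     address (little-endian assumed, as on all x86).  */
  assert (slot == 0x00401000);
  return 0;
}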
@@ -213,7 +214,7 @@ _dl_start_user:\n\
/* A reloc type used for ld.so cmdline arg lookups to reject PLT entries. */
#define ELF_MACHINE_JMP_SLOT R_X86_64_JUMP_SLOT
-/* The x86-64 never uses Elf64_Rel relocations. */
+/* The x86-64 never uses Elf64_Rel/Elf32_Rel relocations. */
#define ELF_MACHINE_NO_REL 1
/* We define an initialization function. This is called very early in
@@ -228,19 +229,19 @@ dl_platform_init (void)
GLRO(dl_platform) = NULL;
}
-static inline Elf64_Addr
+static inline ElfW(Addr)
elf_machine_fixup_plt (struct link_map *map, lookup_t t,
- const Elf64_Rela *reloc,
- Elf64_Addr *reloc_addr, Elf64_Addr value)
+ const ElfW(Rela) *reloc,
+ ElfW(Addr) *reloc_addr, ElfW(Addr) value)
{
return *reloc_addr = value;
}
/* Return the final value of a plt relocation. On x86-64 the
JUMP_SLOT relocation ignores the addend. */
-static inline Elf64_Addr
-elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
- Elf64_Addr value)
+static inline ElfW(Addr)
+elf_machine_plt_value (struct link_map *map, const ElfW(Rela) *reloc,
+ ElfW(Addr) value)
{
return value;
}
@@ -259,12 +260,12 @@ elf_machine_plt_value (struct link_map *map, const Elf64_Rela *reloc,
auto inline void
__attribute__ ((always_inline))
-elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
- const Elf64_Sym *sym, const struct r_found_version *version,
+elf_machine_rela (struct link_map *map, const ElfW(Rela) *reloc,
+ const ElfW(Sym) *sym, const struct r_found_version *version,
void *const reloc_addr_arg)
{
- Elf64_Addr *const reloc_addr = reloc_addr_arg;
- const unsigned long int r_type = ELF64_R_TYPE (reloc->r_info);
+ ElfW(Addr) *const reloc_addr = reloc_addr_arg;
+ const unsigned long int r_type = ELF32_R_TYPE (reloc->r_info);
# if !defined RTLD_BOOTSTRAP || !defined HAVE_Z_COMBRELOC
if (__builtin_expect (r_type == R_X86_64_RELATIVE, 0))
@@ -285,22 +286,28 @@ elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
}
else
# endif
+# if !defined RTLD_BOOTSTRAP && !defined __LP64__
+ if (__builtin_expect (r_type == R_X86_64_RELATIVE64, 0))
+ *((Elf64_Addr *) (uintptr_t) reloc_addr)
+ = (Elf64_Addr) map->l_addr + reloc->r_addend;
+ else
+# endif
if (__builtin_expect (r_type == R_X86_64_NONE, 0))
return;
else
{
# ifndef RTLD_BOOTSTRAP
- const Elf64_Sym *const refsym = sym;
+ const ElfW(Sym) *const refsym = sym;
# endif
struct link_map *sym_map = RESOLVE_MAP (&sym, version, r_type);
- Elf64_Addr value = (sym == NULL ? 0
- : (Elf64_Addr) sym_map->l_addr + sym->st_value);
+ ElfW(Addr) value = (sym == NULL ? 0
+ : (ElfW(Addr)) sym_map->l_addr + sym->st_value);
if (sym != NULL
&& __builtin_expect (ELFW(ST_TYPE) (sym->st_info) == STT_GNU_IFUNC,
0)
&& __builtin_expect (sym->st_shndx != SHN_UNDEF, 1))
- value = ((Elf64_Addr (*) (void)) value) ();
+ value = ((ElfW(Addr) (*) (void)) value) ();
# if defined RTLD_BOOTSTRAP && !USE___THREAD
assert (r_type == R_X86_64_GLOB_DAT || r_type == R_X86_64_JUMP_SLOT);
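The new RELATIVE64 branch above exists only on x32 (!__LP64__): there, R_X86_64_RELATIVE patches an ElfW(Addr), i.e. a 4-byte field, so a 64-bit data slot holding a local address needs a separate relocation type that writes all 8 bytes. To the best of my understanding of the x32 psABI, such a slot comes from a .quad of a local symbol; a minimal, hypothetical sketch:

        .data
local:
        .quad local     /* an 8-byte slot holding a local address; when
                           linked into an x32 shared object, the object
                           file's R_X86_64_64 against it becomes a
                           dynamic R_X86_64_RELATIVE64 */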
@@ -333,7 +340,13 @@ elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
/* During relocation all TLS symbols are defined and used.
Therefore the offset is already correct. */
if (sym != NULL)
+# ifdef __LP64__
*reloc_addr = sym->st_value + reloc->r_addend;
+# else
+ *(Elf64_Sxword *) reloc_addr
+ = (Elf64_Sxword)
+ ((Elf32_Sword) (sym->st_value + reloc->r_addend));
+# endif
# endif
break;
case R_X86_64_TLSDESC:
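The ChangeLog's "sign extend relocation result to 64bit" shows up here: on x32 the DTPOFF64 arithmetic is done in 32 bits, but the relocated field is 8 bytes and a TLS offset may be negative, so the result goes through an Elf32_Sword -> Elf64_Sxword cast instead of a plain store (the TPOFF64 case below gets the same treatment). A standalone demonstration (not from the patch) of why the widening must be signed:

#include <stdint.h>
#include <stdio.h>

int main (void)
{
  uint32_t off32 = (uint32_t) -16;   /* a negative TLS offset, in 32 bits */

  int64_t zext = (int64_t) (uint64_t) off32;  /* 4294967280: wrong */
  int64_t sext = (int64_t) (int32_t) off32;   /* -16: right */

  printf ("zero-extended: %#jx\nsign-extended: %#jx\n",
          (uintmax_t) zext, (uintmax_t) sext);
  return 0;
}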
@@ -383,15 +396,27 @@ elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
/* We know the offset of the object the symbol is contained in.
It is a negative value which will be added to the
thread pointer. */
+# ifdef __LP64__
*reloc_addr = (sym->st_value + reloc->r_addend
- sym_map->l_tls_offset);
+# else
+ *(Elf64_Sxword *) reloc_addr
+ = (Elf64_Sxword)
+ ((Elf32_Sword) (sym->st_value + reloc->r_addend
+ - sym_map->l_tls_offset));
+# endif
}
break;
# endif
# ifndef RTLD_BOOTSTRAP
case R_X86_64_64:
+# ifdef __LP64__
*reloc_addr = value + reloc->r_addend;
+# else
+ *((Elf64_Addr *) (uintptr_t) reloc_addr)
+ = (Elf64_Addr) value + reloc->r_addend;
+# endif
break;
case R_X86_64_32:
value += reloc->r_addend;
@@ -417,7 +442,7 @@ elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
# ifndef RESOLVE_CONFLICT_FIND_MAP
/* Not needed for dl-conflict.c. */
case R_X86_64_PC32:
- value += reloc->r_addend - (Elf64_Addr) reloc_addr;
+ value += reloc->r_addend - (ElfW(Addr)) reloc_addr;
*(unsigned int *) reloc_addr = value;
if (__builtin_expect (value != (int) value, 0))
{
@@ -445,7 +470,7 @@ elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
# endif
case R_X86_64_IRELATIVE:
value = map->l_addr + reloc->r_addend;
- value = ((Elf64_Addr (*) (void)) value) ();
+ value = ((ElfW(Addr) (*) (void)) value) ();
*reloc_addr = value;
break;
default:
@@ -459,21 +484,21 @@ elf_machine_rela (struct link_map *map, const Elf64_Rela *reloc,
auto inline void
__attribute ((always_inline))
-elf_machine_rela_relative (Elf64_Addr l_addr, const Elf64_Rela *reloc,
+elf_machine_rela_relative (ElfW(Addr) l_addr, const ElfW(Rela) *reloc,
void *const reloc_addr_arg)
{
- Elf64_Addr *const reloc_addr = reloc_addr_arg;
- assert (ELF64_R_TYPE (reloc->r_info) == R_X86_64_RELATIVE);
+ ElfW(Addr) *const reloc_addr = reloc_addr_arg;
+ assert (ELF32_R_TYPE (reloc->r_info) == R_X86_64_RELATIVE);
*reloc_addr = l_addr + reloc->r_addend;
}
auto inline void
__attribute ((always_inline))
elf_machine_lazy_rel (struct link_map *map,
- Elf64_Addr l_addr, const Elf64_Rela *reloc)
+ ElfW(Addr) l_addr, const ElfW(Rela) *reloc)
{
- Elf64_Addr *const reloc_addr = (void *) (l_addr + reloc->r_offset);
- const unsigned long int r_type = ELF64_R_TYPE (reloc->r_info);
+ ElfW(Addr) *const reloc_addr = (void *) (l_addr + reloc->r_offset);
+ const unsigned long int r_type = ELF32_R_TYPE (reloc->r_info);
/* Check for unexpected PLT reloc type. */
if (__builtin_expect (r_type == R_X86_64_JUMP_SLOT, 1))
@@ -483,7 +508,7 @@ elf_machine_lazy_rel (struct link_map *map,
else
*reloc_addr =
map->l_mach.plt
- + (((Elf64_Addr) reloc_addr) - map->l_mach.gotplt) * 2;
+ + (((ElfW(Addr)) reloc_addr) - map->l_mach.gotplt) * 2;
}
else if (__builtin_expect (r_type == R_X86_64_TLSDESC, 1))
{
@@ -496,8 +521,8 @@ elf_machine_lazy_rel (struct link_map *map,
}
else if (__builtin_expect (r_type == R_X86_64_IRELATIVE, 0))
{
- Elf64_Addr value = map->l_addr + reloc->r_addend;
- value = ((Elf64_Addr (*) (void)) value) ();
+ ElfW(Addr) value = map->l_addr + reloc->r_addend;
+ value = ((ElfW(Addr) (*) (void)) value) ();
*reloc_addr = value;
}
else