summaryrefslogtreecommitdiff
path: root/bfd/elfnn-aarch64.c
diff options
context:
space:
mode:
authorTamar Christina <tamar.christina@arm.com>2019-04-11 11:27:28 +0100
committerTamar Christina <tamar.christina@arm.com>2019-04-11 11:30:03 +0100
commitce12121b63145322b4961bbb2b94b939cb916ba7 (patch)
tree3c30d02e802c3bf01f6f2c2a093d0908fe3b4469 /bfd/elfnn-aarch64.c
parentbd7ceb8d26e011ff3fd23402ec2587d7c374f090 (diff)
downloadbinutils-gdb-ce12121b63145322b4961bbb2b94b939cb916ba7.tar.gz
AArch64: When DF_BIND_NOW don't use TLSDESC GOT value.
When using DF_BIND_NOW on AArch64 we don't reserve the GOT slot for a TLSDESC, but we still emitted DT_TLSDESC_GOT and DT_TLSDESC_PLT. This caused random memory corruption as the "special" value of (bfd_vma)-1 would be set for dt_tlsdesc_got. Since we don't have a value of dt_tlsdesc_got I also don't emit DT_TLSDESC_PLT now because it would point to an incomplete PLT. To be able to write the PLT entry DT_TLSDESC_GOT is needed and since we don't have one we can't write the PLT entry either. It is my understanding that GLIBC doesn't need these two entries when not lazy loading. Conversely AArch32 reserves neither the GOT nor the PLT slot when doing DF_BIND_NOW. AArch32 does not need these checks because these values are initialized to 0 and so the if (...) checks don't pass, but on AArch64 these are initialized to (bfd_vma)-1 and thus we need some extra checks. bfd/ChangeLog: PR ld/24302 * elfnn-aarch64.c (elfNN_aarch64_size_dynamic_sections): Don't emit DT_TLSDESC_GOT and DT_TLSDESC_PLT when DF_BIND_NOW. (elfNN_aarch64_finish_dynamic_sections): Don't write PLT if DF_BIND_NOW. ld/ChangeLog: PR ld/24302 * testsuite/ld-aarch64/aarch64-elf.exp: Add new test. * testsuite/ld-aarch64/tls-relax-gdesc-le-now.d: New test.
Diffstat (limited to 'bfd/elfnn-aarch64.c')
-rw-r--r--bfd/elfnn-aarch64.c13
1 files changed, 8 insertions, 5 deletions
diff --git a/bfd/elfnn-aarch64.c b/bfd/elfnn-aarch64.c
index 57a723d5477..9d4df11f9d4 100644
--- a/bfd/elfnn-aarch64.c
+++ b/bfd/elfnn-aarch64.c
@@ -9064,13 +9064,13 @@ elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
if (htab->root.splt->size == 0)
htab->root.splt->size += htab->plt_header_size;
- htab->tlsdesc_plt = htab->root.splt->size;
- htab->root.splt->size += htab->tlsdesc_plt_entry_size;
-
/* If we're not using lazy TLS relocations, don't generate the
- GOT entry required. */
+ GOT and PLT entry required. */
if (!(info->flags & DF_BIND_NOW))
{
+ htab->tlsdesc_plt = htab->root.splt->size;
+ htab->root.splt->size += htab->tlsdesc_plt_entry_size;
+
htab->dt_tlsdesc_got = htab->root.sgot->size;
htab->root.sgot->size += GOT_ENTRY_SIZE;
}
@@ -9174,6 +9174,7 @@ elfNN_aarch64_size_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
return FALSE;
if (htab->tlsdesc_plt
+ && !(info->flags & DF_BIND_NOW)
&& (!add_dynamic_entry (DT_TLSDESC_PLT, 0)
|| !add_dynamic_entry (DT_TLSDESC_GOT, 0)))
return FALSE;
@@ -9686,6 +9687,7 @@ elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
case DT_TLSDESC_GOT:
s = htab->root.sgot;
+ BFD_ASSERT (htab->dt_tlsdesc_got != (bfd_vma)-1);
dyn.d_un.d_ptr = s->output_section->vma + s->output_offset
+ htab->dt_tlsdesc_got;
break;
@@ -9705,8 +9707,9 @@ elfNN_aarch64_finish_dynamic_sections (bfd *output_bfd,
this_hdr.sh_entsize = htab->plt_entry_size;
- if (htab->tlsdesc_plt)
+ if (htab->tlsdesc_plt && !(info->flags & DF_BIND_NOW))
{
+ BFD_ASSERT (htab->dt_tlsdesc_got != (bfd_vma)-1);
bfd_put_NN (output_bfd, (bfd_vma) 0,
htab->root.sgot->contents + htab->dt_tlsdesc_got);