author      Alan Modra <amodra@gmail.com>    2015-08-26 17:32:08 +0930
committer   Alan Modra <amodra@gmail.com>    2015-08-26 23:34:10 +0930
commit      0e28778672160ee57d12fcc4f0e631122088fe69
tree        acc29a02c3b5e3a00d0aa0ad62d3bd74ed05ce67  /bfd/elflink.c
parent      c97c330ba8e80776c546e5f562916bf077e358b3
download    binutils-gdb-0e28778672160ee57d12fcc4f0e631122088fe69.tar.gz
Use stable sort for ld -r relocs
A number of targets emit multiple relocs at a given r_offset, and
depend on those relocs staying in their original order.
PR 18867
* elflink.c (cmp_ext32l_r_offset, cmp_ext32b_r_offset): Delete.
(cmp_ext64l_r_offset, cmp_ext64b_r_offset): Delete.
(ext32l_r_offset, ext32b_r_offset, ext64l_r_offset, ext64b_r_offset):
New functions.
(elf_link_adjust_relocs): Use an insertion sort to sort relocs.
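
The property the commit message describes can be shown outside of BFD: relocs sharing an r_offset (a HI16/LO16 style pair, say) must leave the sort in the order they entered it, and the C standard makes no such stability guarantee for qsort. The following standalone sketch is illustrative only; the struct and function names here are invented and are not part of the patch.

/* Minimal standalone sketch (not BFD code) of the property the commit
   relies on: a sort keyed on r_offset must keep relocs that share an
   r_offset in their original order.  */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct fake_rel
{
  uint64_t r_offset;	/* Sort key.  */
  int orig_index;	/* Original position, used to show stability.  */
};

/* Stable insertion sort by r_offset: an element only moves past
   elements that are strictly greater, so equal keys never swap.  */
static void
stable_sort_by_offset (struct fake_rel *r, size_t n)
{
  for (size_t i = 1; i < n; i++)
    {
      struct fake_rel tmp = r[i];
      size_t j = i;
      while (j > 0 && r[j - 1].r_offset > tmp.r_offset)
	{
	  r[j] = r[j - 1];
	  j--;
	}
      r[j] = tmp;
    }
}

int
main (void)
{
  /* Two relocs at offset 0x10, as a paired-reloc target might emit.  */
  struct fake_rel r[] = {
    { 0x20, 0 }, { 0x10, 1 }, { 0x10, 2 }, { 0x08, 3 }
  };
  size_t n = sizeof r / sizeof r[0];

  stable_sort_by_offset (r, n);
  for (size_t i = 0; i < n; i++)
    printf ("r_offset=0x%02llx orig_index=%d\n",
	    (unsigned long long) r[i].r_offset, r[i].orig_index);
  /* The two 0x10 entries come out with orig_index 1 then 2, i.e. in
     their original order; qsort is not required to preserve that.  */
  return 0;
}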
Diffstat (limited to 'bfd/elflink.c')
-rw-r--r--   bfd/elflink.c   149
1 file changed, 77 insertions, 72 deletions
diff --git a/bfd/elflink.c b/bfd/elflink.c
index 76f6493e3af..192ce15aa36 100644
--- a/bfd/elflink.c
+++ b/bfd/elflink.c
@@ -8102,10 +8102,12 @@ bfd_elf_perform_complex_relocation (bfd *input_bfd,
   return r;
 }
 
-/* qsort comparison functions sorting external relocs by r_offset.  */
+/* Functions to read r_offset from external (target order) reloc
+   entry.  Faster than bfd_getl32 et al, because we let the compiler
+   know the value is aligned.  */
 
-static int
-cmp_ext32l_r_offset (const void *p, const void *q)
+static bfd_vma
+ext32l_r_offset (const void *p)
 {
   union aligned32
   {
@@ -8113,27 +8115,17 @@ cmp_ext32l_r_offset (const void *p, const void *q)
     unsigned char c[4];
   };
   const union aligned32 *a
-    = (const union aligned32 *) ((const Elf32_External_Rel *) p)->r_offset;
-  const union aligned32 *b
-    = (const union aligned32 *) ((const Elf32_External_Rel *) q)->r_offset;
+    = (const union aligned32 *) &((const Elf32_External_Rel *) p)->r_offset;
 
   uint32_t aval = (  (uint32_t) a->c[0]
                    | (uint32_t) a->c[1] << 8
                    | (uint32_t) a->c[2] << 16
                    | (uint32_t) a->c[3] << 24);
-  uint32_t bval = (  (uint32_t) b->c[0]
-                   | (uint32_t) b->c[1] << 8
-                   | (uint32_t) b->c[2] << 16
-                   | (uint32_t) b->c[3] << 24);
-  if (aval < bval)
-    return -1;
-  else if (aval > bval)
-    return 1;
-  return 0;
+  return aval;
 }
 
-static int
-cmp_ext32b_r_offset (const void *p, const void *q)
+static bfd_vma
+ext32b_r_offset (const void *p)
 {
   union aligned32
   {
@@ -8141,28 +8133,18 @@ cmp_ext32b_r_offset (const void *p, const void *q)
     unsigned char c[4];
   };
   const union aligned32 *a
-    = (const union aligned32 *) ((const Elf32_External_Rel *) p)->r_offset;
-  const union aligned32 *b
-    = (const union aligned32 *) ((const Elf32_External_Rel *) q)->r_offset;
+    = (const union aligned32 *) &((const Elf32_External_Rel *) p)->r_offset;
 
   uint32_t aval = (  (uint32_t) a->c[0] << 24
                    | (uint32_t) a->c[1] << 16
                    | (uint32_t) a->c[2] << 8
                    | (uint32_t) a->c[3]);
-  uint32_t bval = (  (uint32_t) b->c[0] << 24
-                   | (uint32_t) b->c[1] << 16
-                   | (uint32_t) b->c[2] << 8
-                   | (uint32_t) b->c[3]);
-  if (aval < bval)
-    return -1;
-  else if (aval > bval)
-    return 1;
-  return 0;
+  return aval;
 }
 
 #ifdef BFD_HOST_64_BIT
-static int
-cmp_ext64l_r_offset (const void *p, const void *q)
+static bfd_vma
+ext64l_r_offset (const void *p)
 {
   union aligned64
   {
@@ -8170,9 +8152,7 @@ cmp_ext64l_r_offset (const void *p, const void *q)
     unsigned char c[8];
   };
   const union aligned64 *a
-    = (const union aligned64 *) ((const Elf64_External_Rel *) p)->r_offset;
-  const union aligned64 *b
-    = (const union aligned64 *) ((const Elf64_External_Rel *) q)->r_offset;
+    = (const union aligned64 *) &((const Elf64_External_Rel *) p)->r_offset;
 
   uint64_t aval = (  (uint64_t) a->c[0]
                    | (uint64_t) a->c[1] << 8
@@ -8182,23 +8162,11 @@ cmp_ext64l_r_offset (const void *p, const void *q)
                    | (uint64_t) a->c[5] << 40
                    | (uint64_t) a->c[6] << 48
                    | (uint64_t) a->c[7] << 56);
-  uint64_t bval = (  (uint64_t) b->c[0]
-                   | (uint64_t) b->c[1] << 8
-                   | (uint64_t) b->c[2] << 16
-                   | (uint64_t) b->c[3] << 24
-                   | (uint64_t) b->c[4] << 32
-                   | (uint64_t) b->c[5] << 40
-                   | (uint64_t) b->c[6] << 48
-                   | (uint64_t) b->c[7] << 56);
-  if (aval < bval)
-    return -1;
-  else if (aval > bval)
-    return 1;
-  return 0;
+  return aval;
 }
 
-static int
-cmp_ext64b_r_offset (const void *p, const void *q)
+static bfd_vma
+ext64b_r_offset (const void *p)
 {
   union aligned64
   {
@@ -8206,9 +8174,7 @@ cmp_ext64b_r_offset (const void *p, const void *q)
     unsigned char c[8];
   };
   const union aligned64 *a
-    = (const union aligned64 *) ((const Elf64_External_Rel *) p)->r_offset;
-  const union aligned64 *b
-    = (const union aligned64 *) ((const Elf64_External_Rel *) q)->r_offset;
+    = (const union aligned64 *) &((const Elf64_External_Rel *) p)->r_offset;
 
   uint64_t aval = (  (uint64_t) a->c[0] << 56
                    | (uint64_t) a->c[1] << 48
@@ -8218,19 +8184,7 @@ cmp_ext64b_r_offset (const void *p, const void *q)
                    | (uint64_t) a->c[5] << 16
                    | (uint64_t) a->c[6] << 8
                    | (uint64_t) a->c[7]);
-  uint64_t bval = (  (uint64_t) b->c[0] << 56
-                   | (uint64_t) b->c[1] << 48
-                   | (uint64_t) b->c[2] << 40
-                   | (uint64_t) b->c[3] << 32
-                   | (uint64_t) b->c[4] << 24
-                   | (uint64_t) b->c[5] << 16
-                   | (uint64_t) b->c[6] << 8
-                   | (uint64_t) b->c[7]);
-  if (aval < bval)
-    return -1;
-  else if (aval > bval)
-    return 1;
-  return 0;
+  return aval;
 }
 #endif
 
@@ -8299,16 +8253,20 @@ elf_link_adjust_relocs (bfd *abfd,
       (*swap_out) (abfd, irela, erela);
     }
 
-  if (sort)
+  if (sort && count != 0)
     {
-      int (*compare) (const void *, const void *);
+      bfd_vma (*ext_r_off) (const void *);
+      bfd_vma r_off;
+      size_t elt_size;
+      bfd_byte *base, *end, *p, *loc;
+      bfd_byte buf[sizeof (Elf64_External_Rela)];
 
       if (bed->s->arch_size == 32)
         {
           if (abfd->xvec->header_byteorder == BFD_ENDIAN_LITTLE)
-            compare = cmp_ext32l_r_offset;
+            ext_r_off = ext32l_r_offset;
           else if (abfd->xvec->header_byteorder == BFD_ENDIAN_BIG)
-            compare = cmp_ext32b_r_offset;
+            ext_r_off = ext32b_r_offset;
           else
             abort ();
         }
@@ -8316,14 +8274,61 @@ elf_link_adjust_relocs (bfd *abfd,
         {
 #ifdef BFD_HOST_64_BIT
           if (abfd->xvec->header_byteorder == BFD_ENDIAN_LITTLE)
-            compare = cmp_ext64l_r_offset;
+            ext_r_off = ext64l_r_offset;
           else if (abfd->xvec->header_byteorder == BFD_ENDIAN_BIG)
-            compare = cmp_ext64b_r_offset;
+            ext_r_off = ext64b_r_offset;
           else
 #endif
             abort ();
         }
-      qsort (reldata->hdr->contents, count, reldata->hdr->sh_entsize, compare);
+
+      /* Must use a stable sort here.  Insertion sort, since the
+         relocs are mostly sorted already.  */
+      elt_size = reldata->hdr->sh_entsize;
+      base = reldata->hdr->contents;
+      end = base + count * elt_size;
+      if (elt_size > sizeof (buf))
+        abort ();
+
+      /* Ensure the first element is lowest.  This acts as a sentinel,
+         speeding the main loop below.  */
+      r_off = (*ext_r_off) (base);
+      for (p = loc = base; (p += elt_size) < end; )
+        {
+          bfd_vma r_off2 = (*ext_r_off) (p);
+          if (r_off > r_off2)
+            {
+              r_off = r_off2;
+              loc = p;
+            }
+        }
+      if (loc != base)
+        {
+          /* Don't just swap *base and *loc as that changes the order
+             of the original base[0] and base[1] if they happen to
+             have the same r_offset.  */
+          memcpy (buf, loc, elt_size);
+          memmove (base + elt_size, base, loc - base);
+          memcpy (base, buf, elt_size);
+        }
+
+      for (p = base + elt_size; (p += elt_size) < end; )
+        {
+          /* base to p is sorted, *p is next to insert.  */
+          r_off = (*ext_r_off) (p);
+          /* Search the sorted region for location to insert.  */
+          loc = p - elt_size;
+          while (r_off < (*ext_r_off) (loc))
+            loc -= elt_size;
+          loc += elt_size;
+          if (loc != p)
+            {
+              memcpy (buf, p, elt_size);
+              memmove (loc + elt_size, loc, p - loc);
+              memcpy (loc, buf, elt_size);
+            }
+        }
+      /* Hashes are no longer valid.  */
       free (reldata->hashes);
       reldata->hashes = NULL;
     }
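
For reference, below is a rough standalone sketch of the scheme the patch installs in elf_link_adjust_relocs: an insertion sort over raw external reloc records, keyed by a callback that reads r_offset, with a first pass that rotates the smallest record to the front so it can act as a sentinel for the main loop. Names such as sort_external_relocs and get_key_fn are hypothetical simplifications; the real code uses bfd_byte, bfd_vma, and the ext32l_r_offset family shown in the diff above.

/* Sketch only, not the BFD implementation.  Sorts count records of
   elt_size bytes starting at base, keyed by get_r_offset.  */
#include <string.h>
#include <stdint.h>
#include <stddef.h>

typedef uint64_t (*get_key_fn) (const void *);

static void
sort_external_relocs (unsigned char *base, size_t count, size_t elt_size,
                      get_key_fn get_r_offset)
{
  unsigned char buf[64];	/* Must be able to hold one record.  */
  unsigned char *end = base + count * elt_size;
  unsigned char *p, *loc;
  uint64_t key;

  if (count == 0 || elt_size > sizeof buf)
    return;

  /* Pass 1: rotate the first record with the smallest r_offset to the
     front.  Rotation (memmove), not a swap, so records that share an
     r_offset keep their relative order.  */
  key = get_r_offset (base);
  for (p = loc = base; (p += elt_size) < end; )
    {
      uint64_t k = get_r_offset (p);
      if (key > k)
        {
          key = k;
          loc = p;
        }
    }
  if (loc != base)
    {
      memcpy (buf, loc, elt_size);
      memmove (base + elt_size, base, loc - base);
      memcpy (base, buf, elt_size);
    }

  /* Pass 2: insertion sort.  Because base[0] now holds the minimum
     key, the inner while loop needs no lower-bound check.  The strict
     comparison stops at equal keys, so the sort stays stable.  */
  for (p = base + elt_size; (p += elt_size) < end; )
    {
      key = get_r_offset (p);
      loc = p - elt_size;
      while (key < get_r_offset (loc))
        loc -= elt_size;
      loc += elt_size;
      if (loc != p)
        {
          memcpy (buf, p, elt_size);
          memmove (loc + elt_size, loc, p - loc);
          memcpy (loc, buf, elt_size);
        }
    }
}

The design choice visible in the patch follows the same lines: the sentinel pass lets the inner loop walk backwards on the key comparison alone, moving records by memcpy/memmove rather than pairwise swaps preserves the order of records with equal r_offset, and insertion sort is cheap because ld -r reloc sections are already mostly sorted by r_offset.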