author    Ulrich Drepper <drepper@redhat.com>  2002-02-13 08:03:56 +0000
committer Ulrich Drepper <drepper@redhat.com>  2002-02-13 08:03:56 +0000
commit    aed283dd456e7566cbfef88abe7b235ca77cc23c (patch)
tree      39e854a8a182adce9cc8876c27a0a1d8b07d878e /sysdeps
parent    712ac5b11c3f250c4354d92ee5a902c67b6d2045 (diff)
Update.
2002-02-12  Ulrich Drepper  <drepper@redhat.com>

        * sysdeps/generic/dl-tls.c (TLS_DTV_UNALLOCATED): Renamed from
        TLS_DTV_UNALLOCATE.
        (oom): New function.
        (_dl_next_tls_modid): Rewrite to handle dl_tls_dtv_slotinfo_list.
        (_dl_determine_tlsoffset): Likewise.
        (_dl_allocate_tls): Likewise.
        (__TLS_GET_ADDR): Define if not already defined.
        (_dl_tls_symaddr): New function.
        (allocate_and_init): New function.
        (__tls_get_addr): Actually implement handling of generation counter
        and deferred allocation.
        * sysdeps/generic/ldsodefs.h (_rtld_global): Remove
        _dl_initimage_list, add _dl_tls_dtv_slotinfo_list,
        _dl_tls_static_nelem, and _dl_tls_generation.
        Define TLS_SLOTINFO_SURPLUS and DTV_SURPLUS.
        Declare _dl_tls_symaddr.
        * sysdeps/i386/dl-tls.h: Disable __tls_get_addr handling unless
        SHARED.
        * include/link.h (struct link_map): Remove l_tls_nextimage and
        l_tls_previmage.
        * elf/dl-sym.c (_dl_sym): After successful lookup call
        _dl_tls_symaddr instead of DL_SYMBOL_ADDRESS for STT_TLS symbols.
        (_dl_vsym): Likewise.
        * elf/rtld.c (_dl_start_final): Adjust initdtv initialization for
        new layout.
        (dl_main): Allow PT_TLS to be present for an empty segment.  Remove
        nextimage list handling.  Instead add all modules using TLS to
        dl_tls_dtv_slotinfo_list.
        * elf/dl-open.c (dl_open_worker): After successfully loading all
        objects add those with TLS to the dl_tls_dtv_slotinfo_list list.
        * elf/dl-load.c (_dl_map_object_from_fd): If PT_TLS entry is for an
        empty segment don't do anything.  Remove handling of initimage list.
        * elf/Versions [ld] (GLIBC_2.0): Add __libc_memalign.
        (GLIBC_PRIVATE): Add _dl_tls_symaddr.
        * elf/dl-minimal.c: Define __libc_memalign.
        * elf/dl-support.c: Remove _dl_initimage_list.  Add
        _dl_tls_dtv_slotinfo_list, _dl_tls_static_nelem, and
        _dl_tls_generation.
        * include/stdlib.h: Declare __libc_memalign.
        * elf/Makefile: Add rules to build and run tst-tls4 and tst-tls5.
        * elf/tst-tls4.c: New file.
        * elf/tst-tls5.c: New file.
        * elf/tst-tlsmod2.c: New file.
        * elf/tls-macros.h: The asms using ___tls_get_addr destroy %ecx and
        %edx.
        * elf/tst-tlsmod1.c: Don't define variables unless USE_TLS.
        * elf/tst-tls1.c: Use test-skeleton.c.
        * elf/tst-tls2.c: Likewise.
        * elf/tst-tls3.c: Likewise.
        * elf/dl-conflict.c (RESOLVE_MAP): Return NULL not 0.
        * sysdeps/mips/machine-gmon.h: Update MCOUNT for current GCC
        behavior.
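[Annotation] The entries above revolve around a reworked dtv layout: two
bookkeeping elements now precede the per-module pointers, and the installed
dtv pointer is advanced past the first of them.  The following is a minimal,
hedged sketch of that layout; dtv_t and allocate_dtv_sketch are simplified
stand-ins, not the glibc definitions (the real dtv_t union lives in each
architecture's tls.h).

#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for the dtv_t union from the per-architecture
   tls.h header.  */
typedef union
{
  size_t counter;   /* Number of slots or generation count.  */
  void *pointer;    /* Address of a module's TLS block.  */
} dtv_t;

#define TLS_DTV_UNALLOCATED ((void *) -1l)

/* Hypothetical helper mirroring the allocation in _dl_allocate_tls:
   raw[0] holds the capacity, raw[1] the generation counter, and
   raw[2 + m - 1] the block pointer for module id m (ids start at 1).
   The returned pointer is advanced by one element, so callers see
   dtv[-1] = capacity, dtv[0] = generation, dtv[m] = module m.  */
static dtv_t *
allocate_dtv_sketch (size_t max_modid, size_t surplus)
{
  size_t length = max_modid + surplus;
  dtv_t *raw = malloc ((length + 2) * sizeof (dtv_t));
  if (raw == NULL)
    return NULL;

  raw[0].counter = length;      /* Capacity, i.e. dtv[-1].counter.  */
  raw[1].counter = 0;           /* Generation, i.e. dtv[0].counter.  */
  /* Zero all module slots; a slot is later set to TLS_DTV_UNALLOCATED
     to request deferred allocation.  */
  memset (raw + 2, '\0', length * sizeof (dtv_t));

  return raw + 1;
}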
Diffstat (limited to 'sysdeps')
-rw-r--r--  sysdeps/generic/dl-tls.c    | 494
-rw-r--r--  sysdeps/generic/ldsodefs.h  |  37
-rw-r--r--  sysdeps/i386/dl-tls.h       |   6
3 files changed, 426 insertions, 111 deletions
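[Annotation] Reading the dl-tls.c hunks below is easier with the fast path of
the new __tls_get_addr in mind: compare the thread's dtv generation against
the global one, refresh stale slots if needed, then allocate the module's
block on first use.  A condensed sketch, reusing the dtv_t and
TLS_DTV_UNALLOCATED stand-ins from the sketch above; update_stale_dtv and
allocate_and_init_for are hypothetical placeholders for logic shown in full
in the diff.

/* Hypothetical placeholders for logic in the diff below: refreshing a
   stale dtv (which may reallocate it) and the deferred first-touch
   allocation done by allocate_and_init.  */
extern dtv_t *update_stale_dtv (dtv_t *dtv, size_t new_gen);
extern void *allocate_and_init_for (size_t module);

/* Condensed shape of the new __tls_get_addr.  */
void *
tls_get_addr_sketch (dtv_t *dtv, size_t module, size_t offset,
                     size_t global_gen)
{
  /* Slow path: some dlopen/dlclose since this dtv was last updated
     changed the set of TLS-using modules.  */
  if (dtv[0].counter != global_gen)
    dtv = update_stale_dtv (dtv, global_gen);

  void *p = dtv[module].pointer;
  if (p == TLS_DTV_UNALLOCATED)
    /* Allocation was deferred for this dynamically loaded module;
       do it now, on first access from this thread.  */
    p = dtv[module].pointer = allocate_and_init_for (module);

  return (char *) p + offset;
}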
diff --git a/sysdeps/generic/dl-tls.c b/sysdeps/generic/dl-tls.c
index e42911acd3..d13b0f93fa 100644
--- a/sysdeps/generic/dl-tls.c
+++ b/sysdeps/generic/dl-tls.c
@@ -18,8 +18,12 @@
02111-1307 USA. */
#include <assert.h>
+#include <signal.h>
#include <stdlib.h>
+#include <unistd.h>
+#include <sys/param.h>
+#include <abort-instr.h>
#include <tls.h>
/* We don't need any of this if TLS is not supported. */
@@ -29,7 +33,31 @@
#include <ldsodefs.h>
/* Value used for dtv entries for which the allocation is delayed. */
-# define TLS_DTV_UNALLOCATE ((void *) -1l)
+# define TLS_DTV_UNALLOCATED ((void *) -1l)
+
+
+/* Out-of-memory handler. */
+static void
+__attribute__ ((__noreturn__))
+oom (void)
+{
+ static const char msg[] = "\
+cannot allocate memory for thread-local data: ABORT\n";
+
+ __libc_write (STDERR_FILENO, msg, sizeof (msg) - 1);
+
+ /* Kill ourself. */
+ __kill (__getpid (), SIGKILL);
+
+ /* Just in case something goes wrong with the kill. */
+ while (1)
+ {
+# ifdef ABORT_INSTRUCTION
+ ABORT_INSTRUCTION;
+# endif
+ }
+}
+
size_t
@@ -40,38 +68,49 @@ _dl_next_tls_modid (void)
if (__builtin_expect (GL(dl_tls_dtv_gaps), false))
{
- /* XXX If this method proves too costly we can optimize
- it to use a constant time method. But I don't think
- it's a problem. */
- struct link_map *runp = GL(dl_initimage_list);
- bool used[GL(dl_tls_max_dtv_idx)];
-
- assert (runp != NULL);
+ size_t disp = 0;
+ struct dtv_slotinfo_list *runp = GL(dl_tls_dtv_slotinfo_list);
+
+ /* Note that this branch will never be executed during program
+ start since there are no gaps at that time. Therefore it
+ does not matter that the dl_tls_dtv_slotinfo_list is not allocated
+ yet when the function is called for the first time. */
+ result = GL(dl_tls_static_nelem);
+ assert (result < GL(dl_tls_max_dtv_idx));
do
{
- assert (runp->l_tls_modid > 0
- && runp->l_tls_modid <= GL(dl_tls_max_dtv_idx));
- used[runp->l_tls_modid - 1] = true;
- }
- while ((runp = runp->l_tls_nextimage) != GL(dl_initimage_list));
+ while (result - disp < runp->len)
+ {
+ if (runp->slotinfo[result - disp].map == NULL)
+ break;
- result = 0;
- do
- /* The information about the gaps is pessimistic. It might be
- there are actually none. */
- if (result >= GL(dl_tls_max_dtv_idx))
- {
- /* Now we know there is actually no gap. Bump the maximum
- ID number and remember that there are no gaps. */
- result = ++GL(dl_tls_max_dtv_idx);
- GL(dl_tls_dtv_gaps) = false;
+ ++result;
+ assert (result <= GL(dl_tls_max_dtv_idx) + 1);
+ }
+
+ if (result - disp < runp->len)
break;
- }
- while (used[result++]);
+
+ disp += runp->len;
+ }
+ while ((runp = runp->next) != NULL);
+
+ if (result >= GL(dl_tls_max_dtv_idx) + 1)
+ {
+ /* The new index must indeed be exactly one higher than the
+ previous high. */
+ assert (result == GL(dl_tls_max_dtv_idx) + 1);
+
+ /* There is no gap anymore. */
+ GL(dl_tls_dtv_gaps) = false;
+
+ goto nogaps;
+ }
}
else
- /* No gaps, allocate a new entry. */
- result = ++GL(dl_tls_max_dtv_idx);
+ {
+ /* No gaps, allocate a new entry. */
+ nogaps:
+ result = ++GL(dl_tls_max_dtv_idx);
+ }
return result;
}
@@ -79,41 +118,39 @@ _dl_next_tls_modid (void)
void
internal_function
-_dl_determine_tlsoffset (struct link_map *lastp)
+_dl_determine_tlsoffset (void)
{
- struct link_map *runp;
- size_t max_align = 0;
+ struct dtv_slotinfo *slotinfo;
+ size_t max_align = __alignof__ (void *);
size_t offset;
+ size_t cnt;
- if (lastp == NULL)
- {
- /* None of the objects used at startup time uses TLS. We still
- have to allocate the TCB and dtv. */
- GL(dl_tls_static_size) = TLS_TCB_SIZE;
- GL(dl_tls_static_align) = TLS_TCB_ALIGN;
-
- return;
- }
+ /* The first element of the dtv slot info list is allocated. */
+ assert (GL(dl_tls_dtv_slotinfo_list) != NULL);
+ /* There is at this point only one element in the
+ dl_tls_dtv_slotinfo_list list. */
+ assert (GL(dl_tls_dtv_slotinfo_list)->next == NULL);
# if TLS_TCB_AT_TP
/* We simply start with zero. */
offset = 0;
- runp = lastp->l_tls_nextimage;
- do
+ slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;
+ for (cnt = 1; slotinfo[cnt].map != NULL; ++cnt)
{
- max_align = MAX (max_align, runp->l_tls_align);
+ assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);
+
+ max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);
/* Compute the offset of the next TLS block. */
- offset = roundup (offset + runp->l_tls_blocksize, runp->l_tls_align);
+ offset = roundup (offset + slotinfo[cnt].map->l_tls_blocksize,
+ slotinfo[cnt].map->l_tls_align);
/* XXX For some architectures we perhaps should store the
negative offset. */
- runp->l_tls_offset = offset;
+ slotinfo[cnt].map->l_tls_offset = offset;
}
- while ((runp = runp->l_tls_nextimage) != lastp->l_tls_nextimage);
-#if 0
/* The thread descriptor (pointed to by the thread pointer) has its
own alignment requirement. Adjust the static TLS size
and TLS offsets appropriately. */
@@ -121,34 +158,44 @@ _dl_determine_tlsoffset (struct link_map *lastp)
// XXX after the first (closest to the TCB) TLS block since this
// XXX would invalidate the offsets the linker creates for the LE
// XXX model.
- if (offset % TLS_TCB_ALIGN != 0)
- abort ();
-#endif
GL(dl_tls_static_size) = offset + TLS_TCB_SIZE;
# elif TLS_DTV_AT_TP
- struct link_map *prevp;
-
- /* The first block starts right after the TCB. */
+ /* The TLS blocks start right after the TCB. */
offset = TLS_TCB_SIZE;
- max_align = runp->l_tls_align;
- runp = lastp->l_tls_nextimage;
- runp->l_tls_offset = offset;
- prevp = runp;
- while ((runp = runp->l_tls_nextimage) != firstp)
+ /* The first block starts right after the TCB. */
+ slotinfo = GL(dl_tls_dtv_slotinfo_list)->slotinfo;
+ if (slotinfo[1].map != NULL)
{
- max_align = MAX (max_align, runp->l_tls_align);
+ size_t prev_size;
+
- /* Compute the offset of the next TLS block. */
- offset = roundup (offset + prevp->l_tls_blocksize, runp->l_tls_align);
+ offset = roundup (offset, slotinfo[1].map->l_tls_align);
+ slotinfo[1].map->l_tls_offset = offset;
+ max_align = slotinfo[1].map->l_tls_align;
+ prev_size = slotinfo[1].map->l_tls_blocksize;
- runp->l_tls_offset = offset;
+ for (cnt = 2; slotinfo[cnt].map != NULL; ++cnt)
+ {
+ assert (cnt < GL(dl_tls_dtv_slotinfo_list)->len);
+
+ max_align = MAX (max_align, slotinfo[cnt].map->l_tls_align);
+
+ /* Compute the offset of the next TLS block. */
+ offset = roundup (offset + prev_size,
+ slotinfo[cnt].map->l_tls_align);
+
+ /* XXX For some architectures we perhaps should store the
+ negative offset. */
+ slotinfo[cnt].map->l_tls_offset = offset;
+
+ prev_size = slotinfo[cnt].map->l_tls_blocksize;
+ }
- prevp = runp;
+ offset += prev_size;
}
- GL(dl_tls_static_size) = offset + prevp->l_tls_blocksize;
+ GL(dl_tls_static_size) = offset;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
@@ -164,59 +211,100 @@ _dl_allocate_tls (void)
{
void *result;
dtv_t *dtv;
+ size_t dtv_length;
/* Allocate a correctly aligned chunk of memory. */
/* XXX For now */
assert (GL(dl_tls_static_align) <= GL(dl_pagesize));
-#ifdef MAP_ANON
-# define _dl_zerofd (-1)
-#else
-# define _dl_zerofd GL(dl_zerofd)
+# ifdef MAP_ANON
+# define _dl_zerofd (-1)
+# else
+# define _dl_zerofd GL(dl_zerofd)
if ((dl_zerofd) == -1)
GL(dl_zerofd) = _dl_sysdep_open_zero_fill ();
-# define MAP_ANON 0
-#endif
+# define MAP_ANON 0
+# endif
result = __mmap (0, GL(dl_tls_static_size), PROT_READ|PROT_WRITE,
MAP_ANON|MAP_PRIVATE, _dl_zerofd, 0);
- dtv = (dtv_t *) malloc ((GL(dl_tls_max_dtv_idx) + 1) * sizeof (dtv_t));
+ /* We allocate a few more elements in the dtv than are needed for the
+ initial set of modules. This should in most cases avoid expansions
+ of the dtv. */
+ dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
+ dtv = (dtv_t *) malloc ((dtv_length + 2) * sizeof (dtv_t));
if (result != MAP_FAILED && dtv != NULL)
{
- struct link_map *runp;
+ struct dtv_slotinfo_list *listp;
+ bool first_block = true;
+ size_t total = 0;
# if TLS_TCB_AT_TP
/* The TCB follows the TLS blocks. */
result = (char *) result + GL(dl_tls_static_size) - TLS_TCB_SIZE;
# endif
- /* XXX Fill in an correct generation number. */
- dtv[0].counter = 0;
-
- /* Initialize the memory from the initialization image list and clear
- the BSS parts. */
- if (GL(dl_initimage_list) != NULL)
+ /* This is the initial length of the dtv. */
+ dtv[0].counter = dtv_length;
+ /* Fill in the generation number. */
+ dtv[1].counter = GL(dl_tls_generation) = 0;
+ /* Initialize all of the rest of the dtv with zero to indicate
+ nothing there. */
+ memset (dtv + 2, '\0', dtv_length * sizeof (dtv_t));
+
+ /* We have to prepare the dtv for all currently loaded
+ modules using TLS. For those which are dynamically loaded we
+ add the values indicating deferred allocation. */
+ listp = GL(dl_tls_dtv_slotinfo_list);
+ while (1)
{
- runp = GL(dl_initimage_list)->l_tls_nextimage;
- do
+ size_t cnt;
+
+ for (cnt = first_block ? 1 : 0; cnt < listp->len; ++cnt)
{
- assert (runp->l_tls_modid > 0);
- assert (runp->l_tls_modid <= GL(dl_tls_max_dtv_idx));
+ struct link_map *map;
+ void *dest;
+
+ /* Check for the total number of used slots. */
+ if (total + cnt >= GL(dl_tls_max_dtv_idx))
+ break;
+
+ map = listp->slotinfo[cnt].map;
+ if (map == NULL)
+ /* Unused entry. */
+ continue;
+
+ if (map->l_type == lt_loaded)
+ {
+ /* For dynamically loaded modules we simply store
+ the value indicating deferred allocation. */
+ dtv[1 + map->l_tls_modid].pointer = TLS_DTV_UNALLOCATED;
+ continue;
+ }
+
+ assert (map->l_tls_modid == cnt);
+ assert (map->l_tls_blocksize >= map->l_tls_initimage_size);
# if TLS_TCB_AT_TP
- dtv[runp->l_tls_modid].pointer = result - runp->l_tls_offset;
+ assert (map->l_tls_offset >= map->l_tls_blocksize);
+ dest = (char *) result - map->l_tls_offset;
# elif TLS_DTV_AT_TP
- dtv[runp->l_tls_modid].pointer = result + runp->l_tls_offset;
+ dest = (char *) result + map->l_tls_offset;
# else
# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
# endif
- memset (__mempcpy (dtv[runp->l_tls_modid].pointer,
- runp->l_tls_initimage,
- runp->l_tls_initimage_size),
- '\0',
- runp->l_tls_blocksize - runp->l_tls_initimage_size);
+ /* We don't have to clear the BSS part of the TLS block
+ since mmap is used to allocate the memory which
+ guarantees it is initialized to zero. */
+ dtv[1 + cnt].pointer = memcpy (dest, map->l_tls_initimage,
+ map->l_tls_initimage_size);
}
- while ((runp = runp->l_tls_nextimage)
- != GL(dl_initimage_list)->l_tls_nextimage);
+
+ total += cnt;
+ if (total >= GL(dl_tls_max_dtv_idx))
+ break;
+
+ listp = listp->next;
+ assert (listp != NULL);
}
/* Add the dtv to the thread data structures. */
@@ -232,6 +320,7 @@ _dl_allocate_tls (void)
}
+# ifdef SHARED
/* The __tls_get_addr function has two basic forms which differ in the
arguments. The IA-64 form takes two parameters, the module ID and
offset. The form used, among others, on IA-32 takes a reference to
@@ -239,26 +328,227 @@ _dl_allocate_tls (void)
form seems to be more often used (at the moment) so we default to
it. Users of the IA-64 form have to provide adequate definitions
of the following macros. */
-# ifndef GET_ADDR_ARGS
-# define GET_ADDR_ARGS tls_index *ti
-# endif
-# ifndef GET_ADDR_MODULE
-# define GET_ADDR_MODULE ti->ti_module
-# endif
-# ifndef GET_ADDR_OFFSET
-# define GET_ADDR_OFFSET ti->ti_offset
-# endif
+# ifndef GET_ADDR_ARGS
+# define GET_ADDR_ARGS tls_index *ti
+# endif
+# ifndef GET_ADDR_MODULE
+# define GET_ADDR_MODULE ti->ti_module
+# endif
+# ifndef GET_ADDR_OFFSET
+# define GET_ADDR_OFFSET ti->ti_offset
+# endif
+/* Systems which do not have tls_index also probably have to define
+ DONT_USE_TLS_INDEX. */
+
+# ifndef __TLS_GET_ADDR
+# define __TLS_GET_ADDR __tls_get_addr
+# endif
+
+
+/* Return the symbol address given the map of the module it is in and
+ the symbol record. This is used in dl-sym.c. */
+void *
+internal_function
+_dl_tls_symaddr (struct link_map *map, const ElfW(Sym) *ref)
+{
+# ifndef DONT_USE_TLS_INDEX
+ tls_index tmp =
+ {
+ .ti_module = map->l_tls_modid,
+ .ti_offset = ref->st_value
+ };
+
+ return __TLS_GET_ADDR (&tmp);
+# else
+ return __TLS_GET_ADDR (map->l_tls_modid, ref->st_value);
+# endif
+}
+
+
+static void *
+allocate_and_init (struct link_map *map)
+{
+ void *newp;
+
+ newp = __libc_memalign (map->l_tls_align, map->l_tls_blocksize);
+ if (newp == NULL)
+ oom ();
+ /* Initialize the memory. */
+ memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
+ '\0', map->l_tls_blocksize - map->l_tls_initimage_size);
+ return newp;
+}
+
+
+/* The generic dynamic and local dynamic model cannot be used in
+ statically linked applications. */
void *
__tls_get_addr (GET_ADDR_ARGS)
{
dtv_t *dtv = THREAD_DTV ();
+ struct link_map *the_map = NULL;
+ void *p;
+
+ if (__builtin_expect (dtv[0].counter != GL(dl_tls_generation), 0))
+ {
+ struct dtv_slotinfo_list *listp;
+ size_t idx;
+
+ /* The global dl_tls_dtv_slotinfo array contains for each module
+ index the generation counter current when the entry was
+ created. This array never shrinks so that all module indices
+ which were valid at some time can be used to access it.
+ Before the first use of a new module index in this function
+ the array was extended appropriately. Access also does not
+ have to be guarded against modifications of the array. It is
+ assumed that pointer-size values can be read atomically even
+ in SMP environments. It is possible that other threads at
+ the same time dynamically load code and therefore add to the
+ slotinfo list. This is a problem since we must not pick up
+ any information about incomplete work. The solution to this
+ is to ignore all dtv slots which were created after the one
+ we are currently interested in. We know that dynamic loading
+ for this module is completed and this is the last load
+ operation we know finished. */
+ idx = GET_ADDR_MODULE;
+ listp = GL(dl_tls_dtv_slotinfo_list);
+ while (idx >= listp->len)
+ {
+ idx -= listp->len;
+ listp = listp->next;
+ }
- if (dtv[GET_ADDR_MODULE].pointer == TLS_DTV_UNALLOCATE)
- /* XXX */;
+ if (dtv[0].counter < listp->slotinfo[idx].gen)
+ {
+ /* The generation counter for the slot is higher than what
+ the current dtv implements. We have to update the whole
+ dtv but only those entries with a generation counter <=
+ the one for the entry we need. */
+ size_t new_gen = listp->slotinfo[idx].gen;
+ size_t total = 0;
+
+ /* We have to look through the entire dtv slotinfo list. */
+ listp = GL(dl_tls_dtv_slotinfo_list);
+ do
+ {
+ size_t cnt;
+
+ for (cnt = total == 0 ? 1 : 0; cnt < listp->len; ++cnt)
+ {
+ size_t gen = listp->slotinfo[cnt].gen;
+ struct link_map *map;
+ size_t modid;
+
+ if (gen > new_gen)
+ /* This is a slot for a generation younger than
+ the one we are handling now. It might be
+ incompletely set up so ignore it. */
+ continue;
+
+ /* If the entry is older than the current dtv layout
+ we know we don't have to handle it. */
+ if (gen <= dtv[0].counter)
+ continue;
+
+ /* If there is no map this means the entry is empty. */
+ map = listp->slotinfo[cnt].map;
+ if (map == NULL)
+ {
+ /* If this modid was used at some point the memory
+ might still be allocated. */
+ if (dtv[total + cnt].pointer != TLS_DTV_UNALLOCATED)
+ free (dtv[total + cnt].pointer);
+
+ continue;
+ }
+
+ /* Check whether the current dtv array is large enough. */
+ modid = map->l_tls_modid;
+ assert (total + cnt == modid);
+ if (dtv[-1].counter < modid)
+ {
+ /* Reallocate the dtv. */
+ dtv_t *newp;
+ size_t newsize = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
+ size_t oldsize = dtv[-1].counter;
+
+ assert (map->l_tls_modid <= newsize);
+
+ newp = (dtv_t *) realloc (&dtv[-1],
+ (2 + newsize)
+ * sizeof (dtv_t));
+ if (newp == NULL)
+ oom ();
+
+ newp[0].counter = newsize;
+
+ /* Clear the newly allocated part. */
+ memset (newp + 2 + oldsize, '\0',
+ (newsize - oldsize) * sizeof (dtv_t));
+
+ /* Point dtv to the generation counter. */
+ dtv = &newp[1];
+
+ /* Install this new dtv in the thread data
+ structures. */
+ INSTALL_NEW_DTV (dtv);
+ }
+
+ /* If there is currently memory allocated for this
+ dtv entry, free it. */
+ /* XXX Ideally we will at some point create a memory
+ pool. */
+ if (dtv[modid].pointer != TLS_DTV_UNALLOCATED)
+ /* Note that free is called for NULL as well. We
+ deallocate even if it is this dtv entry we are
+ supposed to load. The reason is that we call
+ memalign and not malloc. */
+ free (dtv[modid].pointer);
+
+ /* This module is loaded dynamically. We defer
+ memory allocation. */
+ dtv[modid].pointer = TLS_DTV_UNALLOCATED;
+
+ if (modid == GET_ADDR_MODULE)
+ the_map = map;
+ }
+
+ total += listp->len;
+ }
+ while ((listp = listp->next) != NULL);
- return (char *) dtv[GET_ADDR_MODULE].pointer + GET_ADDR_OFFSET;
+ /* This will be the new maximum generation counter. */
+ dtv[0].counter = new_gen;
+ }
+ }
+
+ p = dtv[GET_ADDR_MODULE].pointer;
+
+ if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
+ {
+ /* The allocation was deferred. Do it now. */
+ if (the_map == NULL)
+ {
+ /* Find the link map for this module. */
+ size_t idx = GET_ADDR_MODULE;
+ struct dtv_slotinfo_list *listp = GL(dl_tls_dtv_slotinfo_list);
+
+ while (idx >= listp->len)
+ {
+ idx -= listp->len;
+ listp = listp->next;
+ }
+
+ the_map = listp->slotinfo[idx].map;
+ }
+
+ p = dtv[GET_ADDR_MODULE].pointer = allocate_and_init (the_map);
+ }
+
+ return (char *) p + GET_ADDR_OFFSET;
}
+# endif
#endif /* use TLS */
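[Annotation] The idx/listp walk that appears twice in __tls_get_addr above
generalizes to a small lookup helper.  A hedged sketch under assumed minimal
types; the real struct dtv_slotinfo_list is declared in the ldsodefs.h hunk
below (with a GNU zero-length slotinfo[0] array instead of the C99 flexible
array member used here).

#include <stddef.h>

struct link_map;        /* Opaque here; the real type is in link.h.  */

/* Minimal mirror of the slotinfo structures from ldsodefs.h.  */
struct dtv_slotinfo
{
  size_t gen;
  struct link_map *map;
};

struct dtv_slotinfo_list
{
  size_t len;
  struct dtv_slotinfo_list *next;
  struct dtv_slotinfo slotinfo[];
};

/* Map a module id to its slotinfo entry exactly the way
   __tls_get_addr does: peel off whole blocks until the index falls
   inside the current one.  For a valid module id the chain is long
   enough, so next is never NULL when dereferenced.  */
static struct link_map *
slotinfo_map_for_modid (struct dtv_slotinfo_list *listp, size_t idx)
{
  while (idx >= listp->len)
    {
      idx -= listp->len;
      listp = listp->next;
    }
  return listp->slotinfo[idx].map;
}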
diff --git a/sysdeps/generic/ldsodefs.h b/sysdeps/generic/ldsodefs.h
index bfad7c6b07..d722198328 100644
--- a/sysdeps/generic/ldsodefs.h
+++ b/sysdeps/generic/ldsodefs.h
@@ -289,23 +289,40 @@ struct rtld_global
#endif
#ifdef USE_TLS
- /* Beginning of the list of link maps for objects which contain
- thread-local storage sections. This will be traversed to
- initialize new TLS blocks. */
- EXTERN struct link_map *_dl_initimage_list;
-
/* Highest dtv index currently needed. */
EXTERN size_t _dl_tls_max_dtv_idx;
/* Flag signalling whether there are gaps in the module ID allocation. */
EXTERN bool _dl_tls_dtv_gaps;
-
+ /* Information about the dtv slots. */
+ EXTERN struct dtv_slotinfo_list
+ {
+ size_t len;
+ struct dtv_slotinfo_list *next;
+ struct dtv_slotinfo
+ {
+ size_t gen;
+ struct link_map *map;
+ } slotinfo[0];
+ } *_dl_tls_dtv_slotinfo_list;
+ /* Number of modules in the static TLS block. */
+ EXTERN size_t _dl_tls_static_nelem;
/* Size of the static TLS block. */
EXTERN size_t _dl_tls_static_size;
/* Alignment requirement of the static TLS block. */
EXTERN size_t _dl_tls_static_align;
+/* Number of additional entries in the slotinfo array of each slotinfo
list element. A large number makes it almost certain that we never
+ have to iterate beyond the first element in the slotinfo list. */
+# define TLS_SLOTINFO_SURPLUS (62)
+
+/* Number of additional slots in the dtv allocated. */
+# define DTV_SURPLUS (14)
+
/* True if the dtv for the initial thread was malloc()ed. */
EXTERN bool _dl_initial_dtv_malloced;
+ /* Generation counter for the dtv. */
+ EXTERN size_t _dl_tls_generation;
#endif
/* Name of the shared object to be profiled (if any). */
@@ -667,12 +684,16 @@ extern void _dl_sysdep_start_cleanup (void)
extern size_t _dl_next_tls_modid (void) internal_function;
/* Calculate offset of the TLS blocks in the static TLS block. */
-extern void _dl_determine_tlsoffset (struct link_map *firstp)
- internal_function;
+extern void _dl_determine_tlsoffset (void) internal_function;
/* Allocate memory for static TLS block and dtv. */
extern void *_dl_allocate_tls (void) internal_function;
+/* Return the symbol address given the map of the module it is in and
+ the symbol record. */
+extern void *_dl_tls_symaddr (struct link_map *map, const ElfW(Sym) *ref)
+ internal_function;
+
__END_DECLS
#endif /* ldsodefs.h */
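[Annotation] TLS_SLOTINFO_SURPLUS exists so that the first slotinfo block
almost always suffices; only when dlopen pushes the module count past the
combined capacity does a new block get chained on.  The code that grows the
list lives in elf/dl-open.c and elf/rtld.c, outside this diff, so the
following is only a hypothetical illustration using the structures mirrored
in the sketch above.

#include <stdlib.h>

/* Hypothetical sketch of appending one block to the slotinfo list;
   TLS_SLOTINFO_SURPLUS is the constant defined in ldsodefs.h above.  */
#define TLS_SLOTINFO_SURPLUS (62)

static struct dtv_slotinfo_list *
append_slotinfo_block (struct dtv_slotinfo_list *last, size_t needed)
{
  size_t len = needed + TLS_SLOTINFO_SURPLUS;
  struct dtv_slotinfo_list *newp
    = calloc (1, sizeof (struct dtv_slotinfo_list)
                 + len * sizeof (struct dtv_slotinfo));
  if (newp == NULL)
    return NULL;        /* The caller decides how to fail (cf. oom).  */

  newp->len = len;
  newp->next = NULL;
  /* Publish the block only after it is fully initialized, matching
     the read-side assumption in __tls_get_addr that entries it can
     see are complete.  */
  last->next = newp;
  return newp;
}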
diff --git a/sysdeps/i386/dl-tls.h b/sysdeps/i386/dl-tls.h
index 8e30530542..5066b8dcd6 100644
--- a/sysdeps/i386/dl-tls.h
+++ b/sysdeps/i386/dl-tls.h
@@ -17,6 +17,7 @@
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
02111-1307 USA. */
+
/* Type used for the representation of TLS information in the GOT. */
typedef struct
{
@@ -25,6 +26,7 @@ typedef struct
} tls_index;
+#ifdef SHARED
/* This is the prototype for the GNU version. */
extern void *___tls_get_addr (tls_index *ti)
__attribute__ ((__regparm__ (1)));
@@ -46,5 +48,7 @@ __tls_get_addr (tls_index *ti)
/* Prepare using the definition of __tls_get_addr in the generic
version of this file. */
-#define __tls_get_addr __attribute__ ((__regparm__ (1))) ___tls_get_addr
+# define __tls_get_addr __attribute__ ((__regparm__ (1))) ___tls_get_addr
strong_alias (___tls_get_addr, ___tls_get_addr_internal)
+# define __TLS_GET_ADDR ___tls_get_addr
+#endif
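[Annotation] For reference, the __regparm__ (1) attribute above is what makes
the GNU IA-32 variant receive its tls_index argument in %eax instead of on
the stack; the header achieves this by macro-renaming the generic
__tls_get_addr body rather than by writing a wrapper.  A hedged sketch of the
equivalent shape as an explicit wrapper; generic_tls_get_addr is a
hypothetical stand-in for that shared body.

/* tls_index as laid out in the GOT on i386.  */
typedef struct
{
  unsigned long int ti_module;
  unsigned long int ti_offset;
} tls_index;

/* Hypothetical stand-in for the generic __tls_get_addr body defined
   in sysdeps/generic/dl-tls.c.  */
extern void *generic_tls_get_addr (tls_index *ti);

/* GNU IA-32 entry point: regparm (1) puts TI in %eax, which is what
   compiler-generated general-dynamic TLS code expects.  */
void *
__attribute__ ((__regparm__ (1))) ___tls_get_addr (tls_index *ti)
{
  return generic_tls_get_addr (ti);
}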