Diffstat (limited to 'libc/elf')
-rw-r--r--  libc/elf/Versions          |    1
-rw-r--r--  libc/elf/dl-close.c        |   10
-rw-r--r--  libc/elf/dl-iteratephdr.c  |   30
-rw-r--r--  libc/elf/dl-load.c         |   14
-rw-r--r--  libc/elf/dl-misc.c         |  142
-rw-r--r--  libc/elf/dl-open.c         |    5
-rw-r--r--  libc/elf/dl-reloc.c        |   48
-rw-r--r--  libc/elf/dl-support.c      |    3
-rw-r--r--  libc/elf/dl-tls.c          |  144
9 files changed, 62 insertions(+), 335 deletions(-)
diff --git a/libc/elf/Versions b/libc/elf/Versions
index 01b7a59d5..238399232 100644
--- a/libc/elf/Versions
+++ b/libc/elf/Versions
@@ -53,7 +53,6 @@ ld {
_dl_allocate_tls; _dl_allocate_tls_init;
_dl_argv; _dl_find_dso_for_object; _dl_get_tls_static_info;
_dl_deallocate_tls; _dl_make_stack_executable; _dl_out_of_memory;
- _dl_clear_dtv;
_dl_rtld_di_serinfo; _dl_starting_up; _dl_tls_setup;
_rtld_global; _rtld_global_ro;
diff --git a/libc/elf/dl-close.c b/libc/elf/dl-close.c
index 406d3ba91..412497902 100644
--- a/libc/elf/dl-close.c
+++ b/libc/elf/dl-close.c
@@ -643,9 +643,7 @@ _dl_close_worker (struct link_map *map)
imap->l_prev->l_next = imap->l_next;
else
{
-#ifdef SHARED
assert (nsid != LM_ID_BASE);
-#endif
ns->_ns_loaded = imap->l_next;
/* Update the pointer to the head of the list
@@ -736,13 +734,7 @@ _dl_close_worker (struct link_map *map)
if (__builtin_expect (ns->_ns_loaded == NULL, 0)
&& nsid == GL(dl_nns) - 1)
do
- {
- --GL(dl_nns);
-#ifndef SHARED
- if (GL(dl_nns) == 0)
- break;
-#endif
- }
+ --GL(dl_nns);
while (GL(dl_ns)[GL(dl_nns) - 1]._ns_loaded == NULL);
/* Notify the debugger those objects are finalized and gone. */
diff --git a/libc/elf/dl-iteratephdr.c b/libc/elf/dl-iteratephdr.c
index 76e98aa2f..6572ec3a3 100644
--- a/libc/elf/dl-iteratephdr.c
+++ b/libc/elf/dl-iteratephdr.c
@@ -86,34 +86,4 @@ __dl_iterate_phdr (int (*callback) (struct dl_phdr_info *info,
}
hidden_def (__dl_iterate_phdr)
-#ifdef SHARED
-
weak_alias (__dl_iterate_phdr, dl_iterate_phdr);
-
-#else
-
-int
-dl_iterate_phdr (int (*callback) (struct dl_phdr_info *info,
- size_t size, void *data), void *data)
-{
- if (_dl_phnum != 0)
- {
- /* This entry describes this statically-linked program itself. */
- struct dl_phdr_info info;
- int ret;
- info.dlpi_addr = 0;
- info.dlpi_name = "";
- info.dlpi_phdr = _dl_phdr;
- info.dlpi_phnum = _dl_phnum;
- info.dlpi_adds = GL(dl_load_adds);
- info.dlpi_subs = GL(dl_load_adds) - GL(dl_ns)[LM_ID_BASE]._ns_nloaded;
- ret = (*callback) (&info, sizeof (struct dl_phdr_info), data);
- if (ret)
- return ret;
- }
-
- return __dl_iterate_phdr (callback, data);
-}
-
-
-#endif
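
With the static-binary special case gone, both link modes reach the same
__dl_iterate_phdr walk; the statically linked program shows up in the
iteration like any other object, its headers coming from the main link
map that dl-support.c initializes later in this patch.  The public
callback contract is unchanged.  For reference, a minimal standalone
caller of dl_iterate_phdr (our example, not part of the patch):

    #define _GNU_SOURCE
    #include <link.h>
    #include <stdio.h>

    /* Called once per loaded object; a nonzero return stops the walk
       and becomes dl_iterate_phdr's own return value.  */
    static int
    show_object (struct dl_phdr_info *info, size_t size, void *data)
    {
      printf ("%14p %2d phdrs %s\n", (void *) info->dlpi_addr,
              info->dlpi_phnum,
              info->dlpi_name[0] ? info->dlpi_name : "(main program)");
      return 0;
    }

    int
    main (void)
    {
      return dl_iterate_phdr (show_object, NULL);
    }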
diff --git a/libc/elf/dl-load.c b/libc/elf/dl-load.c
index fdd4d264a..086197e07 100644
--- a/libc/elf/dl-load.c
+++ b/libc/elf/dl-load.c
@@ -2233,23 +2233,17 @@ _dl_map_object (struct link_map *loader, const char *name,
if (cached != NULL)
{
-# ifdef SHARED
// XXX Correct to unconditionally default to namespace 0?
l = (loader
?: GL(dl_ns)[LM_ID_BASE]._ns_loaded
- ?: &GL(dl_rtld_map));
-# else
- l = loader;
+# ifdef SHARED
+ ?: &GL(dl_rtld_map)
# endif
+ );
/* If the loader has the DF_1_NODEFLIB flag set we must not
use a cache entry from any of these directories. */
- if (
-# ifndef SHARED
- /* 'l' is always != NULL for dynamically linked objects. */
- l != NULL &&
-# endif
- __builtin_expect (l->l_flags_1 & DF_1_NODEFLIB, 0))
+ if (__builtin_expect (l->l_flags_1 & DF_1_NODEFLIB, 0))
{
const char *dirp = system_dirs;
unsigned int cnt = 0;
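
The surviving branch leans on GNU C's two-operand `?:', which yields its
left operand, evaluated once, unless that operand is null.  The old
`l != NULL &&' guard can go because in static builds the base
namespace's _ns_loaded now refers to the main link map set up in
dl-support.c (which this patch also marks DF_1_NODEFLIB), so `l' ends up
non-NULL in either build.  For readers unfamiliar with the extension, a
tiny illustration (hypothetical helper, not from the patch):

    struct link_map;   /* opaque here; the real one lives in <link.h> */

    static struct link_map *
    first_nonnull (struct link_map *loader, struct link_map *fallback)
    {
      /* GNU C: `x ?: y' is `x ? x : y' with x evaluated only once.
         The patched cache lookup chains its candidates this way.  */
      return loader ?: fallback;
    }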
diff --git a/libc/elf/dl-misc.c b/libc/elf/dl-misc.c
index 043185aa7..8fd67100e 100644
--- a/libc/elf/dl-misc.c
+++ b/libc/elf/dl-misc.c
@@ -19,7 +19,6 @@
#include <assert.h>
#include <fcntl.h>
#include <ldsodefs.h>
-#include <libc-symbols.h>
#include <limits.h>
#include <link.h>
#include <stdarg.h>
@@ -365,144 +364,3 @@ _dl_higher_prime_number (unsigned long int n)
return *low;
}
-
-/* To support accessing TLS variables from signal handlers, we need an
- async signal safe memory allocator. These routines are never
- themselves invoked reentrantly (all calls to them are surrounded by
- signal masks) but may be invoked concurrently from many threads.
- The current implementation is not particularly performant nor space
- efficient, but it will be used rarely (and only in binaries that use
- dlopen.) The API matches that of malloc() and friends. */
-
-struct __signal_safe_allocator_header
-{
- size_t size;
- void *start;
-};
-
-static inline struct __signal_safe_allocator_header *
-ptr_to_signal_safe_allocator_header (void *ptr)
-{
- return (struct __signal_safe_allocator_header *)
- ((char *) (ptr) - sizeof (struct __signal_safe_allocator_header));
-}
-
-void *weak_function
-__signal_safe_memalign (size_t boundary, size_t size)
-{
- struct __signal_safe_allocator_header *header;
-
- if (boundary < sizeof (*header))
- boundary = sizeof (*header);
-
- /* Boundary must be a power of two. */
- if (!powerof2 (boundary))
- return NULL;
-
- size_t pg = GLRO (dl_pagesize);
- size_t padded_size;
- if (boundary <= pg)
- {
- /* We'll get a pointer certainly aligned to boundary, so just
- add one more boundary-sized chunk to hold the header. */
- padded_size = roundup (size, boundary) + boundary;
- }
- else
- {
- /* If we want K pages aligned to a J-page boundary, K+J+1 pages
- contains at least one such region that isn't directly at the start
- (so we can place the header.) This is wasteful, but you're the one
- who wanted 64K-aligned TLS. */
- padded_size = roundup (size, pg) + boundary + pg;
- }
-
-
- size_t actual_size = roundup (padded_size, pg);
- void *actual = mmap (NULL, actual_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
- if (actual == MAP_FAILED)
- return NULL;
-
- if (boundary <= pg)
- {
- header = actual + boundary - sizeof (*header);
- }
- else
- {
- intptr_t actual_pg = ((intptr_t) actual) / pg;
- intptr_t boundary_pg = boundary / pg;
- intptr_t start_pg = actual_pg + boundary_pg;
- start_pg -= start_pg % boundary_pg;
- if (start_pg > (actual_pg + 1))
- {
- int ret = munmap (actual, (start_pg - actual_pg - 1) * pg);
- assert (ret == 0);
- actual = (void *) ((start_pg - 1) * pg);
- }
- char *start = (void *) (start_pg * pg);
- header = ptr_to_signal_safe_allocator_header (start);
- }
-
- header->size = actual_size;
- header->start = actual;
- void *ptr = header;
- ptr += sizeof (*header);
- if (((intptr_t) ptr) % boundary != 0)
- _dl_fatal_printf ("__signal_safe_memalign produced incorrect alignment\n");
- return ptr;
-}
-
-void * weak_function
-__signal_safe_malloc (size_t size)
-{
- return __signal_safe_memalign (1, size);
-}
-
-void weak_function
-__signal_safe_free (void *ptr)
-{
- if (ptr == NULL)
- return;
-
- struct __signal_safe_allocator_header *header
- = ptr_to_signal_safe_allocator_header (ptr);
- int ret = munmap (header->start, header->size);
-
- assert (ret == 0);
-}
-
-void * weak_function
-__signal_safe_realloc (void *ptr, size_t size)
-{
- if (size == 0)
- {
- __signal_safe_free (ptr);
- return NULL;
- }
- if (ptr == NULL)
- return __signal_safe_malloc (size);
-
- struct __signal_safe_allocator_header *header
- = ptr_to_signal_safe_allocator_header (ptr);
- size_t old_size = header->size;
- if (old_size - sizeof (*header) >= size)
- return ptr;
-
- void *new_ptr = __signal_safe_malloc (size);
- if (new_ptr == NULL)
- return NULL;
-
- memcpy (new_ptr, ptr, old_size);
- __signal_safe_free (ptr);
-
- return new_ptr;
-}
-
-void * weak_function
-__signal_safe_calloc (size_t nmemb, size_t size)
-{
- void *ptr = __signal_safe_malloc (nmemb * size);
- if (ptr == NULL)
- return NULL;
- return memset (ptr, 0, nmemb * size);
-}
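
The allocator deleted above rests on the arithmetic its comment
sketches: to return K pages aligned to a J-page boundary with a header
tucked just below the returned pointer, map K+J+1 pages; some J-aligned
run of K pages is then guaranteed to start at least one page past the
mapping's start.  For example, with 4 KiB pages, an 8 KiB request at
64 KiB alignment maps 8 + 64 + 4 = 76 KiB.  A compressed standalone
sketch of the same over-allocate-and-stash-a-header technique (our
reconstruction; unlike the original it does not trim the unused head
and tail pages):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct hdr { size_t map_size; void *map_start; };

    /* Return SIZE bytes aligned to BOUNDARY (a power of two), with a
       struct hdr stashed immediately below the returned pointer.  */
    static void *
    page_memalign (size_t boundary, size_t size)
    {
      size_t pg = (size_t) sysconf (_SC_PAGESIZE);
      /* roundup (size, pg) + boundary + pg: the K + J + 1 pages.  */
      size_t map_size = ((size + pg - 1) & ~(pg - 1)) + boundary + pg;
      char *map = mmap (NULL, map_size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (map == MAP_FAILED)
        return NULL;
      /* First BOUNDARY-aligned address at or past map + one page, so a
         full page of headroom always exists for the header.  */
      uintptr_t aligned = ((uintptr_t) map + pg + boundary - 1)
                          & ~((uintptr_t) boundary - 1);
      struct hdr *h = (struct hdr *) (aligned - sizeof (struct hdr));
      h->map_size = map_size;
      h->map_start = map;
      return (void *) aligned;
    }

    static void
    page_free (void *ptr)
    {
      struct hdr *h
        = (struct hdr *) ((char *) ptr - sizeof (struct hdr));
      munmap (h->map_start, h->map_size);
    }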
diff --git a/libc/elf/dl-open.c b/libc/elf/dl-open.c
index e5e6e069e..6c096d30d 100644
--- a/libc/elf/dl-open.c
+++ b/libc/elf/dl-open.c
@@ -548,10 +548,7 @@ cannot load any more object with static TLS"));
generation of the DSO we are allocating data for. */
_dl_update_slotinfo (imap->l_tls_modid);
#endif
- /* We do this iteration under a signal mask in dl-reloc; why not
- here? Because these symbols are new and dlopen hasn't
- returned yet. So we can't possibly be racing with a TLS
- access to them from another thread. */
+
GL(dl_init_static_tls) (imap);
assert (imap->l_need_tls_init == 0);
}
diff --git a/libc/elf/dl-reloc.c b/libc/elf/dl-reloc.c
index d45891779..4f1279239 100644
--- a/libc/elf/dl-reloc.c
+++ b/libc/elf/dl-reloc.c
@@ -16,10 +16,8 @@
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>. */
-#include <atomic.h>
#include <errno.h>
#include <libintl.h>
-#include <signal.h>
#include <stdlib.h>
#include <unistd.h>
#include <ldsodefs.h>
@@ -72,6 +70,8 @@ _dl_try_allocate_static_tls (struct link_map *map)
size_t offset = GL(dl_tls_static_used) + (freebytes - n * map->l_tls_align
- map->l_tls_firstbyte_offset);
+
+ map->l_tls_offset = GL(dl_tls_static_used) = offset;
#elif TLS_DTV_AT_TP
/* dl_tls_static_used includes the TCB at the beginning. */
size_t offset = (((GL(dl_tls_static_used)
@@ -83,36 +83,7 @@ _dl_try_allocate_static_tls (struct link_map *map)
if (used > GL(dl_tls_static_size))
goto fail;
-#else
-# error "Either TLS_TCB_AT_TP or TLS_DTV_AT_TP must be defined"
-#endif
- /* We've computed the new value we want, now try to install it. */
- ptrdiff_t val;
- if ((val = map->l_tls_offset) == NO_TLS_OFFSET)
- {
- /* l_tls_offset starts out at NO_TLS_OFFSET, and all attempts to
- change it go from NO_TLS_OFFSET to some other value. We use
- compare_and_exchange to ensure only one attempt succeeds. We
- don't actually need any memory ordering here, but _acq is the
- weakest available. */
- (void ) atomic_compare_and_exchange_bool_acq (&map->l_tls_offset,
- offset,
- NO_TLS_OFFSET);
- val = map->l_tls_offset;
- assert (val != NO_TLS_OFFSET);
- }
- if (val != offset)
- {
- /* We'd like to set a static offset for this section, but another
- thread has already used a dynamic TLS block for it. Since we can
- only use static offsets if everyone does (and it's not practical
- to move that thread's dynamic block), we have to fail. */
- goto fail;
- }
- /* We installed the value; now update the globals. */
-#if TLS_TCB_AT_TP
- GL(dl_tls_static_used) = offset;
-#elif TLS_DTV_AT_TP
+ map->l_tls_offset = offset;
map->l_tls_firstbyte_offset = GL(dl_tls_static_used);
GL(dl_tls_static_used) = used;
#else
@@ -143,17 +114,8 @@ void
internal_function __attribute_noinline__
_dl_allocate_static_tls (struct link_map *map)
{
- /* We wrap this in a signal mask because it has to iterate all threads
- (including this one) and update this map's TLS entry. A signal handler
- accessing TLS would try to do the same update and break. */
- sigset_t old;
- _dl_mask_all_signals (&old);
- int err = -1;
- if (map->l_tls_offset != FORCED_DYNAMIC_TLS_OFFSET)
- err = _dl_try_allocate_static_tls (map);
-
- _dl_unmask_signals (&old);
- if (err != 0)
+ if (map->l_tls_offset == FORCED_DYNAMIC_TLS_OFFSET
+ || _dl_try_allocate_static_tls (map))
{
_dl_signal_error (0, map->l_name, NULL, N_("\
cannot allocate memory in static TLS block"));
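
For reference, the compare-and-exchange removed above is a claim-once
protocol: l_tls_offset only ever moves away from NO_TLS_OFFSET once, the
CAS picks a single winner, and losers must live with the winner's value
(hence the goto fail when another thread already went dynamic).  The
same idiom in portable C11 atomics (our restatement; glibc's internal
atomic.h macros differ in spelling):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define NO_OFFSET ((ptrdiff_t) -1)  /* stand-in for NO_TLS_OFFSET */

    /* Try to move *SLOT from NO_OFFSET to OFFSET.  At most one
       caller's compare-exchange succeeds; returns true iff OFFSET is
       what ended up installed.  */
    static bool
    claim_once (_Atomic ptrdiff_t *slot, ptrdiff_t offset)
    {
      ptrdiff_t expected = NO_OFFSET;
      if (atomic_compare_exchange_strong_explicit (slot, &expected,
                                                   offset,
                                                   memory_order_acq_rel,
                                                   memory_order_acquire))
        return true;                 /* we won the claim */
      return expected == offset;     /* someone else installed it */
    }

The patch trades this lock-free claim for taking GL(dl_load_lock) in
dl-tls.c, which is simpler to reason about once signal handlers no
longer enter these paths.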
diff --git a/libc/elf/dl-support.c b/libc/elf/dl-support.c
index 1b8588929..32df06fb6 100644
--- a/libc/elf/dl-support.c
+++ b/libc/elf/dl-support.c
@@ -94,6 +94,7 @@ static struct link_map _dl_main_map =
.l_scope = _dl_main_map.l_scope_mem,
.l_local_scope = { &_dl_main_map.l_searchlist },
.l_used = 1,
+ .l_flags_1 = DF_1_NODEFLIB,
.l_tls_offset = NO_TLS_OFFSET,
.l_serial = 1,
};
@@ -314,6 +315,8 @@ internal_function
_dl_non_dynamic_init (void)
{
_dl_main_map.l_origin = _dl_get_origin ();
+ _dl_main_map.l_phdr = GL(dl_phdr);
+ _dl_main_map.l_phnum = GL(dl_phnum);
if (HP_TIMING_AVAIL)
HP_TIMING_NOW (_dl_cpuclock_offset);
diff --git a/libc/elf/dl-tls.c b/libc/elf/dl-tls.c
index 50ec876ea..dbaea0aa9 100644
--- a/libc/elf/dl-tls.c
+++ b/libc/elf/dl-tls.c
@@ -17,7 +17,6 @@
<http://www.gnu.org/licenses/>. */
#include <assert.h>
-#include <atomic.h>
#include <errno.h>
#include <libintl.h>
#include <signal.h>
@@ -294,7 +293,7 @@ allocate_dtv (void *result)
initial set of modules. This should avoid in most cases expansions
of the dtv. */
dtv_length = GL(dl_tls_max_dtv_idx) + DTV_SURPLUS;
- dtv = __signal_safe_calloc (dtv_length + 2, sizeof (dtv_t));
+ dtv = calloc (dtv_length + 2, sizeof (dtv_t));
if (dtv != NULL)
{
/* This is the initial length of the dtv. */
@@ -464,18 +463,6 @@ _dl_allocate_tls (void *mem)
}
rtld_hidden_def (_dl_allocate_tls)
-void
-internal_function
-_dl_clear_dtv (dtv_t *dtv)
-{
- for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
- if (! dtv[1 + cnt].pointer.is_static
- && dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
- __signal_safe_free (dtv[1 + cnt].pointer.val);
- memset (dtv, '\0', (dtv[-1].counter + 1) * sizeof (dtv_t));
-}
-
-rtld_hidden_def (_dl_clear_dtv)
#ifndef SHARED
extern dtv_t _dl_static_dtv[];
@@ -492,11 +479,11 @@ _dl_deallocate_tls (void *tcb, bool dealloc_tcb)
for (size_t cnt = 0; cnt < dtv[-1].counter; ++cnt)
if (! dtv[1 + cnt].pointer.is_static
&& dtv[1 + cnt].pointer.val != TLS_DTV_UNALLOCATED)
- __signal_safe_free (dtv[1 + cnt].pointer.val);
+ free (dtv[1 + cnt].pointer.val);
/* The array starts with dtv[-1]. */
if (dtv != GL(dl_initial_dtv))
- __signal_safe_free (dtv - 1);
+ free (dtv - 1);
if (dealloc_tcb)
{
@@ -534,21 +521,20 @@ rtld_hidden_def (_dl_deallocate_tls)
# endif
-static void
-allocate_and_init (dtv_t *dtv, struct link_map *map)
+static void *
+allocate_and_init (struct link_map *map)
{
void *newp;
- newp = __signal_safe_memalign (map->l_tls_align, map->l_tls_blocksize);
+
+ newp = __libc_memalign (map->l_tls_align, map->l_tls_blocksize);
if (newp == NULL)
oom ();
- /* Initialize the memory. Since this is our thread's space, we are
- under a signal mask, and no one has touched this section before,
- we can safely just overwrite whatever's there. */
+ /* Initialize the memory. */
memset (__mempcpy (newp, map->l_tls_initimage, map->l_tls_initimage_size),
'\0', map->l_tls_blocksize - map->l_tls_initimage_size);
- dtv->pointer.val = newp;
+ return newp;
}
@@ -590,15 +576,7 @@ _dl_update_slotinfo (unsigned long int req_modid)
the entry we need. */
size_t new_gen = listp->slotinfo[idx].gen;
size_t total = 0;
- sigset_t old;
-
- _dl_mask_all_signals (&old);
- /* We use the signal mask as a lock against reentrancy here.
- Check that a signal taken before the lock didn't already
- update us. */
- dtv = THREAD_DTV ();
- if (dtv[0].counter >= listp->slotinfo[idx].gen)
- goto out;
+
/* We have to look through the entire dtv slotinfo list. */
listp = GL(dl_tls_dtv_slotinfo_list);
do
@@ -618,27 +596,25 @@ _dl_update_slotinfo (unsigned long int req_modid)
if (gen <= dtv[0].counter)
continue;
- size_t modid = total + cnt;
-
/* If there is no map this means the entry is empty. */
struct link_map *map = listp->slotinfo[cnt].map;
if (map == NULL)
{
/* If this modid was used at some point the memory
might still be allocated. */
- if (dtv[-1].counter >= modid
- && !dtv[modid].pointer.is_static
- && dtv[modid].pointer.val != TLS_DTV_UNALLOCATED)
+ if (! dtv[total + cnt].pointer.is_static
+ && dtv[total + cnt].pointer.val != TLS_DTV_UNALLOCATED)
{
- __signal_safe_free (dtv[modid].pointer.val);
- dtv[modid].pointer.val = TLS_DTV_UNALLOCATED;
+ free (dtv[total + cnt].pointer.val);
+ dtv[total + cnt].pointer.val = TLS_DTV_UNALLOCATED;
}
continue;
}
- assert (modid == map->l_tls_modid);
/* Check whether the current dtv array is large enough. */
+ size_t modid = map->l_tls_modid;
+ assert (total + cnt == modid);
if (dtv[-1].counter < modid)
{
/* Reallocate the dtv. */
@@ -652,18 +628,17 @@ _dl_update_slotinfo (unsigned long int req_modid)
{
/* This is the initial dtv that was allocated
during rtld startup using the dl-minimal.c
- malloc instead of the real allocator. We can't
+ malloc instead of the real malloc. We can't
free it, we have to abandon the old storage. */
- newp = __signal_safe_malloc (
- (2 + newsize) * sizeof (dtv_t));
+ newp = malloc ((2 + newsize) * sizeof (dtv_t));
if (newp == NULL)
oom ();
memcpy (newp, &dtv[-1], (2 + oldsize) * sizeof (dtv_t));
}
else
{
- newp = __signal_safe_realloc (&dtv[-1],
+ newp = realloc (&dtv[-1],
(2 + newsize) * sizeof (dtv_t));
if (newp == NULL)
oom ();
@@ -693,7 +668,7 @@ _dl_update_slotinfo (unsigned long int req_modid)
deallocate even if it is this dtv entry we are
supposed to load. The reason is that we call
memalign and not malloc. */
- __signal_safe_free (dtv[modid].pointer.val);
+ free (dtv[modid].pointer.val);
/* This module is loaded dynamically.  We defer memory
allocation. */
@@ -710,8 +685,6 @@ _dl_update_slotinfo (unsigned long int req_modid)
/* This will be the new maximum generation counter. */
dtv[0].counter = new_gen;
- out:
- _dl_unmask_signals (&old);
}
return the_map;
@@ -737,60 +710,39 @@ tls_get_addr_tail (GET_ADDR_ARGS, dtv_t *dtv, struct link_map *the_map)
the_map = listp->slotinfo[idx].map;
}
- sigset_t old;
- _dl_mask_all_signals (&old);
-
- /* As with update_slotinfo, we use the sigmask as a check against
- reentrancy. */
- if (dtv[GET_ADDR_MODULE].pointer.val != TLS_DTV_UNALLOCATED)
- goto out;
-
- /* Synchronize against a parallel dlopen() forcing this variable
- into static storage. If that happens, we have to be more careful
- about initializing the area, as that dlopen() will be iterating
- the threads to do so itself. */
- ptrdiff_t offset;
- if ((offset = the_map->l_tls_offset) == NO_TLS_OFFSET)
- {
- /* l_tls_offset starts out at NO_TLS_OFFSET, and all attempts to
- change it go from NO_TLS_OFFSET to some other value. We use
- compare_and_exchange to ensure only one attempt succeeds. We
- don't actually need any memory ordering here, but _acq is the
- weakest available. */
- (void) atomic_compare_and_exchange_bool_acq (&the_map->l_tls_offset,
- FORCED_DYNAMIC_TLS_OFFSET,
- NO_TLS_OFFSET);
- offset = the_map->l_tls_offset;
- assert (offset != NO_TLS_OFFSET);
- }
- if (offset == FORCED_DYNAMIC_TLS_OFFSET)
- {
- allocate_and_init (&dtv[GET_ADDR_MODULE], the_map);
- }
- else
+
+ again:
+ /* Make sure that, if a dlopen running in parallel forces the
+ variable into static storage, we'll wait until the address in the
+ static TLS block is set up, and use that. If we're undecided
+ yet, make sure we make the decision holding the lock as well. */
+ if (__builtin_expect (the_map->l_tls_offset
+ != FORCED_DYNAMIC_TLS_OFFSET, 0))
{
- void **pp = &dtv[GET_ADDR_MODULE].pointer.val;
- while (atomic_forced_read (*pp) == TLS_DTV_UNALLOCATED)
+ __rtld_lock_lock_recursive (GL(dl_load_lock));
+ if (__builtin_expect (the_map->l_tls_offset == NO_TLS_OFFSET, 1))
{
- /* for lack of a better (safe) thing to do, just spin.
- Someone else (not us; it's done under a signal mask) set
- this map to a static TLS offset, and they'll iterate all
- threads to initialize it. They'll eventually write
- to pointer.val, at which point we know they've fully
- completed initialization. */
- atomic_delay ();
+ the_map->l_tls_offset = FORCED_DYNAMIC_TLS_OFFSET;
+ __rtld_lock_unlock_recursive (GL(dl_load_lock));
+ }
+ else
+ {
+ __rtld_lock_unlock_recursive (GL(dl_load_lock));
+ if (__builtin_expect (the_map->l_tls_offset
+ != FORCED_DYNAMIC_TLS_OFFSET, 1))
+ {
+ void *p = dtv[GET_ADDR_MODULE].pointer.val;
+ if (__builtin_expect (p == TLS_DTV_UNALLOCATED, 0))
+ goto again;
+
+ return (char *) p + GET_ADDR_OFFSET;
+ }
}
- /* Make sure we've picked up their initialization of the actual
- block; this pairs against the write barrier in
- init_one_static_tls, guaranteeing that we see their write of
- the tls_initimage into the static region. */
- atomic_read_barrier ();
}
-out:
- assert (dtv[GET_ADDR_MODULE].pointer.val != TLS_DTV_UNALLOCATED);
- _dl_unmask_signals (&old);
+ void *p = dtv[GET_ADDR_MODULE].pointer.val = allocate_and_init (the_map);
+ dtv[GET_ADDR_MODULE].pointer.is_static = false;
- return (char *) dtv[GET_ADDR_MODULE].pointer.val + GET_ADDR_OFFSET;
+ return (char *) p + GET_ADDR_OFFSET;
}
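
The rewritten tail replaces mask-and-spin synchronization with a
double-checked decision under GL(dl_load_lock): check l_tls_offset on
the fast path, take the lock only while the module is still undecided,
and jump back to `again' when a concurrent dlopen won the race but has
not yet published the static block's address in the dtv.  Reduced to
its shape (hypothetical names; a plain pthread mutex standing in for
the recursive rtld lock):

    #include <pthread.h>

    enum tls_state { UNDECIDED, FORCED_DYNAMIC, STATIC_TLS };

    static pthread_mutex_t load_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Decide the storage for one module: claim FORCED_DYNAMIC if
       nobody has decided yet, otherwise honor the decision a
       concurrent dlopen already made.  */
    static enum tls_state
    decide_storage (enum tls_state *state)
    {
      if (*state == FORCED_DYNAMIC)    /* fast path, no lock needed */
        return FORCED_DYNAMIC;

      pthread_mutex_lock (&load_lock);
      if (*state == UNDECIDED)
        *state = FORCED_DYNAMIC;       /* we got here first */
      enum tls_state result = *state;
      pthread_mutex_unlock (&load_lock);
      return result;
    }

In the real code the STATIC_TLS outcome additionally waits, via the
goto again loop, until dtv[GET_ADDR_MODULE].pointer.val has been filled
in by the dlopen that forced the variable into the static block.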