author | Ondřej Bílka <neleai@seznam.cz> | 2014-01-02 09:38:18 +0100
committer | Ondřej Bílka <neleai@seznam.cz> | 2014-01-02 09:40:10 +0100
commit | 6c8dbf00f536d78b1937b5af6f57be47fd376344 (patch)
tree | ad86d3e7433a907cac50ebbd9c39ca3402a87c6a /malloc/hooks.c
parent | 9a3c6a6ff602c88d7155139a7d7d0000b7b7e946 (diff)
download | glibc-6c8dbf00f536d78b1937b5af6f57be47fd376344.tar.gz
Reformat malloc to GNU style.
Diffstat (limited to 'malloc/hooks.c')
-rw-r--r-- | malloc/hooks.c | 648
1 file changed, 354 insertions, 294 deletions
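The patch below is purely mechanical: it rewrites the old compact, K&R-flavored layout into the GNU coding style, i.e. a space before each argument list, the `*` bound to the declarator rather than the return type, and braces on their own lines, indented under the statement that controls them. As a minimal before/after sketch (clamp_old and clamp_new are made-up names, not functions from the patch):

#include <stddef.h>

/* The compact layout malloc used before this commit.  */
static void*
clamp_old(void* p, size_t n)
{
  if(!p) return NULL;
  if(n > 0x100) n = 0x100;
  return (char*)p + n;
}

/* The same function in the GNU style used after this commit.  */
static void *
clamp_new (void *p, size_t n)
{
  if (!p)
    return NULL;

  if (n > 0x100)
    n = 0x100;
  return (char *) p + n;
}

Every hunk that follows is an instance of this rewrite; no control flow, data structure, or behavior changes.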
diff --git a/malloc/hooks.c b/malloc/hooks.c
index 1b80a74e28..00ee6bec8c 100644
--- a/malloc/hooks.c
+++ b/malloc/hooks.c
@@ -24,29 +24,29 @@
 /* Hooks for debugging versions.  The initial hooks just call the
    initialization routine, then do the normal work. */
 
-static void*
-malloc_hook_ini(size_t sz, const void *caller)
+static void *
+malloc_hook_ini (size_t sz, const void *caller)
 {
   __malloc_hook = NULL;
-  ptmalloc_init();
-  return __libc_malloc(sz);
+  ptmalloc_init ();
+  return __libc_malloc (sz);
 }
 
-static void*
-realloc_hook_ini(void* ptr, size_t sz, const void *caller)
+static void *
+realloc_hook_ini (void *ptr, size_t sz, const void *caller)
 {
   __malloc_hook = NULL;
   __realloc_hook = NULL;
-  ptmalloc_init();
-  return __libc_realloc(ptr, sz);
+  ptmalloc_init ();
+  return __libc_realloc (ptr, sz);
 }
 
-static void*
-memalign_hook_ini(size_t alignment, size_t sz, const void *caller)
+static void *
+memalign_hook_ini (size_t alignment, size_t sz, const void *caller)
 {
   __memalign_hook = NULL;
-  ptmalloc_init();
-  return __libc_memalign(alignment, sz);
+  ptmalloc_init ();
+  return __libc_memalign (alignment, sz);
 }
 
 /* Whether we are using malloc checking.  */
@@ -71,10 +71,11 @@ static int disallow_malloc_check;
 void
 __malloc_check_init (void)
 {
-  if (disallow_malloc_check) {
-    disallow_malloc_check = 0;
-    return;
-  }
+  if (disallow_malloc_check)
+    {
+      disallow_malloc_check = 0;
+      return;
+    }
   using_malloc_checking = 1;
   __malloc_hook = malloc_check;
   __free_hook = free_check;
@@ -87,7 +88,7 @@ __malloc_check_init (void)
    overruns.  The goal here is to avoid obscure crashes due to invalid
    usage, unlike in the MALLOC_DEBUG code. */
 
-#define MAGICBYTE(p) ( ( ((size_t)p >> 3) ^ ((size_t)p >> 11)) & 0xFF )
+#define MAGICBYTE(p) ((((size_t) p >> 3) ^ ((size_t) p >> 11)) & 0xFF)
 
 /* Visualize the chunk as being partitioned into blocks of 256 bytes from the
    highest address of the chunk, downwards.  The beginning of each block tells
@@ -96,53 +97,58 @@ __malloc_check_init (void)
    must reach it with this iteration, otherwise we have witnessed a memory
    corruption.  */
 static size_t
-malloc_check_get_size(mchunkptr p)
+malloc_check_get_size (mchunkptr p)
 {
   size_t size;
   unsigned char c;
-  unsigned char magic = MAGICBYTE(p);
+  unsigned char magic = MAGICBYTE (p);
 
-  assert(using_malloc_checking == 1);
+  assert (using_malloc_checking == 1);
 
-  for (size = chunksize(p) - 1 + (chunk_is_mmapped(p) ? 0 : SIZE_SZ);
-       (c = ((unsigned char*)p)[size]) != magic;
-       size -= c) {
-    if(c<=0 || size<(c+2*SIZE_SZ)) {
-      malloc_printerr(check_action, "malloc_check_get_size: memory corruption",
-                      chunk2mem(p));
-      return 0;
+  for (size = chunksize (p) - 1 + (chunk_is_mmapped (p) ? 0 : SIZE_SZ);
+       (c = ((unsigned char *) p)[size]) != magic;
+       size -= c)
+    {
+      if (c <= 0 || size < (c + 2 * SIZE_SZ))
+        {
+          malloc_printerr (check_action, "malloc_check_get_size: memory corruption",
+                           chunk2mem (p));
+          return 0;
+        }
     }
-  }
 
   /* chunk2mem size.  */
-  return size - 2*SIZE_SZ;
+  return size - 2 * SIZE_SZ;
 }
 
 /* Instrument a chunk with overrun detector byte(s) and convert it
    into a user pointer with requested size sz. */
 
-static void*
+static void *
 internal_function
-mem2mem_check(void *ptr, size_t sz)
+mem2mem_check (void *ptr, size_t sz)
 {
   mchunkptr p;
-  unsigned char* m_ptr = ptr;
+  unsigned char *m_ptr = ptr;
   size_t i;
 
   if (!ptr)
     return ptr;
-  p = mem2chunk(ptr);
-  for(i = chunksize(p) - (chunk_is_mmapped(p) ? 2*SIZE_SZ+1 : SIZE_SZ+1);
-      i > sz;
-      i -= 0xFF) {
-    if(i-sz < 0x100) {
-      m_ptr[i] = (unsigned char)(i-sz);
-      break;
+
+  p = mem2chunk (ptr);
+  for (i = chunksize (p) - (chunk_is_mmapped (p) ? 2 * SIZE_SZ + 1 : SIZE_SZ + 1);
+       i > sz;
+       i -= 0xFF)
+    {
+      if (i - sz < 0x100)
+        {
+          m_ptr[i] = (unsigned char) (i - sz);
+          break;
+        }
+      m_ptr[i] = 0xFF;
    }
-    m_ptr[i] = 0xFF;
-  }
-  m_ptr[sz] = MAGICBYTE(p);
-  return (void*)m_ptr;
+  m_ptr[sz] = MAGICBYTE (p);
+  return (void *) m_ptr;
 }
 
 /* Convert a pointer to be free()d or realloc()ed to a valid chunk
@@ -150,53 +156,64 @@ mem2mem_check (void *ptr, size_t sz)
 
 static mchunkptr
 internal_function
-mem2chunk_check(void* mem, unsigned char **magic_p)
+mem2chunk_check (void *mem, unsigned char **magic_p)
 {
   mchunkptr p;
   INTERNAL_SIZE_T sz, c;
   unsigned char magic;
 
-  if(!aligned_OK(mem)) return NULL;
-  p = mem2chunk(mem);
-  if (!chunk_is_mmapped(p)) {
-    /* Must be a chunk in conventional heap memory. */
-    int contig = contiguous(&main_arena);
-    sz = chunksize(p);
-    if((contig &&
-        ((char*)p<mp_.sbrk_base ||
-         ((char*)p + sz)>=(mp_.sbrk_base+main_arena.system_mem) )) ||
-       sz<MINSIZE || sz&MALLOC_ALIGN_MASK || !inuse(p) ||
-       ( !prev_inuse(p) && (p->prev_size&MALLOC_ALIGN_MASK ||
-                            (contig && (char*)prev_chunk(p)<mp_.sbrk_base) ||
-                            next_chunk(prev_chunk(p))!=p) ))
-      return NULL;
-    magic = MAGICBYTE(p);
-    for(sz += SIZE_SZ-1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
-      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
+  if (!aligned_OK (mem))
+    return NULL;
+
+  p = mem2chunk (mem);
+  if (!chunk_is_mmapped (p))
+    {
+      /* Must be a chunk in conventional heap memory. */
+      int contig = contiguous (&main_arena);
+      sz = chunksize (p);
+      if ((contig &&
+           ((char *) p < mp_.sbrk_base ||
+            ((char *) p + sz) >= (mp_.sbrk_base + main_arena.system_mem))) ||
+          sz < MINSIZE || sz & MALLOC_ALIGN_MASK || !inuse (p) ||
+          (!prev_inuse (p) && (p->prev_size & MALLOC_ALIGN_MASK ||
                               (contig && (char *) prev_chunk (p) < mp_.sbrk_base) ||
                               next_chunk (prev_chunk (p)) != p)))
+        return NULL;
+
+      magic = MAGICBYTE (p);
+      for (sz += SIZE_SZ - 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
+        {
+          if (c <= 0 || sz < (c + 2 * SIZE_SZ))
+            return NULL;
+        }
     }
-  } else {
-    unsigned long offset, page_mask = GLRO(dl_pagesize)-1;
-
-    /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
-       alignment relative to the beginning of a page.  Check this
-       first. */
-    offset = (unsigned long)mem & page_mask;
-    if((offset!=MALLOC_ALIGNMENT && offset!=0 && offset!=0x10 &&
-        offset!=0x20 && offset!=0x40 && offset!=0x80 && offset!=0x100 &&
-        offset!=0x200 && offset!=0x400 && offset!=0x800 && offset!=0x1000 &&
-        offset<0x2000) ||
-       !chunk_is_mmapped(p) || (p->size & PREV_INUSE) ||
-       ( (((unsigned long)p - p->prev_size) & page_mask) != 0 ) ||
-       ( (sz = chunksize(p)), ((p->prev_size + sz) & page_mask) != 0 ) )
-      return NULL;
-    magic = MAGICBYTE(p);
-    for(sz -= 1; (c = ((unsigned char*)p)[sz]) != magic; sz -= c) {
-      if(c<=0 || sz<(c+2*SIZE_SZ)) return NULL;
+  else
+    {
+      unsigned long offset, page_mask = GLRO (dl_pagesize) - 1;
+
+      /* mmap()ed chunks have MALLOC_ALIGNMENT or higher power-of-two
+         alignment relative to the beginning of a page.  Check this
+         first. */
+      offset = (unsigned long) mem & page_mask;
+      if ((offset != MALLOC_ALIGNMENT && offset != 0 && offset != 0x10 &&
+           offset != 0x20 && offset != 0x40 && offset != 0x80 && offset != 0x100 &&
+           offset != 0x200 && offset != 0x400 && offset != 0x800 && offset != 0x1000 &&
+           offset < 0x2000) ||
+          !chunk_is_mmapped (p) || (p->size & PREV_INUSE) ||
+          ((((unsigned long) p - p->prev_size) & page_mask) != 0) ||
+          ((sz = chunksize (p)), ((p->prev_size + sz) & page_mask) != 0))
+        return NULL;
+
+      magic = MAGICBYTE (p);
+      for (sz -= 1; (c = ((unsigned char *) p)[sz]) != magic; sz -= c)
+        {
+          if (c <= 0 || sz < (c + 2 * SIZE_SZ))
+            return NULL;
+        }
    }
-  }
-  ((unsigned char*)p)[sz] ^= 0xFF;
+  ((unsigned char *) p)[sz] ^= 0xFF;
   if (magic_p)
-    *magic_p = (unsigned char *)p + sz;
+    *magic_p = (unsigned char *) p + sz;
   return p;
 }
 
@@ -205,32 +222,32 @@ mem2chunk_check(void* mem, unsigned char **magic_p)
 
 static int
 internal_function
-top_check(void)
+top_check (void)
 {
-  mchunkptr t = top(&main_arena);
-  char* brk, * new_brk;
+  mchunkptr t = top (&main_arena);
+  char *brk, *new_brk;
   INTERNAL_SIZE_T front_misalign, sbrk_size;
-  unsigned long pagesz = GLRO(dl_pagesize);
-
-  if (t == initial_top(&main_arena) ||
-      (!chunk_is_mmapped(t) &&
-       chunksize(t)>=MINSIZE &&
-       prev_inuse(t) &&
-       (!contiguous(&main_arena) ||
-        (char*)t + chunksize(t) == mp_.sbrk_base + main_arena.system_mem)))
+  unsigned long pagesz = GLRO (dl_pagesize);
+
+  if (t == initial_top (&main_arena) ||
+      (!chunk_is_mmapped (t) &&
+       chunksize (t) >= MINSIZE &&
+       prev_inuse (t) &&
+       (!contiguous (&main_arena) ||
+        (char *) t + chunksize (t) == mp_.sbrk_base + main_arena.system_mem)))
     return 0;
 
   malloc_printerr (check_action, "malloc: top chunk is corrupt", t);
 
   /* Try to set up a new top chunk. */
-  brk = MORECORE(0);
-  front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
+  brk = MORECORE (0);
+  front_misalign = (unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK;
   if (front_misalign > 0)
     front_misalign = MALLOC_ALIGNMENT - front_misalign;
   sbrk_size = front_misalign + mp_.top_pad + MINSIZE;
-  sbrk_size += pagesz - ((unsigned long)(brk + sbrk_size) & (pagesz - 1));
-  new_brk = (char*)(MORECORE (sbrk_size));
-  if (new_brk == (char*)(MORECORE_FAILURE))
+  sbrk_size += pagesz - ((unsigned long) (brk + sbrk_size) & (pagesz - 1));
+  new_brk = (char *) (MORECORE (sbrk_size));
+  if (new_brk == (char *) (MORECORE_FAILURE))
     {
       __set_errno (ENOMEM);
      return -1;
@@ -238,128 +255,148 @@ top_check(void)
 
   /* Call the `morecore' hook if necessary.  */
   void (*hook) (void) = atomic_forced_read (__after_morecore_hook);
   if (hook)
-    (*hook) ();
+    (*hook)();
 
   main_arena.system_mem = (new_brk - mp_.sbrk_base) + sbrk_size;
-  top(&main_arena) = (mchunkptr)(brk + front_misalign);
-  set_head(top(&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
+  top (&main_arena) = (mchunkptr) (brk + front_misalign);
+  set_head (top (&main_arena), (sbrk_size - front_misalign) | PREV_INUSE);
 
   return 0;
 }
 
-static void*
-malloc_check(size_t sz, const void *caller)
+static void *
+malloc_check (size_t sz, const void *caller)
 {
   void *victim;
 
-  if (sz+1 == 0) {
-    __set_errno (ENOMEM);
-    return NULL;
-  }
+  if (sz + 1 == 0)
+    {
+      __set_errno (ENOMEM);
+      return NULL;
+    }
 
-  (void)mutex_lock(&main_arena.mutex);
-  victim = (top_check() >= 0) ? _int_malloc(&main_arena, sz+1) : NULL;
-  (void)mutex_unlock(&main_arena.mutex);
-  return mem2mem_check(victim, sz);
+  (void) mutex_lock (&main_arena.mutex);
+  victim = (top_check () >= 0) ? _int_malloc (&main_arena, sz + 1) : NULL;
+  (void) mutex_unlock (&main_arena.mutex);
+  return mem2mem_check (victim, sz);
 }
 
 static void
-free_check(void* mem, const void *caller)
+free_check (void *mem, const void *caller)
 {
   mchunkptr p;
 
-  if(!mem) return;
-  (void)mutex_lock(&main_arena.mutex);
-  p = mem2chunk_check(mem, NULL);
-  if(!p) {
-    (void)mutex_unlock(&main_arena.mutex);
-
-    malloc_printerr(check_action, "free(): invalid pointer", mem);
+  if (!mem)
     return;
-  }
-  if (chunk_is_mmapped(p)) {
-    (void)mutex_unlock(&main_arena.mutex);
-    munmap_chunk(p);
-    return;
-  }
-  _int_free(&main_arena, p, 1);
-  (void)mutex_unlock(&main_arena.mutex);
+
+  (void) mutex_lock (&main_arena.mutex);
+  p = mem2chunk_check (mem, NULL);
+  if (!p)
+    {
+      (void) mutex_unlock (&main_arena.mutex);
+
+      malloc_printerr (check_action, "free(): invalid pointer", mem);
+      return;
+    }
+  if (chunk_is_mmapped (p))
+    {
+      (void) mutex_unlock (&main_arena.mutex);
+      munmap_chunk (p);
+      return;
+    }
+  _int_free (&main_arena, p, 1);
+  (void) mutex_unlock (&main_arena.mutex);
 }
 
-static void*
-realloc_check(void* oldmem, size_t bytes, const void *caller)
+static void *
+realloc_check (void *oldmem, size_t bytes, const void *caller)
 {
   INTERNAL_SIZE_T nb;
-  void* newmem = 0;
+  void *newmem = 0;
   unsigned char *magic_p;
 
-  if (bytes+1 == 0) {
-    __set_errno (ENOMEM);
-    return NULL;
-  }
-  if (oldmem == 0) return malloc_check(bytes, NULL);
-  if (bytes == 0) {
-    free_check (oldmem, NULL);
-    return NULL;
-  }
-  (void)mutex_lock(&main_arena.mutex);
-  const mchunkptr oldp = mem2chunk_check(oldmem, &magic_p);
-  (void)mutex_unlock(&main_arena.mutex);
-  if(!oldp) {
-    malloc_printerr(check_action, "realloc(): invalid pointer", oldmem);
-    return malloc_check(bytes, NULL);
-  }
-  const INTERNAL_SIZE_T oldsize = chunksize(oldp);
-
-  checked_request2size(bytes+1, nb);
-  (void)mutex_lock(&main_arena.mutex);
-
-  if (chunk_is_mmapped(oldp)) {
+  if (bytes + 1 == 0)
+    {
+      __set_errno (ENOMEM);
+      return NULL;
+    }
+  if (oldmem == 0)
+    return malloc_check (bytes, NULL);
+
+  if (bytes == 0)
+    {
+      free_check (oldmem, NULL);
+      return NULL;
+    }
+  (void) mutex_lock (&main_arena.mutex);
+  const mchunkptr oldp = mem2chunk_check (oldmem, &magic_p);
+  (void) mutex_unlock (&main_arena.mutex);
+  if (!oldp)
+    {
+      malloc_printerr (check_action, "realloc(): invalid pointer", oldmem);
+      return malloc_check (bytes, NULL);
+    }
+  const INTERNAL_SIZE_T oldsize = chunksize (oldp);
+
+  checked_request2size (bytes + 1, nb);
+  (void) mutex_lock (&main_arena.mutex);
+
+  if (chunk_is_mmapped (oldp))
+    {
 #if HAVE_MREMAP
-    mchunkptr newp = mremap_chunk(oldp, nb);
-    if(newp)
-      newmem = chunk2mem(newp);
-    else
+      mchunkptr newp = mremap_chunk (oldp, nb);
+      if (newp)
+        newmem = chunk2mem (newp);
+      else
#endif
-    {
-      /* Note the extra SIZE_SZ overhead. */
-      if(oldsize - SIZE_SZ >= nb)
-        newmem = oldmem; /* do nothing */
-      else {
-        /* Must alloc, copy, free. */
-        if (top_check() >= 0)
-          newmem = _int_malloc(&main_arena, bytes+1);
-        if (newmem) {
-          memcpy(newmem, oldmem, oldsize - 2*SIZE_SZ);
-          munmap_chunk(oldp);
-        }
+      {
+        /* Note the extra SIZE_SZ overhead. */
+        if (oldsize - SIZE_SZ >= nb)
+          newmem = oldmem; /* do nothing */
+        else
+          {
+            /* Must alloc, copy, free. */
+            if (top_check () >= 0)
+              newmem = _int_malloc (&main_arena, bytes + 1);
+            if (newmem)
+              {
+                memcpy (newmem, oldmem, oldsize - 2 * SIZE_SZ);
+                munmap_chunk (oldp);
+              }
+          }
      }
    }
-  } else {
-    if (top_check() >= 0) {
-      INTERNAL_SIZE_T nb;
-      checked_request2size(bytes + 1, nb);
-      newmem = _int_realloc(&main_arena, oldp, oldsize, nb);
+  else
+    {
+      if (top_check () >= 0)
+        {
+          INTERNAL_SIZE_T nb;
+          checked_request2size (bytes + 1, nb);
+          newmem = _int_realloc (&main_arena, oldp, oldsize, nb);
+        }
    }
-  }
 
   /* mem2chunk_check changed the magic byte in the old chunk.
      If newmem is NULL, then the old chunk will still be used though,
      so we need to invert that change here.  */
-  if (newmem == NULL) *magic_p ^= 0xFF;
+  if (newmem == NULL)
+    *magic_p ^= 0xFF;
 
-  (void)mutex_unlock(&main_arena.mutex);
+  (void) mutex_unlock (&main_arena.mutex);
 
-  return mem2mem_check(newmem, bytes);
+  return mem2mem_check (newmem, bytes);
 }
 
-static void*
-memalign_check(size_t alignment, size_t bytes, const void *caller)
+static void *
+memalign_check (size_t alignment, size_t bytes, const void *caller)
 {
-  void* mem;
+  void *mem;
+
+  if (alignment <= MALLOC_ALIGNMENT)
+    return malloc_check (bytes, NULL);
 
-  if (alignment <= MALLOC_ALIGNMENT) return malloc_check(bytes, NULL);
-  if (alignment < MINSIZE) alignment = MINSIZE;
+  if (alignment < MINSIZE)
+    alignment = MINSIZE;
 
   /* If the alignment is greater than SIZE_MAX / 2 + 1 it cannot be a
      power of 2 and will cause overflow in the check below.  */
@@ -377,17 +414,19 @@ memalign_check(size_t alignment, size_t bytes, const void *caller)
     }
 
   /* Make sure alignment is power of 2.  */
-  if (!powerof2(alignment)) {
-    size_t a = MALLOC_ALIGNMENT * 2;
-    while (a < alignment) a <<= 1;
-    alignment = a;
-  }
-
-  (void)mutex_lock(&main_arena.mutex);
-  mem = (top_check() >= 0) ? _int_memalign(&main_arena, alignment, bytes+1) :
-    NULL;
-  (void)mutex_unlock(&main_arena.mutex);
-  return mem2mem_check(mem, bytes);
+  if (!powerof2 (alignment))
+    {
+      size_t a = MALLOC_ALIGNMENT * 2;
+      while (a < alignment)
+        a <<= 1;
+      alignment = a;
+    }
+
+  (void) mutex_lock (&main_arena.mutex);
+  mem = (top_check () >= 0) ? _int_memalign (&main_arena, alignment, bytes + 1) :
+        NULL;
+  (void) mutex_unlock (&main_arena.mutex);
+  return mem2mem_check (mem, bytes);
 }
 
 
@@ -408,59 +447,63 @@ memalign_check(size_t alignment, size_t bytes, const void *caller)
    then the hooks are reset to 0.  */
 
 #define MALLOC_STATE_MAGIC   0x444c4541l
-#define MALLOC_STATE_VERSION (0*0x100l + 4l) /* major*0x100 + minor */
-
-struct malloc_save_state {
-  long          magic;
-  long          version;
-  mbinptr       av[NBINS * 2 + 2];
-  char*         sbrk_base;
-  int           sbrked_mem_bytes;
+#define MALLOC_STATE_VERSION (0 * 0x100l + 4l) /* major*0x100 + minor */
+
+struct malloc_save_state
+{
+  long magic;
+  long version;
+  mbinptr av[NBINS * 2 + 2];
+  char *sbrk_base;
+  int sbrked_mem_bytes;
   unsigned long trim_threshold;
   unsigned long top_pad;
-  unsigned int  n_mmaps_max;
+  unsigned int n_mmaps_max;
   unsigned long mmap_threshold;
-  int           check_action;
+  int check_action;
   unsigned long max_sbrked_mem;
   unsigned long max_total_mem;
-  unsigned int  n_mmaps;
-  unsigned int  max_n_mmaps;
+  unsigned int n_mmaps;
+  unsigned int max_n_mmaps;
   unsigned long mmapped_mem;
   unsigned long max_mmapped_mem;
-  int           using_malloc_checking;
+  int using_malloc_checking;
   unsigned long max_fast;
   unsigned long arena_test;
   unsigned long arena_max;
   unsigned long narenas;
 };
 
-void*
-__malloc_get_state(void)
+void *
+__malloc_get_state (void)
 {
-  struct malloc_save_state* ms;
+  struct malloc_save_state *ms;
   int i;
   mbinptr b;
 
-  ms = (struct malloc_save_state*)__libc_malloc(sizeof(*ms));
+  ms = (struct malloc_save_state *) __libc_malloc (sizeof (*ms));
   if (!ms)
     return 0;
-  (void)mutex_lock(&main_arena.mutex);
-  malloc_consolidate(&main_arena);
+
+  (void) mutex_lock (&main_arena.mutex);
+  malloc_consolidate (&main_arena);
   ms->magic = MALLOC_STATE_MAGIC;
   ms->version = MALLOC_STATE_VERSION;
   ms->av[0] = 0;
   ms->av[1] = 0; /* used to be binblocks, now no longer used */
-  ms->av[2] = top(&main_arena);
+  ms->av[2] = top (&main_arena);
   ms->av[3] = 0; /* used to be undefined */
-  for(i=1; i<NBINS; i++) {
-    b = bin_at(&main_arena, i);
-    if(first(b) == b)
-      ms->av[2*i+2] = ms->av[2*i+3] = 0; /* empty bin */
-    else {
-      ms->av[2*i+2] = first(b);
-      ms->av[2*i+3] = last(b);
+  for (i = 1; i < NBINS; i++)
+    {
+      b = bin_at (&main_arena, i);
+      if (first (b) == b)
+        ms->av[2 * i + 2] = ms->av[2 * i + 3] = 0; /* empty bin */
+      else
+        {
+          ms->av[2 * i + 2] = first (b);
+          ms->av[2 * i + 3] = last (b);
+        }
    }
-  }
   ms->sbrk_base = mp_.sbrk_base;
   ms->sbrked_mem_bytes = main_arena.system_mem;
   ms->trim_threshold = mp_.trim_threshold;
@@ -475,78 +518,92 @@ __malloc_get_state(void)
   ms->mmapped_mem = mp_.mmapped_mem;
   ms->max_mmapped_mem = mp_.max_mmapped_mem;
   ms->using_malloc_checking = using_malloc_checking;
-  ms->max_fast = get_max_fast();
+  ms->max_fast = get_max_fast ();
   ms->arena_test = mp_.arena_test;
   ms->arena_max = mp_.arena_max;
   ms->narenas = narenas;
-  (void)mutex_unlock(&main_arena.mutex);
-  return (void*)ms;
+  (void) mutex_unlock (&main_arena.mutex);
+  return (void *) ms;
 }
 
 int
-__malloc_set_state(void* msptr)
+__malloc_set_state (void *msptr)
 {
-  struct malloc_save_state* ms = (struct malloc_save_state*)msptr;
+  struct malloc_save_state *ms = (struct malloc_save_state *) msptr;
   size_t i;
   mbinptr b;
 
   disallow_malloc_check = 1;
-  ptmalloc_init();
-  if(ms->magic != MALLOC_STATE_MAGIC) return -1;
+  ptmalloc_init ();
+  if (ms->magic != MALLOC_STATE_MAGIC)
+    return -1;
+
   /* Must fail if the major version is too high. */
-  if((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl)) return -2;
-  (void)mutex_lock(&main_arena.mutex);
+  if ((ms->version & ~0xffl) > (MALLOC_STATE_VERSION & ~0xffl))
+    return -2;
+
+  (void) mutex_lock (&main_arena.mutex);
   /* There are no fastchunks.  */
-  clear_fastchunks(&main_arena);
+  clear_fastchunks (&main_arena);
   if (ms->version >= 4)
-    set_max_fast(ms->max_fast);
+    set_max_fast (ms->max_fast);
   else
-    set_max_fast(64); /* 64 used to be the value we always used.  */
-  for (i=0; i<NFASTBINS; ++i)
+    set_max_fast (64);  /* 64 used to be the value we always used.  */
+  for (i = 0; i < NFASTBINS; ++i)
     fastbin (&main_arena, i) = 0;
-  for (i=0; i<BINMAPSIZE; ++i)
+  for (i = 0; i < BINMAPSIZE; ++i)
     main_arena.binmap[i] = 0;
-  top(&main_arena) = ms->av[2];
+  top (&main_arena) = ms->av[2];
   main_arena.last_remainder = 0;
-  for(i=1; i<NBINS; i++) {
-    b = bin_at(&main_arena, i);
-    if(ms->av[2*i+2] == 0) {
-      assert(ms->av[2*i+3] == 0);
-      first(b) = last(b) = b;
-    } else {
-      if(ms->version >= 3 &&
-         (i<NSMALLBINS || (largebin_index(chunksize(ms->av[2*i+2]))==i &&
-                           largebin_index(chunksize(ms->av[2*i+3]))==i))) {
-        first(b) = ms->av[2*i+2];
-        last(b) = ms->av[2*i+3];
-        /* Make sure the links to the bins within the heap are correct.  */
-        first(b)->bk = b;
-        last(b)->fd = b;
-        /* Set bit in binblocks.  */
-        mark_bin(&main_arena, i);
-      } else {
-        /* Oops, index computation from chunksize must have changed.
-           Link the whole list into unsorted_chunks.  */
-        first(b) = last(b) = b;
-        b = unsorted_chunks(&main_arena);
-        ms->av[2*i+2]->bk = b;
-        ms->av[2*i+3]->fd = b->fd;
-        b->fd->bk = ms->av[2*i+3];
-        b->fd = ms->av[2*i+2];
-      }
+  for (i = 1; i < NBINS; i++)
+    {
+      b = bin_at (&main_arena, i);
+      if (ms->av[2 * i + 2] == 0)
+        {
+          assert (ms->av[2 * i + 3] == 0);
+          first (b) = last (b) = b;
+        }
+      else
+        {
+          if (ms->version >= 3 &&
+              (i < NSMALLBINS || (largebin_index (chunksize (ms->av[2 * i + 2])) == i &&
+                                  largebin_index (chunksize (ms->av[2 * i + 3])) == i)))
+            {
+              first (b) = ms->av[2 * i + 2];
+              last (b) = ms->av[2 * i + 3];
+              /* Make sure the links to the bins within the heap are correct.  */
+              first (b)->bk = b;
+              last (b)->fd = b;
+              /* Set bit in binblocks.  */
+              mark_bin (&main_arena, i);
+            }
+          else
+            {
+              /* Oops, index computation from chunksize must have changed.
+                 Link the whole list into unsorted_chunks.  */
+              first (b) = last (b) = b;
+              b = unsorted_chunks (&main_arena);
+              ms->av[2 * i + 2]->bk = b;
+              ms->av[2 * i + 3]->fd = b->fd;
+              b->fd->bk = ms->av[2 * i + 3];
+              b->fd = ms->av[2 * i + 2];
+            }
+        }
    }
-  }
-  if (ms->version < 3) {
-    /* Clear fd_nextsize and bk_nextsize fields.  */
-    b = unsorted_chunks(&main_arena)->fd;
-    while (b != unsorted_chunks(&main_arena)) {
-      if (!in_smallbin_range(chunksize(b))) {
-        b->fd_nextsize = NULL;
-        b->bk_nextsize = NULL;
-      }
-      b = b->fd;
+  if (ms->version < 3)
+    {
+      /* Clear fd_nextsize and bk_nextsize fields.  */
+      b = unsorted_chunks (&main_arena)->fd;
+      while (b != unsorted_chunks (&main_arena))
+        {
+          if (!in_smallbin_range (chunksize (b)))
+            {
+              b->fd_nextsize = NULL;
+              b->bk_nextsize = NULL;
+            }
+          b = b->fd;
+        }
    }
-  }
   mp_.sbrk_base = ms->sbrk_base;
   main_arena.system_mem = ms->sbrked_mem_bytes;
   mp_.trim_threshold = ms->trim_threshold;
@@ -560,28 +617,31 @@ __malloc_set_state(void* msptr)
   mp_.mmapped_mem = ms->mmapped_mem;
   mp_.max_mmapped_mem = ms->max_mmapped_mem;
   /* add version-dependent code here */
-  if (ms->version >= 1) {
-    /* Check whether it is safe to enable malloc checking, or whether
-       it is necessary to disable it.  */
-    if (ms->using_malloc_checking && !using_malloc_checking &&
-        !disallow_malloc_check)
-      __malloc_check_init ();
-    else if (!ms->using_malloc_checking && using_malloc_checking) {
-      __malloc_hook = NULL;
-      __free_hook = NULL;
-      __realloc_hook = NULL;
-      __memalign_hook = NULL;
-      using_malloc_checking = 0;
+  if (ms->version >= 1)
+    {
+      /* Check whether it is safe to enable malloc checking, or whether
+         it is necessary to disable it.  */
+      if (ms->using_malloc_checking && !using_malloc_checking &&
+          !disallow_malloc_check)
+        __malloc_check_init ();
+      else if (!ms->using_malloc_checking && using_malloc_checking)
+        {
+          __malloc_hook = NULL;
+          __free_hook = NULL;
+          __realloc_hook = NULL;
+          __memalign_hook = NULL;
+          using_malloc_checking = 0;
+        }
    }
-  }
-  if (ms->version >= 4) {
-    mp_.arena_test = ms->arena_test;
-    mp_.arena_max = ms->arena_max;
-    narenas = ms->narenas;
-  }
-  check_malloc_state(&main_arena);
-
-  (void)mutex_unlock(&main_arena.mutex);
+  if (ms->version >= 4)
+    {
+      mp_.arena_test = ms->arena_test;
+      mp_.arena_max = ms->arena_max;
+      narenas = ms->narenas;
+    }
+  check_malloc_state (&main_arena);
+
+  (void) mutex_unlock (&main_arena.mutex);
   return 0;
 }