summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    Volker Lendecke <vl@samba.org>  2013-09-06 14:08:43 -0700
committer Karolin Seeger <kseeger@samba.org>  2015-03-15 22:14:08 +0100
commit   e5ff3e1b10eb5833f23f852454073625a1bd847a (patch)
tree     692210d5a0be0656a132cdeb1cc59bbaa6c65685
parent   aa2a731d1b124044b195f2730fedeac099230a31 (diff)
download samba-e5ff3e1b10eb5833f23f852454073625a1bd847a.tar.gz
talloc: Put pool-specific data before the chunk
This is a preparation to make talloc pool real objects themselves. Signed-off-by: Volker Lendecke <vl@samba.org> Signed-off-by: Jeremy Allison <jra@samba.org> (cherry picked from commit b87c8fd435d1863d6efcec03830ecd85ddfcd7fb)
-rw-r--r-- lib/talloc/talloc.c | 204
1 file changed, 125 insertions(+), 79 deletions(-)
diff --git a/lib/talloc/talloc.c b/lib/talloc/talloc.c
index 21d675d3a2c..a553050e6eb 100644
--- a/lib/talloc/talloc.c
+++ b/lib/talloc/talloc.c
@@ -244,7 +244,7 @@ static void talloc_memlimit_update_on_free(struct talloc_chunk *tc);
typedef int (*talloc_destructor_t)(void *);
-union talloc_pool_chunk;
+struct talloc_pool_hdr;
struct talloc_chunk {
struct talloc_chunk *next, *prev;
@@ -270,7 +270,7 @@ struct talloc_chunk {
* allocated from. This way children can quickly find the pool to chew
* from.
*/
- union talloc_pool_chunk *pool;
+ struct talloc_pool_hdr *pool;
};
/* 16 byte alignment seems to keep everyone happy */
@@ -458,31 +458,37 @@ _PUBLIC_ const char *talloc_parent_name(const void *ptr)
memory footprint of each talloc chunk by those 16 bytes.
*/
-union talloc_pool_chunk {
- /* This lets object_count nestle into 16-byte padding of talloc_chunk,
- * on 32-bit platforms. */
- struct tc_pool_hdr {
- struct talloc_chunk c;
- void *next;
- unsigned int object_count;
- } hdr;
- /* This makes it always 16 byte aligned. */
- char pad[TC_ALIGN16(sizeof(struct tc_pool_hdr))];
+struct talloc_pool_hdr {
+ void *end;
+ unsigned int object_count;
};
-static void *tc_pool_end(union talloc_pool_chunk *pool_tc)
+#define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr))
+
+static struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c)
+{
+ return (struct talloc_pool_hdr *)((char *)c - TP_HDR_SIZE);
+}
+
+static struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h)
{
- return (char *)pool_tc + TC_HDR_SIZE + pool_tc->hdr.c.size;
+ return (struct talloc_chunk *)((char *)h + TP_HDR_SIZE);
}
-static size_t tc_pool_space_left(union talloc_pool_chunk *pool_tc)
+static void *tc_pool_end(struct talloc_pool_hdr *pool_hdr)
{
- return (char *)tc_pool_end(pool_tc) - (char *)pool_tc->hdr.next;
+ struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr);
+ return (char *)tc + TC_HDR_SIZE + tc->size;
}
-static void *tc_pool_first_chunk(union talloc_pool_chunk *pool_tc)
+static size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr)
{
- return pool_tc + 1;
+ return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end;
+}
+
+static void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr)
+{
+ return TC_PTR_FROM_CHUNK(talloc_chunk_from_pool(pool_hdr));
}
/* If tc is inside a pool, this gives the next neighbour. */
@@ -492,16 +498,16 @@ static void *tc_next_chunk(struct talloc_chunk *tc)
}
/* Mark the whole remaining pool as not accessable */
-static void tc_invalidate_pool(union talloc_pool_chunk *pool_tc)
+static void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr)
{
- size_t flen = tc_pool_space_left(pool_tc);
+ size_t flen = tc_pool_space_left(pool_hdr);
if (unlikely(talloc_fill.enabled)) {
- memset(pool_tc->hdr.next, talloc_fill.fill_value, flen);
+ memset(pool_hdr->end, talloc_fill.fill_value, flen);
}
#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
- VALGRIND_MAKE_MEM_NOACCESS(pool_tc->hdr.next, flen);
+ VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen);
#endif
}
@@ -512,7 +518,7 @@ static void tc_invalidate_pool(union talloc_pool_chunk *pool_tc)
static struct talloc_chunk *talloc_alloc_pool(struct talloc_chunk *parent,
size_t size, size_t prefix_len)
{
- union talloc_pool_chunk *pool_ctx = NULL;
+ struct talloc_pool_hdr *pool_hdr = NULL;
size_t space_left;
struct talloc_chunk *result;
size_t chunk_size;
@@ -522,17 +528,17 @@ static struct talloc_chunk *talloc_alloc_pool(struct talloc_chunk *parent,
}
if (parent->flags & TALLOC_FLAG_POOL) {
- pool_ctx = (union talloc_pool_chunk *)parent;
+ pool_hdr = talloc_pool_from_chunk(parent);
}
else if (parent->flags & TALLOC_FLAG_POOLMEM) {
- pool_ctx = parent->pool;
+ pool_hdr = parent->pool;
}
- if (pool_ctx == NULL) {
+ if (pool_hdr == NULL) {
return NULL;
}
- space_left = tc_pool_space_left(pool_ctx);
+ space_left = tc_pool_space_left(pool_hdr);
/*
* Align size to 16 bytes
@@ -543,19 +549,18 @@ static struct talloc_chunk *talloc_alloc_pool(struct talloc_chunk *parent,
return NULL;
}
- result = (struct talloc_chunk *)
- ((char *)pool_ctx->hdr.next + prefix_len);
+ result = (struct talloc_chunk *)((char *)pool_hdr->end + prefix_len);
#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
- VALGRIND_MAKE_MEM_UNDEFINED(result, size);
+ VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size);
#endif
- pool_ctx->hdr.next = (void *)((char *)result + chunk_size);
+ pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size);
result->flags = TALLOC_MAGIC | TALLOC_FLAG_POOLMEM;
- result->pool = pool_ctx;
+ result->pool = pool_hdr;
- pool_ctx->hdr.object_count++;
+ pool_hdr->object_count++;
return result;
}
@@ -593,6 +598,8 @@ static inline void *__talloc_with_prefix(const void *context, size_t size,
}
if (tc == NULL) {
+ char *ptr;
+
/*
* Only do the memlimit check/update on actual allocation.
*/
@@ -601,8 +608,11 @@ static inline void *__talloc_with_prefix(const void *context, size_t size,
return NULL;
}
- tc = (struct talloc_chunk *)malloc(total_len);
- if (unlikely(tc == NULL)) return NULL;
+ ptr = malloc(total_len);
+ if (unlikely(ptr == NULL)) {
+ return NULL;
+ }
+ tc = (struct talloc_chunk *)(ptr + prefix_len);
tc->flags = TALLOC_MAGIC;
tc->pool = NULL;
@@ -647,27 +657,32 @@ static inline void *__talloc(const void *context, size_t size)
_PUBLIC_ void *talloc_pool(const void *context, size_t size)
{
- union talloc_pool_chunk *pool_tc;
- void *result = __talloc(context, sizeof(*pool_tc) - TC_HDR_SIZE + size);
+ struct talloc_chunk *tc;
+ struct talloc_pool_hdr *pool_hdr;
+ void *result;
+
+ result = __talloc_with_prefix(context, size, TP_HDR_SIZE);
if (unlikely(result == NULL)) {
return NULL;
}
- pool_tc = (union talloc_pool_chunk *)talloc_chunk_from_ptr(result);
- if (unlikely(pool_tc->hdr.c.flags & TALLOC_FLAG_POOLMEM)) {
+ tc = talloc_chunk_from_ptr(result);
+ pool_hdr = talloc_pool_from_chunk(tc);
+
+ if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
/* We don't handle this correctly, so fail. */
talloc_log("talloc: cannot allocate pool off another pool %s\n",
talloc_get_name(context));
talloc_free(result);
return NULL;
}
- pool_tc->hdr.c.flags |= TALLOC_FLAG_POOL;
- pool_tc->hdr.next = tc_pool_first_chunk(pool_tc);
+ tc->flags |= TALLOC_FLAG_POOL;
- pool_tc->hdr.object_count = 1;
+ pool_hdr->object_count = 1;
+ pool_hdr->end = result;
- tc_invalidate_pool(pool_tc);
+ tc_invalidate_pool(pool_hdr);
return result;
}
@@ -770,10 +785,12 @@ static void *_talloc_steal_internal(const void *new_ctx, const void *ptr);
static inline void _talloc_free_poolmem(struct talloc_chunk *tc,
const char *location)
{
- union talloc_pool_chunk *pool;
+ struct talloc_pool_hdr *pool;
+ struct talloc_chunk *pool_tc;
void *next_tc;
pool = tc->pool;
+ pool_tc = talloc_chunk_from_pool(pool);
next_tc = tc_next_chunk(tc);
tc->flags |= TALLOC_FLAG_FREE;
@@ -786,15 +803,15 @@ static inline void _talloc_free_poolmem(struct talloc_chunk *tc,
TC_INVALIDATE_FULL_CHUNK(tc);
- if (unlikely(pool->hdr.object_count == 0)) {
+ if (unlikely(pool->object_count == 0)) {
talloc_abort("Pool object count zero!");
return;
}
- pool->hdr.object_count--;
+ pool->object_count--;
- if (unlikely(pool->hdr.object_count == 1
- && !(pool->hdr.c.flags & TALLOC_FLAG_FREE))) {
+ if (unlikely(pool->object_count == 1
+ && !(pool_tc->flags & TALLOC_FLAG_FREE))) {
/*
* if there is just one object left in the pool
* and pool->flags does not have TALLOC_FLAG_FREE,
@@ -802,33 +819,33 @@ static inline void _talloc_free_poolmem(struct talloc_chunk *tc,
* the rest is available for new objects
* again.
*/
- pool->hdr.next = tc_pool_first_chunk(pool);
+ pool->end = tc_pool_first_chunk(pool);
tc_invalidate_pool(pool);
return;
}
- if (unlikely(pool->hdr.object_count == 0)) {
+ if (unlikely(pool->object_count == 0)) {
/*
* we mark the freed memory with where we called the free
* from. This means on a double free error we can report where
* the first free came from
*/
- pool->hdr.c.name = location;
+ pool_tc->name = location;
- talloc_memlimit_update_on_free(&pool->hdr.c);
+ talloc_memlimit_update_on_free(pool_tc);
- TC_INVALIDATE_FULL_CHUNK(&pool->hdr.c);
+ TC_INVALIDATE_FULL_CHUNK(pool_tc);
free(pool);
return;
}
- if (pool->hdr.next == next_tc) {
+ if (pool->end == next_tc) {
/*
* if pool->pool still points to end of
* 'tc' (which is stored in the 'next_tc' variable),
* we can reclaim the memory of 'tc'.
*/
- pool->hdr.next = tc;
+ pool->end = tc;
return;
}
@@ -924,23 +941,30 @@ static inline int _talloc_free_internal(void *ptr, const char *location)
tc->name = location;
if (tc->flags & TALLOC_FLAG_POOL) {
- union talloc_pool_chunk *pool = (union talloc_pool_chunk *)tc;
+ struct talloc_pool_hdr *pool;
+
+ pool = talloc_pool_from_chunk(tc);
- if (unlikely(pool->hdr.object_count == 0)) {
+ if (unlikely(pool->object_count == 0)) {
talloc_abort("Pool object count zero!");
return 0;
}
- pool->hdr.object_count--;
+ pool->object_count--;
- if (likely(pool->hdr.object_count != 0)) {
+ if (likely(pool->object_count != 0)) {
return 0;
}
+ /*
+ * This call takes into account the
+ * prefix TP_HDR_SIZE allocated before
+ * the pool talloc_chunk.
+ */
talloc_memlimit_update_on_free(tc);
TC_INVALIDATE_FULL_CHUNK(tc);
- free(tc);
+ free(pool);
return 0;
}
@@ -1486,7 +1510,7 @@ _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, cons
struct talloc_chunk *tc;
void *new_ptr;
bool malloced = false;
- union talloc_pool_chunk *pool_tc = NULL;
+ struct talloc_pool_hdr *pool_hdr = NULL;
size_t old_size = 0;
size_t new_size = 0;
@@ -1526,19 +1550,19 @@ _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, cons
/* handle realloc inside a talloc_pool */
if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
- pool_tc = tc->pool;
+ pool_hdr = tc->pool;
}
#if (ALWAYS_REALLOC == 0)
/* don't shrink if we have less than 1k to gain */
if (size < tc->size && tc->limit == NULL) {
- if (pool_tc) {
+ if (pool_hdr) {
void *next_tc = tc_next_chunk(tc);
TC_INVALIDATE_SHRINK_CHUNK(tc, size);
tc->size = size;
- if (next_tc == pool_tc->hdr.next) {
+ if (next_tc == pool_hdr->end) {
/* note: tc->size has changed, so this works */
- pool_tc->hdr.next = tc_next_chunk(tc);
+ pool_hdr->end = tc_next_chunk(tc);
}
return ptr;
} else if ((tc->size - size) < 1024) {
@@ -1569,9 +1593,9 @@ _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, cons
tc->flags |= TALLOC_FLAG_FREE;
#if ALWAYS_REALLOC
- if (pool_tc) {
+ if (pool_hdr) {
new_ptr = talloc_alloc_pool(tc, size + TC_HDR_SIZE, 0);
- pool_tc->hdr.object_count--;
+ pool_hdr->object_count--;
if (new_ptr == NULL) {
new_ptr = malloc(TC_HDR_SIZE+size);
@@ -1594,15 +1618,17 @@ _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, cons
}
}
#else
- if (pool_tc) {
+ if (pool_hdr) {
+ struct talloc_chunk *pool_tc;
void *next_tc = tc_next_chunk(tc);
size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size);
size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
size_t space_needed;
size_t space_left;
- unsigned int chunk_count = pool_tc->hdr.object_count;
+ unsigned int chunk_count = pool_hdr->object_count;
- if (!(pool_tc->hdr.c.flags & TALLOC_FLAG_FREE)) {
+ pool_tc = talloc_chunk_from_pool(pool_hdr);
+ if (!(pool_tc->flags & TALLOC_FLAG_FREE)) {
chunk_count -= 1;
}
@@ -1611,9 +1637,9 @@ _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, cons
* optimize for the case where 'tc' is the only
* chunk in the pool.
*/
- char *start = tc_pool_first_chunk(pool_tc);
+ char *start = tc_pool_first_chunk(pool_hdr);
space_needed = new_chunk_size;
- space_left = (char *)tc_pool_end(pool_tc) - start;
+ space_left = (char *)tc_pool_end(pool_hdr) - start;
if (space_left >= space_needed) {
size_t old_used = TC_HDR_SIZE + tc->size;
@@ -1650,11 +1676,11 @@ _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, cons
* because we want to invalidate the padding
* too.
*/
- pool_tc->hdr.next = new_used + (char *)new_ptr;
- tc_invalidate_pool(pool_tc);
+ pool_hdr->end = new_used + (char *)new_ptr;
+ tc_invalidate_pool(pool_hdr);
/* now the aligned pointer */
- pool_tc->hdr.next = new_chunk_size + (char *)new_ptr;
+ pool_hdr->end = new_chunk_size + (char *)new_ptr;
goto got_new_ptr;
}
@@ -1668,19 +1694,19 @@ _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, cons
return ptr;
}
- if (next_tc == pool_tc->hdr.next) {
+ if (next_tc == pool_hdr->end) {
/*
* optimize for the case where 'tc' is the last
* chunk in the pool.
*/
space_needed = new_chunk_size - old_chunk_size;
- space_left = tc_pool_space_left(pool_tc);
+ space_left = tc_pool_space_left(pool_hdr);
if (space_left >= space_needed) {
TC_UNDEFINE_GROW_CHUNK(tc, size);
tc->flags &= ~TALLOC_FLAG_FREE;
tc->size = size;
- pool_tc->hdr.next = tc_next_chunk(tc);
+ pool_hdr->end = tc_next_chunk(tc);
return ptr;
}
}
@@ -1822,6 +1848,13 @@ static size_t _talloc_total_mem_internal(const void *ptr,
*/
if (!(tc->flags & TALLOC_FLAG_POOLMEM)) {
total = tc->size + TC_HDR_SIZE;
+ /*
+ * If this is a pool, remember to
+ * add the prefix length.
+ */
+ if (tc->flags & TALLOC_FLAG_POOL) {
+ total += TP_HDR_SIZE;
+ }
}
}
break;
@@ -2579,6 +2612,8 @@ static bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size)
*/
static void talloc_memlimit_update_on_free(struct talloc_chunk *tc)
{
+ size_t limit_shrink_size;
+
if (!tc->limit) {
return;
}
@@ -2597,7 +2632,18 @@ static void talloc_memlimit_update_on_free(struct talloc_chunk *tc)
* we need to subtract the memory used from the counters
*/
- talloc_memlimit_shrink(tc->limit, tc->size+TC_HDR_SIZE);
+ limit_shrink_size = tc->size+TC_HDR_SIZE;
+
+ /*
+ * If we're deallocating a pool, take into
+ * account the prefix size added for the pool.
+ */
+
+ if (tc->flags & TALLOC_FLAG_POOL) {
+ limit_shrink_size += TP_HDR_SIZE;
+ }
+
+ talloc_memlimit_shrink(tc->limit, limit_shrink_size);
if (tc->limit->parent == tc) {
free(tc->limit);