diff options
author | Ivan Maidanski <ivmai@mail.ru> | 2022-08-22 07:36:37 +0300 |
---|---|---|
committer | Ivan Maidanski <ivmai@mail.ru> | 2022-08-24 13:57:06 +0300 |
commit | ec82377f1c28e749c18f47d77891f4ad10481bab (patch) | |
tree | a0758cb982356bdf117a5ac53665d76fdedbe2dc | |
parent | da821621ecd6fa4d233f16ecd455b9c5d54d01e1 (diff) | |
download | libatomic_ops-ec82377f1c28e749c18f47d77891f4ad10481bab.tar.gz |
Avoid 'cast increases required alignment' warnings in atomic_ops_malloc.c
* src/atomic_ops_malloc.c [HAVE_MMAP] (AO_malloc_large): Change type of
result local variable from char* to void*.
* src/atomic_ops_malloc.c [HAVE_MMAP] (AO_free_large): Change type of
argument from char* to void*.
* src/atomic_ops_malloc.c (add_chunk_as): Cast to AO_t* from void*
directly (adjust ofs accordingly); add assertion that sz is multiple
of AO_t size.
* src/atomic_ops_malloc.c (AO_free): Remove cast to char* in
AO_free_large() call.
-rw-r--r-- | src/atomic_ops_malloc.c | 19 |
1 file changed, 11 insertions, 8 deletions
diff --git a/src/atomic_ops_malloc.c b/src/atomic_ops_malloc.c index 595ec83..8c6dc64 100644 --- a/src/atomic_ops_malloc.c +++ b/src/atomic_ops_malloc.c @@ -185,7 +185,8 @@ static char *get_mmaped(size_t sz) static char * AO_malloc_large(size_t sz) { - char *result; + void *result; + /* The header will force us to waste ALIGNMENT bytes, incl. header. */ /* Round to multiple of CHUNK_SIZE. */ sz = SIZET_SAT_ADD(sz, ALIGNMENT + CHUNK_SIZE - 1) & ~(CHUNK_SIZE - 1); @@ -193,16 +194,17 @@ AO_malloc_large(size_t sz) result = get_mmaped(sz); if (AO_EXPECT_FALSE(NULL == result)) return NULL; - result += ALIGNMENT; + + result = (AO_t *)result + ALIGNMENT / sizeof(AO_t); ((AO_t *)result)[-1] = (AO_t)sz; - return result; + return (char *)result; } static void -AO_free_large(char * p) +AO_free_large(void *p) { AO_t sz = ((AO_t *)p)[-1]; - if (munmap(p - ALIGNMENT, (size_t)sz) != 0) + if (munmap((AO_t *)p - ALIGNMENT / sizeof(AO_t), (size_t)sz) != 0) abort(); /* Programmer error. Not really async-signal-safe, but ... */ } @@ -266,12 +268,13 @@ static void add_chunk_as(void * chunk, unsigned log_sz) size_t ofs, limit; size_t sz = (size_t)1 << log_sz; - assert (CHUNK_SIZE >= sz); + assert(CHUNK_SIZE >= sz); + assert(sz % sizeof(AO_t) == 0); limit = (size_t)CHUNK_SIZE - sz; for (ofs = ALIGNMENT - sizeof(AO_t); ofs <= limit; ofs += sz) { ASAN_POISON_MEMORY_REGION((char *)chunk + ofs + sizeof(AO_t), sz - sizeof(AO_t)); - AO_stack_push(&AO_free_list[log_sz], (AO_t *)((char *)chunk + ofs)); + AO_stack_push(&AO_free_list[log_sz], (AO_t *)chunk + ofs / sizeof(AO_t)); } } @@ -374,7 +377,7 @@ AO_free(void *p) log_sz > LOG_MAX_SIZE ? (unsigned)log_sz : 1UL << log_sz); # endif if (AO_EXPECT_FALSE(log_sz > LOG_MAX_SIZE)) { - AO_free_large((char *)p); + AO_free_large(p); } else { ASAN_POISON_MEMORY_REGION(base + 1, ((size_t)1 << log_sz) - sizeof(AO_t)); AO_stack_push(AO_free_list + log_sz, base); |