diff options
author | sf <sf@13f79535-47bb-0310-9956-ffa450edef68> | 2011-03-20 22:09:25 +0000 |
---|---|---|
committer | sf <sf@13f79535-47bb-0310-9956-ffa450edef68> | 2011-03-20 22:09:25 +0000 |
commit | 5be920d7f2485815995341cc1ddf0a42fcba18c2 (patch) | |
tree | e0109f9e2cd5c3a90c2a2452c55b134b34ae32f2 /memory | |
parent | 215a6e4ec6f0cab9324b96b626eb03b860b38e7b (diff) | |
download | libapr-5be920d7f2485815995341cc1ddf0a42fcba18c2.tar.gz |
Backport r1072165:
Add new configure option --enable-allocator-uses-mmap to use mmap
instead of malloc in apr_allocator_alloc(). This greatly reduces
memory fragmentation with malloc implementations (e.g. glibc) that
don't handle allocations of page-size multiples in an efficient way.
It also makes apr_allocator_max_free_set() actually have some effect
on such platforms.
The handling of page sizes other than 4k seems like a lot of trouble for a
very small number of platforms, but there does not seem to be a reasonable
way to check this at compile time.
git-svn-id: http://svn.apache.org/repos/asf/apr/apr/branches/1.5.x@1083592 13f79535-47bb-0310-9956-ffa450edef68
Diffstat (limited to 'memory')
-rw-r--r-- | memory/unix/apr_pools.c | 41 |
1 files changed, 40 insertions, 1 deletions
diff --git a/memory/unix/apr_pools.c b/memory/unix/apr_pools.c index fcc0bf0b2..2178ebf44 100644 --- a/memory/unix/apr_pools.c +++ b/memory/unix/apr_pools.c @@ -36,9 +36,12 @@ #endif #if APR_HAVE_UNISTD_H -#include <unistd.h> /* for getpid */ +#include <unistd.h> /* for getpid and sysconf */ #endif +#if APR_ALLOCATOR_USES_MMAP +#include <sys/mman.h> +#endif /* * Magic numbers @@ -47,8 +50,15 @@ #define MIN_ALLOC 8192 #define MAX_INDEX 20 +#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE) +static unsigned int boundary_index; +static unsigned int boundary_size; +#define BOUNDARY_INDEX boundary_index +#define BOUNDARY_SIZE boundary_size +#else #define BOUNDARY_INDEX 12 #define BOUNDARY_SIZE (1 << BOUNDARY_INDEX) +#endif /* * Timing constants for killing subprocesses @@ -131,7 +141,11 @@ APR_DECLARE(void) apr_allocator_destroy(apr_allocator_t *allocator) ref = &allocator->free[index]; while ((node = *ref) != NULL) { *ref = node->next; +#if APR_ALLOCATOR_USES_MMAP + munmap(node, (node->index+1) << BOUNDARY_INDEX); +#else free(node); +#endif } } @@ -323,7 +337,12 @@ apr_memnode_t *allocator_alloc(apr_allocator_t *allocator, apr_size_t in_size) /* If we haven't got a suitable node, malloc a new one * and initialize it. 
*/ +#if APR_ALLOCATOR_USES_MMAP + if ((node = mmap(NULL, size, PROT_READ|PROT_WRITE, + MAP_PRIVATE|MAP_ANON, -1, 0)) == MAP_FAILED) +#else if ((node = malloc(size)) == NULL) +#endif return NULL; node->next = NULL; @@ -400,7 +419,11 @@ void allocator_free(apr_allocator_t *allocator, apr_memnode_t *node) while (freelist != NULL) { node = freelist; freelist = node->next; +#if APR_ALLOCATOR_USES_MMAP + munmap(node, (node->index+1) << BOUNDARY_INDEX); +#else free(node); +#endif } } @@ -549,6 +572,14 @@ APR_DECLARE(apr_status_t) apr_pool_initialize(void) if (apr_pools_initialized++) return APR_SUCCESS; +#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE) + boundary_size = sysconf(_SC_PAGESIZE); + boundary_index = 12; + while ( (1 << boundary_index) < boundary_size) + boundary_index++; + boundary_size = (1 << boundary_index); +#endif + if ((rv = apr_allocator_create(&global_allocator)) != APR_SUCCESS) { apr_pools_initialized = 0; return rv; @@ -1336,6 +1367,14 @@ APR_DECLARE(apr_status_t) apr_pool_initialize(void) if (apr_pools_initialized++) return APR_SUCCESS; +#if APR_ALLOCATOR_USES_MMAP && defined(_SC_PAGESIZE) + boundary_size = sysconf(_SC_PAGESIZE); + boundary_index = 12; + while ( (1 << boundary_index) < boundary_size) + boundary_index++; + boundary_size = (1 << boundary_index); +#endif + /* Since the debug code works a bit differently then the * regular pools code, we ask for a lock here. The regular * pools code has got this lock embedded in the global |