author      Ömer Sinan Ağacan <omer@well-typed.com>    2018-03-05 15:57:47 +0300
committer   Ben Gamari <ben@smart-cactus.org>          2019-10-18 15:27:56 -0400
commit      921e4e360a1244ee63241efc62da28642343fece (patch)
tree        456292c188343645c56c882d393965fe201b273a /testsuite/tests/rts/testblockalloc.c
parent      c4c9904b324736dc5d190a91418e8d8f564d4104 (diff)
download    haskell-wip/gc/aligned-block-allocation.tar.gz
rts/BlockAlloc: Allow aligned allocation requests (branch: wip/gc/aligned-block-allocation)
This implements support for block group allocations which are aligned to an integral number of blocks. This will be used by the nonmoving garbage collector, which uses the block allocator to allocate the segments which back its heap. These segments are a fixed number of blocks in size, with each segment being aligned to the segment size boundary. This allows us to easily find the segment metadata stored at the beginning of the segment.
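As a rough illustration of the motivation (a minimal sketch with hypothetical names and sizes, not the actual nonmoving-GC types): once every segment is allocated aligned to its own size, the metadata stored at the start of a segment can be recovered from any interior pointer with a single mask, with no search or side table.

    #include <stdint.h>

    #define SEGMENT_BLOCKS 64     /* hypothetical blocks per segment */
    #define SEG_BLOCK_SIZE 4096   /* hypothetical block size */
    #define SEGMENT_SIZE   ((uintptr_t)SEGMENT_BLOCKS * SEG_BLOCK_SIZE)

    struct segment_header { uintptr_t live_words; /* ... per-segment metadata */ };

    /* Valid only because each segment is allocated aligned to SEGMENT_SIZE:
     * round the pointer down to the segment boundary by masking off the
     * low bits. */
    static inline struct segment_header *segment_of(void *p)
    {
        return (struct segment_header *)((uintptr_t)p & ~(SEGMENT_SIZE - 1));
    }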
Diffstat (limited to 'testsuite/tests/rts/testblockalloc.c')
-rw-r--r--   testsuite/tests/rts/testblockalloc.c   121
1 file changed, 84 insertions(+), 37 deletions(-)
diff --git a/testsuite/tests/rts/testblockalloc.c b/testsuite/tests/rts/testblockalloc.c
index 577245f45e..53eed24015 100644
--- a/testsuite/tests/rts/testblockalloc.c
+++ b/testsuite/tests/rts/testblockalloc.c
@@ -3,6 +3,7 @@
#include <stdio.h>
extern bdescr *allocGroup_lock(uint32_t n);
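+// Returns a group of `n` blocks whose start address is aligned to an
+// `n * BLOCK_SIZE` boundary; `node` names the NUMA node to allocate on.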
+extern bdescr *allocAlignedGroupOnNode (uint32_t node, W_ n);
extern void freeGroup_lock(bdescr *p);
const int ARRSIZE = 256;
@@ -13,64 +14,110 @@ const int SEED = 0xf00f00;
extern StgWord mblocks_allocated;
-int main (int argc, char *argv[])
+static void test_random_alloc(void)
{
- int i, j, b;
-
bdescr *a[ARRSIZE];
- srand(SEED);
+    // repeatedly sweep through the array, allocating new random-sized
+    // objects and deallocating the old ones.
+ for (int i=0; i < LOOPS; i++)
+ {
+ for (int j=0; j < ARRSIZE; j++)
+ {
+ if (i > 0)
+ {
+ IF_DEBUG(block_alloc, debugBelch("A%d: freeing %p, %d blocks @ %p\n", j, a[j], a[j]->blocks, a[j]->start));
+ freeGroup_lock(a[j]);
+ DEBUG_ONLY(checkFreeListSanity());
+ }
+
+            // the "+ 1" matters: allocating zero blocks isn't allowed
+            int b = (rand() % MAXALLOC) + 1;
+            a[j] = allocGroup_lock(b);
+            IF_DEBUG(block_alloc, debugBelch("A%d: allocated %p, %d blocks @ %p\n", j, a[j], b, a[j]->start));
+            DEBUG_ONLY(checkFreeListSanity());
+ }
+ }
+ for (int j=0; j < ARRSIZE; j++)
{
- RtsConfig conf = defaultRtsConfig;
- conf.rts_opts_enabled = RtsOptsAll;
- hs_init_ghc(&argc, &argv, conf);
+ freeGroup_lock(a[j]);
}
+}
+
+static void test_sequential_alloc(void)
+{
+ bdescr *a[ARRSIZE];
- // repeatedly sweep though the array, allocating new random-sized
- // objects and deallocating the old ones.
- for (i=0; i < LOOPS; i++)
- {
- for (j=0; j < ARRSIZE; j++)
- {
- if (i > 0)
- {
- IF_DEBUG(block_alloc, debugBelch("A%d: freeing %p, %d blocks @ %p\n", j, a[j], a[j]->blocks, a[j]->start));
- freeGroup_lock(a[j]);
- DEBUG_ONLY(checkFreeListSanity());
- }
- b = (rand() % MAXALLOC) + 1;
- a[j] = allocGroup_lock(b);
- IF_DEBUG(block_alloc, debugBelch("A%d: allocated %p, %d blocks @ %p\n", j, a[j], b, a[j]->start));
- // allocating zero blocks isn't allowed
- DEBUG_ONLY(checkFreeListSanity());
- }
- }
-
- for (j=0; j < ARRSIZE; j++)
- {
- freeGroup_lock(a[j]);
- }
-
// this time, sweep forwards allocating new blocks, and then
// backwards deallocating them.
- for (i=0; i < LOOPS; i++)
+ for (int i=0; i < LOOPS; i++)
{
- for (j=0; j < ARRSIZE; j++)
+ for (int j=0; j < ARRSIZE; j++)
{
- b = (rand() % MAXALLOC) + 1;
+ int b = (rand() % MAXALLOC) + 1;
a[j] = allocGroup_lock(b);
IF_DEBUG(block_alloc, debugBelch("B%d,%d: allocated %p, %d blocks @ %p\n", i, j, a[j], b, a[j]->start));
DEBUG_ONLY(checkFreeListSanity());
}
- for (j=ARRSIZE-1; j >= 0; j--)
+ for (int j=ARRSIZE-1; j >= 0; j--)
{
IF_DEBUG(block_alloc, debugBelch("B%d,%d: freeing %p, %d blocks @ %p\n", i, j, a[j], a[j]->blocks, a[j]->start));
freeGroup_lock(a[j]);
DEBUG_ONLY(checkFreeListSanity());
}
}
-
+}
+
+static void test_aligned_alloc(void)
+{
+ bdescr *a[ARRSIZE];
+
+    // as in the sequential test: sweep forwards allocating new aligned
+    // groups, then backwards deallocating them.
+ for (int i=0; i < LOOPS; i++)
+ {
+ for (int j=0; j < ARRSIZE; j++)
+ {
+ // allocAlignedGroupOnNode does not support allocating more than
+ // BLOCKS_PER_MBLOCK/2 blocks.
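+            // (presumably the allocator reserves roughly twice the requested
+            // space and trims the unaligned slop, so the request must fit in
+            // half a megablock.)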
+ int b = rand() % (BLOCKS_PER_MBLOCK / 2);
+ if (b == 0) { b = 1; }
+ a[j] = allocAlignedGroupOnNode(0, b);
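+            // the invariant the aligned allocator promises: the group's
+            // start lies on a b * BLOCK_SIZE boundary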
+ if ((((W_)(a[j]->start)) % (b*BLOCK_SIZE)) != 0)
+ {
+ barf("%p is not aligned to allocation size %d", a[j], b);
+ }
+ IF_DEBUG(block_alloc, debugBelch("B%d,%d: allocated %p, %d blocks @ %p\n", i, j, a[j], b, a[j]->start));
+ DEBUG_ONLY(checkFreeListSanity());
+ }
+ for (int j=ARRSIZE-1; j >= 0; j--)
+ {
+ IF_DEBUG(block_alloc, debugBelch("B%d,%d: freeing %p, %d blocks @ %p\n", i, j, a[j], a[j]->blocks, a[j]->start));
+ freeGroup_lock(a[j]);
+ DEBUG_ONLY(checkFreeListSanity());
+ }
+ }
+}
+
+int main (int argc, char *argv[])
+{
+ srand(SEED);
+
+ {
+ RtsConfig conf = defaultRtsConfig;
+ conf.rts_opts_enabled = RtsOptsAll;
+ hs_init_ghc(&argc, &argv, conf);
+ }
+
+ test_random_alloc();
+ test_sequential_alloc();
+ test_aligned_alloc();
+
DEBUG_ONLY(checkFreeListSanity());
hs_exit(); // will do a memory leak test

exit(0);
}