author     Ben Gamari <ben@smart-cactus.org>  2020-03-03 23:42:14 -0500
committer  Ben Gamari <ben@smart-cactus.org>  2020-03-04 09:39:45 -0500
commit     927b7a3de710d53fb11ccea35b807b64b4afc700 (patch)
tree       392fc7ecbe97c1f91067f5ee4292d8a3cf4ee56b
parent     2a2f51d79f145e015cc089d97cf71c19dd27bee4 (diff)
nonmoving: Don't traverse filled segment list in pause (wip/gc/concurrent-filled-segment-processing)
The non-moving collector would previously walk the entire filled segment list during the preparatory pause. However, this is far more work than is strictly necessary. Instead, we can get away with merely collecting the allocators' filled segment list heads during the pause and processing the lists themselves during the concurrent phase (a sketch of the idea follows the diffstat). This can significantly reduce the maximum gen1 GC pause time in programs with high rates of long-lived allocations.
-rw-r--r--  rts/sm/NonMoving.c  | 43
-rw-r--r--  rts/sm/NonMoving.h  |  1
2 files changed, 26 insertions, 18 deletions
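
To make the shape of the change concrete, here is a minimal, self-contained sketch of the idea in plain C. The type and function names (Segment, Allocator, prepare_mark, process_saved_filled) are illustrative stand-ins chosen for this example, not the actual RTS definitions; the real structures live in rts/sm/NonMoving.h and carry considerably more state.

    #include <stddef.h>
    #include <stdio.h>

    // Illustrative stand-ins for the RTS types (not the real definitions).
    struct Segment {
        struct Segment *link;
        int next_free;       // bump-allocation cursor
        int next_free_snap;  // snapshot of the cursor, used by marking
    };

    struct Allocator {
        struct Segment *filled;        // segments filled since the last GC
        struct Segment *saved_filled;  // list head saved during the pause
    };

    static struct Segment *sweep_list = NULL;

    // Pause-time work is now O(1) per allocator: just detach the filled
    // list head and defer the walk to the concurrent phase.
    static void prepare_mark(struct Allocator *a)
    {
        a->saved_filled = a->filled;
        a->filled = NULL;
    }

    // Concurrent-phase work: walk the saved list, snapshot each segment,
    // and splice the whole list onto sweep_list. This runs alongside the
    // mutator, so its O(n) cost no longer contributes to pause time.
    static void process_saved_filled(struct Allocator *a)
    {
        struct Segment *filled = a->saved_filled;
        if (filled) {
            struct Segment *seg = filled;
            while (1) {
                seg->next_free_snap = seg->next_free;
                if (seg->link)
                    seg = seg->link;
                else
                    break;
            }
            seg->link = sweep_list;  // seg is now the tail of the list
            sweep_list = filled;
        }
        a->saved_filled = NULL;
    }

    int main(void)
    {
        struct Segment s1 = { NULL, 5, 0 }, s2 = { &s1, 7, 0 };
        struct Allocator a = { &s2, NULL };
        prepare_mark(&a);          // cheap: done inside the pause
        process_saved_filled(&a);  // expensive: done concurrently
        printf("sweep list head snapshot: %d\n", sweep_list->next_free_snap);
        return 0;
    }

The key property is that prepare_mark does constant work per allocator, so the pause no longer scales with the number of filled segments; the linear walk moves into process_saved_filled, which overlaps with mutator execution.
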
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index 0bd96d1800..68a36f00cd 100644
--- a/rts/sm/NonMoving.c
+++ b/rts/sm/NonMoving.c
@@ -707,25 +707,10 @@ static void nonmovingPrepareMark(void)
             nonmovingSegmentInfo(seg)->next_free_snap = seg->next_free;
         }

-        // Update filled segments' snapshot pointers and move to sweep_list
-        uint32_t n_filled = 0;
-        struct NonmovingSegment *const filled = alloca->filled;
+        // Save the filled segments for later processing during the concurrent
+        // mark phase.
+        alloca->saved_filled = alloca->filled;
         alloca->filled = NULL;
-        if (filled) {
-            struct NonmovingSegment *seg = filled;
-            while (true) {
-                // Set snapshot
-                nonmovingSegmentInfo(seg)->next_free_snap = seg->next_free;
-                n_filled++;
-                if (seg->link)
-                    seg = seg->link;
-                else
-                    break;
-            }
-            // add filled segments to sweep_list
-            seg->link = nonmovingHeap.sweep_list;
-            nonmovingHeap.sweep_list = filled;
-        }

         // N.B. It's not necessary to update snapshot pointers of active segments;
         // they were set after they were swept and haven't seen any allocation
@@ -948,6 +933,28 @@ static void nonmovingMark_(MarkQueue *mark_queue, StgWeak **dead_weaks, StgTSO *
     ACQUIRE_LOCK(&nonmoving_collection_mutex);
     debugTrace(DEBUG_nonmoving_gc, "Starting mark...");

+    // Walk the list of filled segments that we collected during preparation,
+    // update their snapshot pointers and move them to the sweep list.
+    for (int alloca_idx = 0; alloca_idx < NONMOVING_ALLOCA_CNT; ++alloca_idx) {
+        struct NonmovingSegment *filled = nonmovingHeap.allocators[alloca_idx]->saved_filled;
+        uint32_t n_filled = 0;
+        if (filled) {
+            struct NonmovingSegment *seg = filled;
+            while (true) {
+                // Set snapshot
+                nonmovingSegmentInfo(seg)->next_free_snap = seg->next_free;
+                n_filled++;
+                if (seg->link)
+                    seg = seg->link;
+                else
+                    break;
+            }
+            // add filled segments to sweep_list
+            seg->link = nonmovingHeap.sweep_list;
+            nonmovingHeap.sweep_list = filled;
+        }
+    }
+
     // Do concurrent marking; most of the heap will get marked here.
     nonmovingMarkThreadsWeaks(mark_queue);
diff --git a/rts/sm/NonMoving.h b/rts/sm/NonMoving.h
index 36ecd8b0af..6eabcb8493 100644
--- a/rts/sm/NonMoving.h
+++ b/rts/sm/NonMoving.h
@@ -62,6 +62,7 @@ struct NonmovingSegment {
 // A non-moving allocator for a particular block size
 struct NonmovingAllocator {
     struct NonmovingSegment *filled;
+    struct NonmovingSegment *saved_filled;
     struct NonmovingSegment *active;
     // indexed by capability number
     struct NonmovingSegment *current[];
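
A note on the new field: saved_filled appears to be needed as separate storage (rather than walking filled directly during the concurrent phase) because mutators keep pushing newly filled segments onto filled while concurrent marking runs. Detaching the list head into saved_filled during the pause gives the mark phase a stable, private list of exactly the segments filled before the snapshot, while post-snapshot segments accumulate on filled for the next collection. The splice onto sweep_list also comes essentially for free: the walk already visits every segment to set next_free_snap, so it finishes holding the tail pointer needed to link the whole list in one step.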