author     Ben Gamari <ben@smart-cactus.org>    2020-05-29 23:05:10 -0400
committer  Ben Gamari <ben@smart-cactus.org>    2020-11-01 13:57:10 -0500
commit     1a184ceb8d4c3093fe1a8a5085a747943274a8d9 (patch)
tree       a752e2b5c192cd8daac51a24e2e21259bf2cae53
parent     e63db32c7eb089985a1a7279a0a886a32d70ac0e (diff)
rts: Introduce performBlockingMajorGC    (wip/perform-blocking-gc)
-rw-r--r--  includes/rts/storage/GC.h        1
-rw-r--r--  libraries/base/System/Mem.hs     9
-rw-r--r--  rts/RtsSymbols.c                 9
-rw-r--r--  rts/Schedule.c                  37
-rw-r--r--  rts/sm/GC.c                      3
-rw-r--r--  rts/sm/GC.h                      1
-rw-r--r--  rts/sm/NonMoving.c               6
-rw-r--r--  rts/sm/NonMoving.h               3
8 files changed, 48 insertions, 21 deletions
diff --git a/includes/rts/storage/GC.h b/includes/rts/storage/GC.h
index 9f4a0dde07..dd26399ab5 100644
--- a/includes/rts/storage/GC.h
+++ b/includes/rts/storage/GC.h
@@ -216,6 +216,7 @@ extern W_ large_alloc_lim;
void performGC(void);
void performMajorGC(void);
+void performBlockingMajorGC(void);
/* -----------------------------------------------------------------------------
The CAF table - used to let us revert CAFs in GHCi
diff --git a/libraries/base/System/Mem.hs b/libraries/base/System/Mem.hs
index c47a52d2f7..843cf66877 100644
--- a/libraries/base/System/Mem.hs
+++ b/libraries/base/System/Mem.hs
@@ -3,7 +3,7 @@
-- Module : System.Mem
-- Copyright : (c) The University of Glasgow 2001
-- License : BSD-style (see the file libraries/base/LICENSE)
---
+--
-- Maintainer : libraries@haskell.org
-- Stability : provisional
-- Portability : portable
@@ -19,6 +19,7 @@ module System.Mem
(
-- * Garbage collection
performGC
+ , performBlockingMajorGC
, performMajorGC
, performMinorGC
@@ -35,6 +36,12 @@ import GHC.Conc.Sync
performGC :: IO ()
performGC = performMajorGC
+-- | Triggers an immediate major garbage collection, ensuring that the
+-- collection has finished before returning.
+--
+-- @since 4.16.0.0
+foreign import ccall "performBlockingMajorGC" performBlockingMajorGC :: IO ()
+
-- | Triggers an immediate major garbage collection.
--
-- @since 4.7.0.0
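
The foreign import added above binds to the RTS entry point introduced in rts/Schedule.c further down. A minimal usage sketch follows (a hypothetical program, not part of this patch; it assumes a base that already ships performBlockingMajorGC and a process started with +RTS -T so that GHC.Stats is enabled):

    import System.Mem (performBlockingMajorGC)
    import GHC.Stats  (getRTSStats, RTSStats (..), GCDetails (..))

    main :: IO ()
    main = do
      -- Run a major collection and wait for it to finish. With the concurrent
      -- nonmoving collector (+RTS -xn), a plain performMajorGC may return while
      -- marking is still in progress on the background thread.
      performBlockingMajorGC
      stats <- getRTSStats
      -- These figures are read after the collection has completed.
      putStrLn ("live bytes: " ++ show (gcdetails_live_bytes (gc stats)))
      putStrLn ("major GCs:  " ++ show (major_gcs stats))

Running with ./prog +RTS -T -xn enables both the RTS statistics used here and the nonmoving collector that this new entry point is aimed at.
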
diff --git a/rts/RtsSymbols.c b/rts/RtsSymbols.c
index e433d9d369..6735e4cb03 100644
--- a/rts/RtsSymbols.c
+++ b/rts/RtsSymbols.c
@@ -732,13 +732,14 @@
SymI_HasProto(stg_isMutableByteArrayPinnedzh) \
SymI_HasProto(stg_shrinkMutableByteArrayzh) \
SymI_HasProto(stg_resizzeMutableByteArrayzh) \
- SymI_HasProto(stg_shrinkSmallMutableArrayzh) \
+ SymI_HasProto(stg_shrinkSmallMutableArrayzh) \
SymI_HasProto(newSpark) \
- SymI_HasProto(updateRemembSetPushThunk) \
- SymI_HasProto(updateRemembSetPushThunk_) \
- SymI_HasProto(updateRemembSetPushClosure_) \
+ SymI_HasProto(updateRemembSetPushThunk) \
+ SymI_HasProto(updateRemembSetPushThunk_) \
+ SymI_HasProto(updateRemembSetPushClosure_) \
SymI_HasProto(performGC) \
SymI_HasProto(performMajorGC) \
+ SymI_HasProto(performBlockingMajorGC) \
SymI_HasProto(prog_argc) \
SymI_HasProto(prog_argv) \
SymI_HasProto(stg_putMVarzh) \
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 41d0dba953..12b192c3d5 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -165,7 +165,9 @@ static bool scheduleHandleThreadFinished( Capability *cap, Task *task,
StgTSO *t );
static bool scheduleNeedHeapProfile(bool ready_to_gc);
static void scheduleDoGC( Capability **pcap, Task *task,
- bool force_major, bool deadlock_detect );
+ bool force_major,
+ bool deadlock_detect,
+ bool force_nonconcurrent );
static void deleteThread (StgTSO *tso);
static void deleteAllThreads (void);
@@ -264,7 +266,7 @@ schedule (Capability *initialCapability, Task *task)
case SCHED_INTERRUPTING:
debugTrace(DEBUG_sched, "SCHED_INTERRUPTING");
/* scheduleDoGC() deletes all the threads */
- scheduleDoGC(&cap,task,true,false);
+ scheduleDoGC(&cap, task, true, false, false);
// after scheduleDoGC(), we must be shutting down. Either some
// other Capability did the final GC, or we did it above,
@@ -573,7 +575,7 @@ run_thread:
}
if (ready_to_gc || scheduleNeedHeapProfile(ready_to_gc)) {
- scheduleDoGC(&cap,task,false,false);
+ scheduleDoGC(&cap, task, false, false, false);
}
} /* end of while() */
}
@@ -937,7 +939,10 @@ scheduleDetectDeadlock (Capability **pcap, Task *task)
// they are unreachable and will therefore be sent an
// exception. Any threads thus released will be immediately
// runnable.
- scheduleDoGC (pcap, task, true/*force major GC*/, true/*deadlock detection*/);
+ scheduleDoGC (pcap, task,
+ true/*force major GC*/,
+ true/*deadlock detection*/,
+ false/*force_nonconcurrent*/);
cap = *pcap;
// when force_major == true. scheduleDoGC sets
// recent_activity to ACTIVITY_DONE_GC and turns off the timer
@@ -1562,7 +1567,7 @@ void releaseAllCapabilities(uint32_t n, Capability *keep_cap, Task *task)
// behind deadlock_detect argument.
static void
scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
- bool force_major, bool deadlock_detect)
+ bool force_major, bool deadlock_detect, bool force_nonconcurrent)
{
Capability *cap = *pcap;
bool heap_census;
@@ -1855,9 +1860,11 @@ delete_threads_and_gc:
// emerge they don't immediately re-enter the GC.
pending_sync = 0;
signalCondition(&sync_finished_cond);
- GarbageCollect(collect_gen, heap_census, deadlock_detect, gc_type, cap, idle_cap);
+ GarbageCollect(collect_gen, heap_census, deadlock_detect,
+ force_nonconcurrent, gc_type, cap, idle_cap);
#else
- GarbageCollect(collect_gen, heap_census, deadlock_detect, 0, cap, NULL);
+ GarbageCollect(collect_gen, heap_census, deadlock_detect,
+ force_nonconcurrent, 0, cap, NULL);
#endif
// If we're shutting down, don't leave any idle GC work to do.
@@ -2734,7 +2741,7 @@ exitScheduler (bool wait_foreign USED_IF_THREADS)
nonmovingStop();
Capability *cap = task->cap;
waitForCapability(&cap,task);
- scheduleDoGC(&cap,task,true,false);
+ scheduleDoGC(&cap,task,true,false,false);
ASSERT(task->incall->tso == NULL);
releaseCapability(cap);
}
@@ -2789,7 +2796,7 @@ void markScheduler (evac_fn evac USED_IF_NOT_THREADS,
-------------------------------------------------------------------------- */
static void
-performGC_(bool force_major)
+performGC_(bool force_major, bool force_nonconcurrent)
{
Task *task;
Capability *cap = NULL;
@@ -2802,7 +2809,7 @@ performGC_(bool force_major)
// TODO: do we need to traceTask*() here?
waitForCapability(&cap,task);
- scheduleDoGC(&cap,task,force_major,false);
+ scheduleDoGC(&cap, task, force_major, false, force_nonconcurrent);
releaseCapability(cap);
boundTaskExiting(task);
}
@@ -2810,13 +2817,19 @@ performGC_(bool force_major)
void
performGC(void)
{
- performGC_(false);
+ performGC_(false, false);
}
void
performMajorGC(void)
{
- performGC_(true);
+ performGC_(true, false);
+}
+
+void
+performBlockingMajorGC(void)
+{
+ performGC_(true, true);
}
/* ---------------------------------------------------------------------------
diff --git a/rts/sm/GC.c b/rts/sm/GC.c
index c39dcc2e89..fc9146c2fa 100644
--- a/rts/sm/GC.c
+++ b/rts/sm/GC.c
@@ -196,6 +196,7 @@ void
GarbageCollect (uint32_t collect_gen,
const bool do_heap_census,
const bool deadlock_detect,
+ const bool force_nonconcurrent,
uint32_t gc_type USED_IF_THREADS,
Capability *cap,
bool idle_cap[])
@@ -779,7 +780,7 @@ GarbageCollect (uint32_t collect_gen,
// upd_rem_set
nonmovingAddUpdRemSetBlocks(&gct->cap->upd_rem_set.queue);
#endif
- nonmovingCollect(&dead_weak_ptr_list, &resurrected_threads);
+ nonmovingCollect(&dead_weak_ptr_list, &resurrected_threads, force_nonconcurrent);
ACQUIRE_SM_LOCK;
}
diff --git a/rts/sm/GC.h b/rts/sm/GC.h
index bde006913b..33b98b6e20 100644
--- a/rts/sm/GC.h
+++ b/rts/sm/GC.h
@@ -20,6 +20,7 @@
void GarbageCollect (uint32_t collect_gen,
bool do_heap_census,
bool deadlock_detect,
+ bool force_nonconcurrent,
uint32_t gc_type,
Capability *cap,
bool idle_cap[]);
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index 388ceae2fd..1118bf2a5c 100644
--- a/rts/sm/NonMoving.c
+++ b/rts/sm/NonMoving.c
@@ -894,7 +894,9 @@ static void nonmovingMarkWeakPtrList(MarkQueue *mark_queue, StgWeak *dead_weak_p
}
}
-void nonmovingCollect(StgWeak **dead_weaks, StgTSO **resurrected_threads)
+void nonmovingCollect(StgWeak **dead_weaks,
+ StgTSO **resurrected_threads,
+ bool force_nonconcurrent)
{
#if defined(THREADED_RTS)
// We can't start a new collection until the old one has finished
@@ -973,7 +975,7 @@ void nonmovingCollect(StgWeak **dead_weaks, StgTSO **resurrected_threads)
// again for the sync if we let it go, because it'll immediately start doing
// a major GC, because that's what we do when exiting scheduler (see
// exitScheduler()).
- if (sched_state == SCHED_RUNNING) {
+ if (sched_state == SCHED_RUNNING && !force_nonconcurrent) {
concurrent_coll_running = true;
nonmoving_write_barrier_enabled = true;
debugTrace(DEBUG_nonmoving_gc, "Starting concurrent mark thread");
diff --git a/rts/sm/NonMoving.h b/rts/sm/NonMoving.h
index 6eabcb8493..1e19babf6f 100644
--- a/rts/sm/NonMoving.h
+++ b/rts/sm/NonMoving.h
@@ -126,7 +126,8 @@ void nonmovingExit(void);
// directly, but in a pause.
//
void nonmovingCollect(StgWeak **dead_weaks,
- StgTSO **resurrected_threads);
+ StgTSO **resurrected_threads,
+ bool force_nonconcurrent);
void *nonmovingAllocate(Capability *cap, StgWord sz);
void nonmovingAddCapabilities(uint32_t new_n_caps);
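
The behavioural difference between performMajorGC and the new blocking variant only shows up when the oldest generation is handed off to the concurrent mark thread, i.e. exactly the branch guarded by force_nonconcurrent above. A rough way to observe it (a hypothetical timing harness, not part of this patch) is to compare the two calls under +RTS -xn:

    import System.Mem (performMajorGC, performBlockingMajorGC)
    import GHC.Clock  (getMonotonicTime)

    -- Time an IO action with the monotonic clock and print the result.
    timed :: String -> IO () -> IO ()
    timed label act = do
      t0 <- getMonotonicTime
      act
      t1 <- getMonotonicTime
      putStrLn (label ++ ": " ++ show (t1 - t0) ++ " s")

    main :: IO ()
    main = do
      -- Keep a reasonably large structure live so the mark phase has real work.
      let xs = [1 .. 1000000] :: [Int]
      print (length xs)
      -- Under +RTS -xn the first call can return while the concurrent mark is
      -- still running; the second waits for the whole collection to finish.
      timed "performMajorGC        " performMajorGC
      timed "performBlockingMajorGC" performBlockingMajorGC
      print (sum xs)  -- keep xs alive across both collections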