-rw-r--r--  rts/sm/MarkWeak.c       38
-rw-r--r--  rts/sm/NonMoving.c       9
-rw-r--r--  rts/sm/NonMovingMark.c   5
3 files changed, 47 insertions, 5 deletions
diff --git a/rts/sm/MarkWeak.c b/rts/sm/MarkWeak.c
index bae4b9d99f..2eca46f619 100644
--- a/rts/sm/MarkWeak.c
+++ b/rts/sm/MarkWeak.c
@@ -83,6 +83,30 @@ static bool resurrectUnreachableThreads (generation *gen, StgTSO **resurrected_t
static void tidyThreadList (generation *gen);
/*
+ * Note [Weak pointer processing and the non-moving GC]
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ * When using the non-moving GC we defer weak pointer processing
+ * until the concurrent marking phase as weaks in the non-moving heap may be
+ * keyed on objects living in the non-moving generation. To accomplish this
+ * initWeakForGC keeps all weak pointers on oldest_gen->weak_ptr_list, where
+ * nonmovingCollect will find them. From there they will be moved to
+ * nonmoving_old_weak_ptr_list. During the mark loop we will move weaks with
+ * reachable keys to nonmoving_weak_ptr_list. At the end of concurrent marking
+ * we tidy the weak list (in nonmovingTidyWeakList) and perform another set of
+ * marking as necessary, just as is done in tidyWeakList.
+ *
+ * Note that this treatment takes advantage of the fact that we usually need
+ * not worry about Weak#s living in the non-moving heap but being keyed on an
+ * object in the moving heap since the Weak# must be strictly older than the
+ * key. Such objects would otherwise pose a problem since the non-moving
+ * collector would be unable to safely determine the liveness of the key.
+ * In the rare case that we *do* see such a key (e.g. in the case of a
+ * pinned ByteArray# living in a partially-filled accumulator block)
+ * the nonmoving collector assumes that it is live.
+ *
+ */
+
+/*
* Prepare the weak object lists for GC. Specifically, reset weak_stage
* and move all generations' `weak_ptr_list`s to `old_weak_ptr_list`.
* Weaks with live keys will later be moved back to `weak_ptr_list` by
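(As an illustration of the hand-off the new Note describes, here is a minimal
C sketch. The weak struct, the key_reachable field, and take_weaks/tidy_weaks
are simplified stand-ins, not the RTS definitions; only the three list names
mirror the real code.)

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for StgWeak: a weak pointer with a key and a link field. */
    struct weak { int key; bool key_reachable; struct weak *link; };

    static struct weak *oldest_gen_weak_ptr_list;     /* left by initWeakForGC  */
    static struct weak *nonmoving_old_weak_ptr_list;  /* input to the mark loop */
    static struct weak *nonmoving_weak_ptr_list;      /* weaks with live keys   */

    /* Roughly what nonmovingCollect does: take everything initWeakForGC left
     * on the oldest generation's list and defer it to the concurrent mark. */
    static void take_weaks(void)
    {
        nonmoving_old_weak_ptr_list = oldest_gen_weak_ptr_list;
        oldest_gen_weak_ptr_list = NULL;
    }

    /* One pass of the mark loop: move weaks whose keys were found reachable
     * over to nonmoving_weak_ptr_list; the rest stay deferred. */
    static bool tidy_weaks(void)
    {
        bool did_work = false;
        struct weak **last = &nonmoving_old_weak_ptr_list;
        for (struct weak *w = *last; w != NULL; w = *last) {
            if (w->key_reachable) {
                *last = w->link;                 /* unlink from old list */
                w->link = nonmoving_weak_ptr_list;
                nonmoving_weak_ptr_list = w;     /* now known live */
                did_work = true;
            } else {
                last = &w->link;
            }
        }
        return did_work;
    }

    int main(void)
    {
        struct weak a = { 1, true,  NULL };
        struct weak b = { 2, false, &a };
        oldest_gen_weak_ptr_list = &b;

        take_weaks();
        while (tidy_weaks()) { /* iterate to a fixpoint, as the mark loop does */ }

        for (struct weak *w = nonmoving_weak_ptr_list; w; w = w->link)
            printf("weak on key %d is live\n", w->key);
        return 0;
    }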
@@ -91,9 +115,13 @@ static void tidyThreadList (generation *gen);
void
initWeakForGC(void)
{
- uint32_t g;
+ uint32_t oldest = N;
+ if (RtsFlags.GcFlags.useNonmoving && N == oldest_gen->no) {
+ // See Note [Weak pointer processing and the non-moving GC].
+ oldest = oldest_gen->no - 1;
+ }
- for (g = 0; g <= N; g++) {
+ for (uint32_t g = 0; g <= oldest; g++) {
generation *gen = &generations[g];
gen->old_weak_ptr_list = gen->weak_ptr_list;
gen->weak_ptr_list = NULL;
@@ -265,6 +293,12 @@ static bool resurrectUnreachableThreads (generation *gen, StgTSO **resurrected_t
*/
static bool tidyWeakList(generation *gen)
{
+ if (RtsFlags.GcFlags.useNonmoving && gen == oldest_gen) {
+ // See Note [Weak pointer processing and the non-moving GC].
+ ASSERT(gen->old_weak_ptr_list == NULL);
+ return false;
+ }
+
StgWeak *w, **last_w, *next_w;
const StgInfoTable *info;
StgClosure *new;
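(Taken together, the two hunks above make the moving collector skip the oldest
generation's weaks entirely when the non-moving collector owns it:
initWeakForGC stops its loop one generation short, and tidyWeakList bails out
early, asserting that the list it would have walked is empty. A condensed,
self-contained sketch of just the loop-bound logic; the flag and the
generation numbering here are simplified assumptions, not the RTS globals.)

    #include <assert.h>
    #include <stdbool.h>

    #define N_GENS 3
    static const unsigned oldest_gen_no = N_GENS - 1;

    /* Which generations initWeakForGC touches when collecting up to gen N. */
    static unsigned last_gen_to_init(unsigned N, bool use_nonmoving)
    {
        /* When the non-moving collector owns the oldest generation and this
         * is a major collection, leave oldest_gen->weak_ptr_list alone so
         * nonmovingCollect can pick it up later. */
        if (use_nonmoving && N == oldest_gen_no)
            return oldest_gen_no - 1;
        return N;
    }

    int main(void)
    {
        /* Minor collection: unaffected. */
        assert(last_gen_to_init(1, true) == 1);
        /* Major collection with the non-moving GC: stop short of oldest_gen. */
        assert(last_gen_to_init(oldest_gen_no, true) == oldest_gen_no - 1);
        /* Major collection without it: all generations, as before. */
        assert(last_gen_to_init(oldest_gen_no, false) == oldest_gen_no);
        return 0;
    }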
diff --git a/rts/sm/NonMoving.c b/rts/sm/NonMoving.c
index d47faa2b9f..4fd2138f59 100644
--- a/rts/sm/NonMoving.c
+++ b/rts/sm/NonMoving.c
@@ -244,6 +244,9 @@ Mutex concurrent_coll_finished_lock;
* how we use the DIRTY flags associated with MUT_VARs and TVARs to improve
* barrier efficiency.
*
+ * - Note [Weak pointer processing and the non-moving GC] (MarkWeak.c) describes
+ * how weak pointers are handled when the non-moving GC is in use.
+ *
* [ueno 2016]:
* Katsuhiro Ueno and Atsushi Ohori. 2016. A fully concurrent garbage
* collector for functional programs on multicore processors. SIGPLAN Not. 51,
@@ -282,8 +285,8 @@ Mutex concurrent_coll_finished_lock;
* was (unsurprisingly) also found to result in significant amounts of
* unnecessary copying.
*
- * Consequently, we now allow aging. Aging allows the preparatory GC leading up
- * to a major collection to evacuate some objects into the young generation.
+ * Consequently, we now allow "aging", which allows the preparatory GC leading
+ * up to a major collection to evacuate some objects into the young generation.
* However, this introduces the following tricky case that might arise after
* we have finished the preparatory GC:
*
@@ -292,6 +295,7 @@ Mutex concurrent_coll_finished_lock;
* ┆
* B ←────────────── A ←─────────────── root
* │ ┆ ↖─────────────── gen1 mut_list
+ * │ ┆
* ╰───────────────→ C
* ┆
*
@@ -332,6 +336,7 @@ Mutex concurrent_coll_finished_lock;
* The implementation details of this are described in Note [Non-moving GC:
* Marking evacuated objects] in Evac.c.
*
+ *
* Note [Deadlock detection under the non-moving collector]
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* In GHC the garbage collector is responsible for identifying deadlocked
diff --git a/rts/sm/NonMovingMark.c b/rts/sm/NonMovingMark.c
index bde650d0de..70e9ee34fc 100644
--- a/rts/sm/NonMovingMark.c
+++ b/rts/sm/NonMovingMark.c
@@ -1842,7 +1842,10 @@ bool nonmovingTidyWeaks (struct MarkQueue_ *queue)
// Otherwise it's a live weak
ASSERT(w->header.info == &stg_WEAK_info);
- if (nonmovingIsNowAlive(w->key)) {
+ // See Note [Weak pointer processing and the non-moving GC] in
+ // MarkWeak.c
+ bool key_in_nonmoving = Bdescr((StgPtr) w->key)->flags & BF_NONMOVING;
+ if (!key_in_nonmoving || nonmovingIsNowAlive(w->key)) {
nonmovingMarkLiveWeak(queue, w);
did_work = true;
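(The updated condition reads as a two-case predicate: a key outside the
non-moving heap cannot be judged by the concurrent mark, so the collector
conservatively treats it as live, per the pinned ByteArray# case in the Note;
a key inside the non-moving heap defers to the mark state. A simplified,
self-contained restatement, where bdescr, BF_NONMOVING and marked_alive are
stand-ins for the RTS's Bdescr machinery and nonmovingIsNowAlive.)

    #include <stdbool.h>
    #include <stdio.h>

    #define BF_NONMOVING 0x1   /* stand-in for the RTS block flag */

    struct bdescr { unsigned flags; };
    struct weak_key { struct bdescr *bd; bool marked_alive; };

    /* Mirrors the updated condition in nonmovingTidyWeaks: keys outside the
     * non-moving heap are conservatively treated as live; non-moving keys
     * defer to the concurrent mark. */
    static bool weak_key_live(const struct weak_key *k)
    {
        bool key_in_nonmoving = k->bd->flags & BF_NONMOVING;
        return !key_in_nonmoving || k->marked_alive;
    }

    int main(void)
    {
        struct bdescr moving = { 0 }, nonmoving = { BF_NONMOVING };
        struct weak_key pinned  = { &moving,    false };  /* e.g. pinned ByteArray# */
        struct weak_key dead    = { &nonmoving, false };
        struct weak_key reached = { &nonmoving, true  };
        printf("%d %d %d\n", weak_key_live(&pinned),
                             weak_key_live(&dead),
                             weak_key_live(&reached));  /* prints: 1 0 1 */
        return 0;
    }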