| field | value | date |
|---|---|---|
| author | Ben Gamari <ben@smart-cactus.org> | 2019-04-17 18:48:38 -0400 |
| committer | Ben Gamari <ben@smart-cactus.org> | 2019-04-17 19:50:30 -0400 |
| commit | 90e0ece0f88d13cf703cc2831f7f9b772b81b4ed (patch) | |
| tree | c8e4ce4e2021bc535dffbd20ae1393bd2fe71b60 | |
| parent | 45662404780b4968b5308ad969e233ffeb175c9a (diff) | |
| download | haskell-wip/gc/ben.tar.gz | |
Remove redundant write barrier checks and fix prediction (wip/gc/ben)
| mode | file | changes |
|---|---|---|
| -rw-r--r-- | includes/Cmm.h | 21 |
| -rw-r--r-- | rts/Messages.c | 4 |
| -rw-r--r-- | rts/PrimOps.cmm | 8 |
| -rw-r--r-- | rts/STM.c | 4 |
| -rw-r--r-- | rts/Schedule.c | 4 |
| -rw-r--r-- | rts/ThreadPaused.c | 4 |
| -rw-r--r-- | rts/Threads.c | 2 |
| -rw-r--r-- | rts/Updates.h | 4 |
| -rw-r--r-- | rts/sm/NonMovingMark.c | 7 |
| -rw-r--r-- | rts/sm/Storage.c | 16 |
10 files changed, 38 insertions, 36 deletions
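
The change below follows one pattern throughout: every test of `nonmoving_write_barrier_enabled` is made explicit at the call site and annotated as unlikely, via `RTS_UNLIKELY(...)` in the C parts of the RTS and via a new `IF_WRITE_BARRIER_ENABLED` macro (carrying a `(likely: False)` hint) in C--. The exact definition of `RTS_UNLIKELY` is not part of this diff; as a rough sketch of what such a hint amounts to in plain C, assuming a GCC/Clang-style `__builtin_expect` and using made-up names (`MY_UNLIKELY`, `wb_enabled`, `push_to_rem_set`, `overwrite_slot`) rather than the real RTS API:

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for an "unlikely" hint: tell the compiler the
 * condition is almost always false, so the barrier path is laid out
 * out of line and the common case falls straight through. */
#if defined(__GNUC__) || defined(__clang__)
#define MY_UNLIKELY(p) __builtin_expect(!!(p), 0)
#else
#define MY_UNLIKELY(p) (p)
#endif

/* True only while the concurrent (nonmoving) collector is marking. */
static volatile bool wb_enabled = false;

/* Slow path: remember the value that is about to be overwritten. */
static void push_to_rem_set(void *old_value)
{
    printf("remembered %p\n", old_value);
}

/* Fast path: one predicted-not-taken test, then the store itself. */
static void overwrite_slot(void **slot, void *new_value)
{
    if (MY_UNLIKELY(wb_enabled)) {
        push_to_rem_set(*slot);
    }
    *slot = new_value;
}

int main(void)
{
    int a = 1, b = 2;
    void *slot = &a;
    overwrite_slot(&slot, &b);   /* barrier off: no push */
    wb_enabled = true;
    overwrite_slot(&slot, &a);   /* barrier on: old value pushed */
    return 0;
}
```

The same concern motivates the C-- fixes: the old `if (nonmoving_write_barrier_enabled != 0 || 1)` tests in rts/PrimOps.cmm were always true, so the copy-array barrier ran even with the barrier disabled; replacing them with `IF_WRITE_BARRIER_ENABLED` restores the intended conditional.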
diff --git a/includes/Cmm.h b/includes/Cmm.h
index 6317445f5b..151b09b71b 100644
--- a/includes/Cmm.h
+++ b/includes/Cmm.h
@@ -797,11 +797,6 @@
    Misc junk
    -------------------------------------------------------------------------- */
 
-#if !defined(THREADED_RTS)
-// This is also done in rts/NonMoving.h, but that isn't visible from C--
-#define nonmoving_write_barrier_enabled 0
-#endif
-
 #define NO_TREC stg_NO_TREC_closure
 #define END_TSO_QUEUE stg_END_TSO_QUEUE_closure
 #define STM_AWOKEN stg_STM_AWOKEN_closure
@@ -940,9 +935,19 @@
     return (dst);
 
+#if defined(THREADED_RTS)
+#define IF_WRITE_BARRIER_ENABLED \
+    if (W_[nonmoving_write_barrier_enabled] != 0) (likely: False)
+#else
+// A similar measure is also taken in rts/NonMoving.h, but that isn't visible from C--
+#define IF_WRITE_BARRIER_ENABLED \
+    if (0)
+#define nonmoving_write_barrier_enabled 0
+#endif
+
 // A useful helper for pushing a pointer to the update remembered set.
 // See Note [Update remembered set] in NonMovingMark.c.
-#define updateRemembSetPushPtr(p) \
-    if (nonmoving_write_barrier_enabled != 0) (likely: False) { \
-        ccall updateRemembSetPushClosure_(BaseReg "ptr", p "ptr"); \
+#define updateRemembSetPushPtr(p) \
+    IF_WRITE_BARRIER_ENABLED { \
+        ccall updateRemembSetPushClosure_(BaseReg "ptr", p "ptr"); \
     }
diff --git a/rts/Messages.c b/rts/Messages.c
index 89415ea3d6..4283df4c39 100644
--- a/rts/Messages.c
+++ b/rts/Messages.c
@@ -256,7 +256,7 @@ loop:
         // point to the BLOCKING_QUEUE from the BLACKHOLE
         write_barrier(); // make the BQ visible
-        if (nonmoving_write_barrier_enabled) {
+        if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
             updateRemembSetPushClosure(cap, (StgClosure*)p);
         }
         ((StgInd*)bh)->indirectee = (StgClosure *)bq;
@@ -287,7 +287,7 @@ loop:
     }
 #endif
 
-    if (nonmoving_write_barrier_enabled) {
+    if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
         // We are about to overwrite bq->queue; make sure its current value
         // makes it into the update remembered set
         updateRemembSetPushClosure(cap, (StgClosure*)bq->queue);
diff --git a/rts/PrimOps.cmm b/rts/PrimOps.cmm
index 9ba70e70b5..3477efde61 100644
--- a/rts/PrimOps.cmm
+++ b/rts/PrimOps.cmm
@@ -474,7 +474,9 @@ stg_copyArray_barrier ( W_ hdr_size, gcptr dst, W_ dst_off, W_ n)
     end = p + WDS(n);
 
 again:
-    ccall updateRemembSetPushClosure_(BaseReg "ptr", W_[p] "ptr");
+    IF_WRITE_BARRIER_ENABLED {
+        ccall updateRemembSetPushClosure_(BaseReg "ptr", W_[p] "ptr");
+    }
     p = p + WDS(1);
     if (p < end) {
         goto again;
@@ -488,7 +490,7 @@ stg_copySmallArrayzh ( gcptr src, W_ src_off, gcptr dst, W_ dst_off, W_ n)
     W_ dst_p, src_p, bytes;
 
     if (n > 0) {
-        if (nonmoving_write_barrier_enabled != 0 || 1) {
+        IF_WRITE_BARRIER_ENABLED {
            call stg_copyArray_barrier(SIZEOF_StgSmallMutArrPtrs,
                                       dst, dst_off, n);
        }
@@ -509,7 +511,7 @@ stg_copySmallMutableArrayzh ( gcptr src, W_ src_off, gcptr dst, W_ dst_off, W_ n
     W_ dst_p, src_p, bytes;
 
     if (n > 0) {
-        if (nonmoving_write_barrier_enabled != 0 || 1) {
+        IF_WRITE_BARRIER_ENABLED {
            call stg_copyArray_barrier(SIZEOF_StgSmallMutArrPtrs,
                                       dst, dst_off, n);
        }
diff --git a/rts/STM.c b/rts/STM.c
@@ -297,7 +297,7 @@ static StgClosure *lock_tvar(Capability *cap,
   } while (cas((void *)&(s -> current_value),
                (StgWord)result, (StgWord)trec) != (StgWord)result);
-  if (nonmoving_write_barrier_enabled && result) {
+  if (RTS_UNLIKELY(nonmoving_write_barrier_enabled && result)) {
     updateRemembSetPushClosure(cap, result);
   }
   return result;
@@ -323,7 +323,7 @@ static StgBool cond_lock_tvar(Capability *cap,
   TRACE("%p : cond_lock_tvar(%p, %p)", trec, s, expected);
   w = cas((void *)&(s -> current_value), (StgWord)expected, (StgWord)trec);
   result = (StgClosure *)w;
-  if (nonmoving_write_barrier_enabled && result) {
+  if (RTS_UNLIKELY(nonmoving_write_barrier_enabled && result)) {
     updateRemembSetPushClosure(cap, expected);
   }
   TRACE("%p : %s", trec, result ? "success" : "failure");
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 37ff07f838..84dc406c90 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -2501,7 +2501,9 @@ resumeThread (void *task_)
     incall->suspended_tso = NULL;
     incall->suspended_cap = NULL;
     // we will modify tso->_link
-    updateRemembSetPushClosure(cap, (StgClosure *)tso->_link);
+    if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
+        updateRemembSetPushClosure(cap, (StgClosure *)tso->_link);
+    }
     tso->_link = END_TSO_QUEUE;
 
     traceEventRunThread(cap, tso);
diff --git a/rts/ThreadPaused.c b/rts/ThreadPaused.c
index aa76f5ed0e..4b24362442 100644
--- a/rts/ThreadPaused.c
+++ b/rts/ThreadPaused.c
@@ -330,8 +330,8 @@ threadPaused(Capability *cap, StgTSO *tso)
     }
 #endif
 
-            if (nonmoving_write_barrier_enabled
-                && ip_THUNK(INFO_PTR_TO_STRUCT(bh_info))) {
+            if (RTS_UNLIKELY(nonmoving_write_barrier_enabled
+                             && ip_THUNK(INFO_PTR_TO_STRUCT(bh_info)))) {
                 // We are about to replace a thunk with a blackhole.
                 // Add the free variables of the closure we are about to
                 // overwrite to the update remembered set.
diff --git a/rts/Threads.c b/rts/Threads.c
index 7907fc9196..37d58d7027 100644
--- a/rts/Threads.c
+++ b/rts/Threads.c
@@ -712,7 +712,7 @@ threadStackUnderflow (Capability *cap, StgTSO *tso)
         barf("threadStackUnderflow: not enough space for return values");
     }
 
-    if (nonmoving_write_barrier_enabled) {
+    if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
         // ensure that values that we copy into the new stack are marked
         // for the nonmoving collector. Note that these values won't
         // necessarily form a full closure so we need to handle them
diff --git a/rts/Updates.h b/rts/Updates.h
index b9fb57bac9..7776f92e82 100644
--- a/rts/Updates.h
+++ b/rts/Updates.h
@@ -44,7 +44,7 @@
     W_ bd; \
           \
     OVERWRITING_CLOSURE(p1); \
-    if (nonmoving_write_barrier_enabled != 0) { \
+    IF_WRITE_BARRIER_ENABLED { \
        ccall updateRemembSetPushThunk_(BaseReg, p1 "ptr"); \
     } \
     StgInd_indirectee(p1) = p2; \
@@ -73,7 +73,7 @@ INLINE_HEADER void updateWithIndirection (Capability *cap,
     /* not necessarily true: ASSERT( !closure_IND(p1) ); */
     /* occurs in RaiseAsync.c:raiseAsync() */
     OVERWRITING_CLOSURE(p1);
-    if (nonmoving_write_barrier_enabled) {
+    if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
         updateRemembSetPushThunk(cap, (StgThunk*)p1);
     }
     ((StgInd *)p1)->indirectee = p2;
diff --git a/rts/sm/NonMovingMark.c b/rts/sm/NonMovingMark.c
index e1ccb3a4a2..3bf1d9a932 100644
--- a/rts/sm/NonMovingMark.c
+++ b/rts/sm/NonMovingMark.c
@@ -459,8 +459,6 @@ bool check_in_nonmoving_heap(StgClosure *p) {
  */
 void updateRemembSetPushThunk(Capability *cap, StgThunk *thunk)
 {
-    // TODO: Eliminate this conditional once it's folded into codegen
-    if (!nonmoving_write_barrier_enabled) return;
     const StgInfoTable *info;
     do {
         info = get_volatile_itbl((StgClosure *) thunk);
@@ -517,14 +515,11 @@ void updateRemembSetPushThunkEager(Capability *cap,
 
 void updateRemembSetPushThunk_(StgRegTable *reg, StgThunk *p)
 {
-    // TODO: Eliminate this conditional once it's folded into codegen
-    if (!nonmoving_write_barrier_enabled) return;
     updateRemembSetPushThunk(regTableToCapability(reg), p);
 }
 
 void updateRemembSetPushClosure(Capability *cap, StgClosure *p)
 {
-    if (!nonmoving_write_barrier_enabled) return;
     if (!check_in_nonmoving_heap(p)) return;
     MarkQueue *queue = &cap->upd_rem_set.queue;
     push_closure(queue, p, NULL);
@@ -578,8 +573,6 @@ STATIC_INLINE void finish_upd_rem_set_mark(StgClosure *p)
 void updateRemembSetPushTSO(Capability *cap, StgTSO *tso)
 {
-    // TODO: Eliminate this conditional once it's folded into codegen
-    if (!nonmoving_write_barrier_enabled) return;
     if (needs_upd_rem_set_mark((StgClosure *) tso)) {
         debugTrace(DEBUG_nonmoving_gc, "upd_rem_set: TSO %p", tso);
         mark_tso(&cap->upd_rem_set.queue, tso);
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 3e782c4a39..077a751c45 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -430,7 +430,7 @@ lockCAF (StgRegTable *reg, StgIndStatic *caf)
         // reference should be in SRTs
         ASSERT(orig_info_tbl->layout.payload.ptrs == 0);
         // Becuase the payload is empty we just push the SRT
-        if (nonmoving_write_barrier_enabled) {
+        if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
            StgThunkInfoTable *thunk_info = itbl_to_thunk_itbl(orig_info_tbl);
            if (thunk_info->i.srt) {
                updateRemembSetPushClosure(cap, GET_SRT(thunk_info));
@@ -1140,7 +1140,7 @@ dirty_MUT_VAR(StgRegTable *reg, StgMutVar *mvar, StgClosure *old)
     if (mvar->header.info == &stg_MUT_VAR_CLEAN_info) {
         mvar->header.info = &stg_MUT_VAR_DIRTY_info;
         recordClosureMutated(cap, (StgClosure *) mvar);
-        if (nonmoving_write_barrier_enabled != 0) {
+        if (RTS_UNLIKELY(nonmoving_write_barrier_enabled != 0)) {
            updateRemembSetPushClosure_(reg, old);
        }
    }
@@ -1159,7 +1159,7 @@ dirty_TVAR(Capability *cap, StgTVar *p,
     if (p->header.info == &stg_TVAR_CLEAN_info) {
         p->header.info = &stg_TVAR_DIRTY_info;
         recordClosureMutated(cap,(StgClosure*)p);
-        if (nonmoving_write_barrier_enabled != 0) {
+        if (RTS_UNLIKELY(nonmoving_write_barrier_enabled != 0)) {
            updateRemembSetPushClosure(cap, old);
        }
    }
@@ -1176,7 +1176,7 @@ setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target)
     if (tso->dirty == 0) {
         tso->dirty = 1;
         recordClosureMutated(cap,(StgClosure*)tso);
-        if (nonmoving_write_barrier_enabled)
+        if (RTS_UNLIKELY(nonmoving_write_barrier_enabled))
            updateRemembSetPushClosure(cap, (StgClosure *) tso->_link);
     }
     tso->_link = target;
@@ -1188,7 +1188,7 @@ setTSOPrev (Capability *cap, StgTSO *tso, StgTSO *target)
     if (tso->dirty == 0) {
         tso->dirty = 1;
         recordClosureMutated(cap,(StgClosure*)tso);
-        if (nonmoving_write_barrier_enabled)
+        if (RTS_UNLIKELY(nonmoving_write_barrier_enabled))
            updateRemembSetPushClosure(cap, (StgClosure *) tso->block_info.prev);
     }
     tso->block_info.prev = target;
@@ -1202,7 +1202,7 @@ dirty_TSO (Capability *cap, StgTSO *tso)
         recordClosureMutated(cap,(StgClosure*)tso);
     }
 
-    if (nonmoving_write_barrier_enabled)
+    if (RTS_UNLIKELY(nonmoving_write_barrier_enabled))
        updateRemembSetPushTSO(cap, tso);
 }
 
@@ -1211,7 +1211,7 @@ dirty_STACK (Capability *cap, StgStack *stack)
 {
     // First push to upd_rem_set before we set stack->dirty since we
     // the nonmoving collector may already be marking the stack.
-    if (nonmoving_write_barrier_enabled)
+    if (RTS_UNLIKELY(nonmoving_write_barrier_enabled))
        updateRemembSetPushStack(cap, stack);
 
     if (! (stack->dirty & STACK_DIRTY)) {
@@ -1236,7 +1236,7 @@ void
 update_MVAR(StgRegTable *reg, StgClosure *p, StgClosure *old_val)
 {
     Capability *cap = regTableToCapability(reg);
-    if (nonmoving_write_barrier_enabled) {
+    if (RTS_UNLIKELY(nonmoving_write_barrier_enabled)) {
        StgMVar *mvar = (StgMVar *) p;
        updateRemembSetPushClosure(cap, old_val);
        updateRemembSetPushClosure(cap, (StgClosure *) mvar->head);
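
A second thread in the patch: the `updateRemembSetPush*` functions in rts/sm/NonMovingMark.c no longer re-check the flag on entry, since after this change every caller performs the (unlikely-annotated) check itself. A minimal sketch of that caller-side/callee-side split, again with hypothetical names (`barrier_enabled`, `rem_set_push`, `update_field`) rather than the real RTS types:

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* GCC/Clang branch hint; see the previous sketch. */
#define UNLIKELY(p) __builtin_expect(!!(p), 0)

static volatile bool barrier_enabled = false;

/* Callee: before the patch this function also began with
 *     if (!barrier_enabled) return;
 * so the flag was tested twice on every barrier-guarded write.  After the
 * patch it is only called once the caller has already tested the flag. */
static void rem_set_push(void *closure)
{
    if (closure == NULL) return;          /* cheap filtering can stay here */
    printf("pushed %p\n", closure);
}

/* Caller: exactly one flag test, hinted as not taken, before the store. */
static void update_field(void **field, void *new_value)
{
    if (UNLIKELY(barrier_enabled)) {
        rem_set_push(*field);
    }
    *field = new_value;
}

int main(void)
{
    int x = 0, y = 1;
    void *field = &x;
    update_field(&field, &y);             /* barrier off: nothing pushed */
    barrier_enabled = true;
    update_field(&field, &x);             /* barrier on: old pointer pushed */
    return 0;
}
```

The trade-off is that any new caller of the push functions must remember to guard the call itself; the `RTS_UNLIKELY` / `IF_WRITE_BARRIER_ENABLED` wrappers make that cheap to do.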