Diffstat (limited to 'rts/sm/Storage.c')
 rts/sm/Storage.c | 50 ++++++++++++++++++++++++++------------------------
 1 file changed, 26 insertions(+), 24 deletions(-)
diff --git a/rts/sm/Storage.c b/rts/sm/Storage.c
index 96bc133d02..98aefa9a4b 100644
--- a/rts/sm/Storage.c
+++ b/rts/sm/Storage.c
@@ -302,7 +302,7 @@ exitStorage (void)
{
nonmovingExit();
updateNurseriesStats();
- stat_exit();
+ stat_exitReport();
}
void
@@ -445,7 +445,7 @@ lockCAF (StgRegTable *reg, StgIndStatic *caf)
Capability *cap = regTableToCapability(reg);
StgInd *bh;
- orig_info = caf->header.info;
+ orig_info = RELAXED_LOAD(&caf->header.info);
#if defined(THREADED_RTS)
const StgInfoTable *cur_info;
@@ -501,12 +501,11 @@ lockCAF (StgRegTable *reg, StgIndStatic *caf)
}
bh->indirectee = (StgClosure *)cap->r.rCurrentTSO;
SET_HDR(bh, &stg_CAF_BLACKHOLE_info, caf->header.prof.ccs);
- // Ensure that above writes are visible before we introduce reference as CAF indirectee.
- write_barrier();
- caf->indirectee = (StgClosure *)bh;
- write_barrier();
- SET_INFO((StgClosure*)caf,&stg_IND_STATIC_info);
+ // RELEASE ordering to ensure that above writes are visible before we
+ // introduce reference as CAF indirectee.
+ RELEASE_STORE(&caf->indirectee, (StgClosure *) bh);
+ SET_INFO_RELEASE((StgClosure*)caf, &stg_IND_STATIC_info);
return bh;
}
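Note on the hunk above: two explicit write_barrier() calls are replaced by a single release store that publishes the fully initialised blackhole. A minimal standalone sketch of the release/acquire publication pattern at work here, assuming the RTS macros expand to the GNU C atomic builtins roughly as in includes/stg/SMP.h (names and structure below are illustrative, not RTS code):

    /* Writer initialises an object fully, then publishes a pointer to it
     * with RELEASE ordering; a reader that loads that pointer with ACQUIRE
     * ordering is guaranteed to see the initialised contents. */
    #include <pthread.h>
    #include <stdio.h>

    typedef struct { int payload; } Closure;

    static Closure bh;           /* plays the role of the blackhole        */
    static Closure *indirectee;  /* plays the role of caf->indirectee      */

    static void *writer(void *arg) {
        (void)arg;
        bh.payload = 42;         /* set up bh (cf. SET_HDR above)          */
        __atomic_store_n(&indirectee, &bh, __ATOMIC_RELEASE);
        return NULL;
    }

    static void *reader(void *arg) {
        (void)arg;
        Closure *p;
        while ((p = __atomic_load_n(&indirectee, __ATOMIC_ACQUIRE)) == NULL)
            ;                    /* spin until the pointer is published    */
        printf("%d\n", p->payload);  /* always prints 42                   */
        return NULL;
    }

    int main(void) {
        pthread_t w, r;
        pthread_create(&r, NULL, reader, NULL);
        pthread_create(&w, NULL, writer, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
    }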
@@ -1033,8 +1032,8 @@ allocateMightFail (Capability *cap, W_ n)
g0->n_new_large_words += n;
RELEASE_SM_LOCK;
initBdescr(bd, g0, g0);
- bd->flags = BF_LARGE;
- bd->free = bd->start + n;
+ RELAXED_STORE(&bd->flags, BF_LARGE);
+ RELAXED_STORE(&bd->free, bd->start + n);
cap->total_allocated += n;
return bd->start;
}
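Here the plain stores to the fresh block descriptor become relaxed atomic stores. Relaxed ordering imposes no inter-thread ordering; the point is to make concurrent accesses (e.g. the GC reading bd->flags) defined under the C11 memory model and visible as intentional to ThreadSanitizer. Assuming the accessor macros are thin wrappers over the GNU builtins, as in includes/stg/SMP.h, they look roughly like the following sketch (see SMP.h for the authoritative definitions):

    #define RELAXED_LOAD(ptr)        __atomic_load_n(ptr, __ATOMIC_RELAXED)
    #define RELAXED_STORE(ptr, val)  __atomic_store_n(ptr, val, __ATOMIC_RELAXED)
    #define RELEASE_STORE(ptr, val)  __atomic_store_n(ptr, val, __ATOMIC_RELEASE)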
@@ -1300,8 +1299,8 @@ dirty_MUT_VAR(StgRegTable *reg, StgMutVar *mvar, StgClosure *old)
Capability *cap = regTableToCapability(reg);
// No barrier required here as no other heap object fields are read. See
// note [Heap memory barriers] in SMP.h.
- if (mvar->header.info == &stg_MUT_VAR_CLEAN_info) {
- mvar->header.info = &stg_MUT_VAR_DIRTY_info;
+ if (RELAXED_LOAD(&mvar->header.info) == &stg_MUT_VAR_CLEAN_info) {
+ SET_INFO((StgClosure*) mvar, &stg_MUT_VAR_DIRTY_info);
recordClosureMutated(cap, (StgClosure *) mvar);
IF_NONMOVING_WRITE_BARRIER_ENABLED {
// See Note [Dirty flags in the non-moving collector] in NonMoving.c
@@ -1323,8 +1322,8 @@ dirty_TVAR(Capability *cap, StgTVar *p,
{
// No barrier required here as no other heap object fields are read. See
// note [Heap memory barriers] in SMP.h.
- if (p->header.info == &stg_TVAR_CLEAN_info) {
- p->header.info = &stg_TVAR_DIRTY_info;
+ if (RELAXED_LOAD(&p->header.info) == &stg_TVAR_CLEAN_info) {
+ SET_INFO((StgClosure*) p, &stg_TVAR_DIRTY_info);
recordClosureMutated(cap,(StgClosure*)p);
IF_NONMOVING_WRITE_BARRIER_ENABLED {
// See Note [Dirty flags in the non-moving collector] in NonMoving.c
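dirty_MUT_VAR and dirty_TVAR share a check-then-set pattern: a relaxed load of the info pointer tests for the CLEAN state, then a store installs the DIRTY info table. Two capabilities can race here, both observe CLEAN, and both mark the closure dirty; the worst case is a duplicate entry on a mutable list, which the GC tolerates. A standalone sketch of that benign-lost-update reasoning in C11 atomics (illustrative, not RTS code):

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { CLEAN = 0, DIRTY = 1 };
    static _Atomic int state = CLEAN;

    /* Both the load and the store are relaxed: no ordering is needed
     * because no other fields are published through this flag, and a
     * duplicate CLEAN->DIRTY transition is harmless. */
    static bool mark_dirty(void) {
        if (atomic_load_explicit(&state, memory_order_relaxed) == CLEAN) {
            atomic_store_explicit(&state, DIRTY, memory_order_relaxed);
            return true;   /* caller records the closure as mutated */
        }
        return false;
    }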
@@ -1341,8 +1340,8 @@ dirty_TVAR(Capability *cap, StgTVar *p,
void
setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target)
{
- if (tso->dirty == 0) {
- tso->dirty = 1;
+ if (RELAXED_LOAD(&tso->dirty) == 0) {
+ RELAXED_STORE(&tso->dirty, 1);
recordClosureMutated(cap,(StgClosure*)tso);
IF_NONMOVING_WRITE_BARRIER_ENABLED {
updateRemembSetPushClosure(cap, (StgClosure *) tso->_link);
@@ -1354,8 +1353,8 @@ setTSOLink (Capability *cap, StgTSO *tso, StgTSO *target)
void
setTSOPrev (Capability *cap, StgTSO *tso, StgTSO *target)
{
- if (tso->dirty == 0) {
- tso->dirty = 1;
+ if (RELAXED_LOAD(&tso->dirty) == 0) {
+ RELAXED_STORE(&tso->dirty, 1);
recordClosureMutated(cap,(StgClosure*)tso);
IF_NONMOVING_WRITE_BARRIER_ENABLED {
updateRemembSetPushClosure(cap, (StgClosure *) tso->block_info.prev);
@@ -1367,8 +1366,8 @@ setTSOPrev (Capability *cap, StgTSO *tso, StgTSO *target)
void
dirty_TSO (Capability *cap, StgTSO *tso)
{
- if (tso->dirty == 0) {
- tso->dirty = 1;
+ if (RELAXED_LOAD(&tso->dirty) == 0) {
+ RELAXED_STORE(&tso->dirty, 1);
recordClosureMutated(cap,(StgClosure*)tso);
}
@@ -1386,8 +1385,8 @@ dirty_STACK (Capability *cap, StgStack *stack)
updateRemembSetPushStack(cap, stack);
}
- if (! (stack->dirty & STACK_DIRTY)) {
- stack->dirty = STACK_DIRTY;
+ if (RELAXED_LOAD(&stack->dirty) == 0) {
+ RELAXED_STORE(&stack->dirty, 1);
recordClosureMutated(cap,(StgClosure*)stack);
}
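setTSOLink, setTSOPrev, dirty_TSO and dirty_STACK above all apply the same relaxed check-then-set pattern sketched after dirty_TVAR: the dirty flag is a single word, no other state is published through it, and a racing duplicate marking is benign.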
@@ -1562,10 +1561,13 @@ calcNeeded (bool force_major, memcount *blocks_needed)
for (uint32_t g = 0; g < RtsFlags.GcFlags.generations; g++) {
generation *gen = &generations[g];
-
W_ blocks = gen->live_estimate ? (gen->live_estimate / BLOCK_SIZE_W) : gen->n_blocks;
- blocks += gen->n_large_blocks
- + gen->n_compact_blocks;
+
+ // This can race with allocate() and compactAllocateBlockInternal()
+ // but only needs to be approximate
+ TSAN_ANNOTATE_BENIGN_RACE(&gen->n_large_blocks, "n_large_blocks");
+ blocks += RELAXED_LOAD(&gen->n_large_blocks)
+ + RELAXED_LOAD(&gen->n_compact_blocks);
// we need at least this much space
needed += blocks;
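calcNeeded only needs an approximate block count, so the racing reads are annotated as benign for ThreadSanitizer rather than synchronised. A sketch of how such an annotation is commonly defined, assuming a wrapper over TSAN's dynamic-annotation interface along the lines of GHC's TSANUtils.h (compiles to nothing unless building with -fsanitize=thread; the TSAN_ENABLED guard below is illustrative):

    #if defined(TSAN_ENABLED)
    /* Provided by the ThreadSanitizer runtime: */
    void AnnotateBenignRaceSized(const char *file, int line,
                                 const volatile void *mem,
                                 long size, const char *description);
    #define TSAN_ANNOTATE_BENIGN_RACE(addr, desc) \
        AnnotateBenignRaceSized(__FILE__, __LINE__, (addr), \
                                sizeof(*(addr)), (desc))
    #else
    #define TSAN_ANNOTATE_BENIGN_RACE(addr, desc) /* no-op */
    #endif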