author     Daniel Gröber <dxld@darkboxed.org>          2020-04-19 10:56:13 +0200
committer  Marge Bot <ben+marge-bot@smart-cactus.org>  2020-06-01 06:32:56 -0400
commit     389920858e0b9efe5234cb7dac55d06e955768f7 (patch)
tree       2e16cd65ed1e4f28b86c580acabb9bdb4b0ab3ad
parent     6159559b5eb71ed7287998e954f96cdfb2d48f04 (diff)
download   haskell-389920858e0b9efe5234cb7dac55d06e955768f7.tar.gz
Always zero shrunk mutable array slop when profiling
When shrinking arrays in the profiling way we currently don't always zero the leftover slop. This means we can't traverse such closures in the heap profiler. The old Note [zeroing slop] and #8402 give some rationale for why this is so, but I believe the reasoning doesn't apply to mutable closures: there, users already have to ensure that multiple threads don't step on each other's toes, so zeroing should be safe.
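
To see why zeroing matters, here is a minimal sketch of the idea behind Note [skipping slop in the heap profiler] (illustrative only, not the actual RTS code): once slop is zeroed, a word-by-word block scan can treat runs of zero words as padding, since a zero word can never be a valid info pointer, and resume at the next candidate closure header.

    #include <stdint.h>

    /* Sketch only: assumes the slop left behind by a shrunk closure has been
     * zeroed by zeroSlop, so a zero word always marks padding, never a
     * closure header. */
    static uintptr_t *skip_zeroed_slop(uintptr_t *p, uintptr_t *lim)
    {
        while (p < lim && *p == 0) {
            p++;        /* zero word: slop, keep scanning */
        }
        return p;       /* first non-zero word: candidate closure header */
    }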
-rw-r--r--  includes/rts/storage/ClosureMacros.h  31
1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/includes/rts/storage/ClosureMacros.h b/includes/rts/storage/ClosureMacros.h
index 15146c0592..0043145c3f 100644
--- a/includes/rts/storage/ClosureMacros.h
+++ b/includes/rts/storage/ClosureMacros.h
@@ -532,10 +532,11 @@ zeroSlop (
StgClosure *p,
uint32_t offset, /*< offset to start zeroing at, in words */
uint32_t size, /*< total closure size, in words */
+ bool known_mutable /*< is this a closure whose slop we can always zero? */
);
EXTERN_INLINE void
-zeroSlop (StgClosure *p, uint32_t offset, uint32_t size)
+zeroSlop (StgClosure *p, uint32_t offset, uint32_t size, bool known_mutable)
{
// see Note [zeroing slop when overwriting closures], also #8402
@@ -555,7 +556,27 @@ zeroSlop (StgClosure *p, uint32_t offset, uint32_t size)
const bool zero_slop_immutable =
want_to_zero_immutable_slop && can_zero_immutable_slop;
- if(!zero_slop_immutable)
+ const bool zero_slop_mutable =
+#if defined(PROFILING)
+ // Always zero mutable closure slop when profiling. We do this to cover
+ // the case of shrinking mutable arrays in pinned blocks for the heap
+ // profiler, see Note [skipping slop in the heap profiler]
+ //
+ // TODO: We could make this check more specific and only zero if the
+ // object is in a BF_PINNED bdescr here. Update Note [slop on the heap]
+ // and [zeroing slop when overwriting closures] if you change this.
+ true
+#else
+ zero_slop_immutable
+#endif
+ ;
+
+ const bool zero_slop =
+ // If we're not sure this is a mutable closure, treat it like an
+ // immutable one.
+ known_mutable ? zero_slop_mutable : zero_slop_immutable;
+
+ if(!zero_slop)
return;
for (uint32_t i = offset; i < size; i++) {
@@ -571,7 +592,7 @@ EXTERN_INLINE void overwritingClosure (StgClosure *p)
if(era > 0 && !isInherentlyUsed(get_itbl(p)->type))
LDV_recordDead(p, size);
#endif
- zeroSlop(p, sizeofW(StgThunkHeader), size);
+ zeroSlop(p, sizeofW(StgThunkHeader), size, /*known_mutable=*/false);
}
// Version of 'overwritingClosure' which overwrites only a suffix of a
@@ -595,7 +616,7 @@ overwritingMutableClosureOfs (StgClosure *p, uint32_t offset)
#if defined(PROFILING)
ASSERT(isInherentlyUsed(get_itbl(p)->type) == true);
#endif
- zeroSlop(p, offset, closure_sizeW(p));
+ zeroSlop(p, offset, closure_sizeW(p), /*known_mutable=*/true);
}
// Version of 'overwritingClosure' which takes closure size as argument.
@@ -609,5 +630,5 @@ EXTERN_INLINE void overwritingClosureSize (StgClosure *p, uint32_t size)
if(era > 0)
LDV_recordDead(p, size);
#endif
- zeroSlop(p, sizeofW(StgThunkHeader), size);
+ zeroSlop(p, sizeofW(StgThunkHeader), size, /*known_mutable=*/false);
}
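
For context, a hedged sketch of how a caller on the array-shrinking path might use overwritingMutableClosureOfs (the real shrink primop lives in Cmm; the function below is illustrative, not GHC source): the slop is zeroed before the size field is updated, because the old closure size is read from the header, and known_mutable=true is what now lets zeroSlop run even in the profiling way.

    #include "Rts.h"

    /* Illustrative C rendering only; the actual shrink primop is written in
     * Cmm. Shrink a mutable byte array to new_size bytes, zeroing the
     * trailing slop so the heap profiler can still walk the block. */
    static void shrink_mutable_byte_array(StgArrBytes *arr, StgWord new_size)
    {
        /* Zero from the end of the new payload to the end of the old
         * closure. Must happen before the size field is updated, since
         * overwritingMutableClosureOfs derives the old size from the
         * header. */
        overwritingMutableClosureOfs((StgClosure *)arr,
                                     sizeofW(StgArrBytes)
                                     + ROUNDUP_BYTES_TO_WDS(new_size));

        arr->bytes = new_size;   /* record the smaller size only afterwards */
    }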