author     Simon Peyton Jones <simonpj@microsoft.com>   2011-07-18 23:11:02 +0100
committer  Simon Peyton Jones <simonpj@microsoft.com>   2011-07-18 23:11:02 +0100
commit     5c9dfadd979ca3ccb8dd7c21ddb9fb0fe9cdb3fe (patch)
tree       aedac951e211cd35fa93140fbb7640cac555784a /rts
parent     72883e48d93528acf44e3ba67c66a66833fe61f3 (diff)
parent     8f4f29f655fdda443861152a24588fcaba29b168 (diff)
download   haskell-5c9dfadd979ca3ccb8dd7c21ddb9fb0fe9cdb3fe.tar.gz
Merge branch 'master' of http://darcs.haskell.org/ghc
Diffstat (limited to 'rts')
-rw-r--r--  rts/Capability.c           78
-rw-r--r--  rts/Capability.h           10
-rw-r--r--  rts/Globals.c               8
-rw-r--r--  rts/Linker.c                1
-rw-r--r--  rts/RtsFlags.c             60
-rw-r--r--  rts/RtsProbes.d            14
-rw-r--r--  rts/Schedule.c             24
-rw-r--r--  rts/Sparks.c               75
-rw-r--r--  rts/Sparks.h               42
-rw-r--r--  rts/Stats.c                23
-rw-r--r--  rts/StgCRun.c              12
-rw-r--r--  rts/StgRun.h                4
-rw-r--r--  rts/Trace.c               174
-rw-r--r--  rts/Trace.h               196
-rw-r--r--  rts/eventlog/EventLog.c   111
-rw-r--r--  rts/eventlog/EventLog.h    12
16 files changed, 639 insertions(+), 205 deletions(-)
diff --git a/rts/Capability.c b/rts/Capability.c
index fe5dbdca40..91c5e2d98e 100644
--- a/rts/Capability.c
+++ b/rts/Capability.c
@@ -92,12 +92,17 @@ findSpark (Capability *cap)
// spark = reclaimSpark(cap->sparks);
// However, measurements show that this makes at least one benchmark
// slower (prsa) and doesn't affect the others.
- spark = tryStealSpark(cap);
+ spark = tryStealSpark(cap->sparks);
+ while (spark != NULL && fizzledSpark(spark)) {
+ cap->spark_stats.fizzled++;
+ traceEventSparkFizzle(cap);
+ spark = tryStealSpark(cap->sparks);
+ }
if (spark != NULL) {
- cap->sparks_converted++;
+ cap->spark_stats.converted++;
// Post event for running a spark from capability's own pool.
- traceEventRunSpark(cap, cap->r.rCurrentTSO);
+ traceEventSparkRun(cap);
return spark;
}
@@ -121,7 +126,12 @@ findSpark (Capability *cap)
if (emptySparkPoolCap(robbed)) // nothing to steal here
continue;
- spark = tryStealSpark(robbed);
+ spark = tryStealSpark(robbed->sparks);
+ while (spark != NULL && fizzledSpark(spark)) {
+ cap->spark_stats.fizzled++;
+ traceEventSparkFizzle(cap);
+ spark = tryStealSpark(robbed->sparks);
+ }
if (spark == NULL && !emptySparkPoolCap(robbed)) {
// we conflicted with another thread while trying to steal;
// try again later.
@@ -129,9 +139,8 @@ findSpark (Capability *cap)
}
if (spark != NULL) {
- cap->sparks_converted++;
-
- traceEventStealSpark(cap, cap->r.rCurrentTSO, robbed->no);
+ cap->spark_stats.converted++;
+ traceEventSparkSteal(cap, robbed->no);
return spark;
}
@@ -224,11 +233,13 @@ initCapability( Capability *cap, nat i )
cap->returning_tasks_hd = NULL;
cap->returning_tasks_tl = NULL;
cap->inbox = (Message*)END_TSO_QUEUE;
- cap->sparks_created = 0;
- cap->sparks_dud = 0;
- cap->sparks_converted = 0;
- cap->sparks_gcd = 0;
- cap->sparks_fizzled = 0;
+ cap->sparks = allocSparkPool();
+ cap->spark_stats.created = 0;
+ cap->spark_stats.dud = 0;
+ cap->spark_stats.overflowed = 0;
+ cap->spark_stats.converted = 0;
+ cap->spark_stats.gcd = 0;
+ cap->spark_stats.fizzled = 0;
#endif
cap->f.stgEagerBlackholeInfo = (W_)&__stg_EAGER_BLACKHOLE_info;
@@ -255,6 +266,9 @@ initCapability( Capability *cap, nat i )
cap->pinned_object_block = NULL;
traceCapsetAssignCap(CAPSET_OSPROCESS_DEFAULT, i);
+#if defined(THREADED_RTS)
+ traceSparkCounters(cap);
+#endif
}
/* ---------------------------------------------------------------------------
@@ -608,6 +622,7 @@ yieldCapability (Capability** pCap, Task *task)
traceEventGcStart(cap);
gcWorkerThread(cap);
traceEventGcEnd(cap);
+ traceSparkCounters(cap);
return;
}
@@ -819,7 +834,9 @@ shutdownCapability (Capability *cap,
// threads performing foreign calls that will eventually try to
// return via resumeThread() and attempt to grab cap->lock.
// closeMutex(&cap->lock);
-
+
+ traceSparkCounters(cap);
+
#endif /* THREADED_RTS */
traceCapsetRemoveCap(CAPSET_OSPROCESS_DEFAULT, cap->no);
@@ -834,6 +851,10 @@ shutdownCapabilities(Task *task, rtsBool safe)
shutdownCapability(&capabilities[i], task, safe);
}
traceCapsetDelete(CAPSET_OSPROCESS_DEFAULT);
+
+#if defined(THREADED_RTS)
+ ASSERT(checkSparkCountInvariant());
+#endif
}
static void
@@ -904,3 +925,34 @@ markCapabilities (evac_fn evac, void *user)
markCapability(evac, user, &capabilities[n], rtsFalse);
}
}
+
+#if defined(THREADED_RTS)
+rtsBool checkSparkCountInvariant (void)
+{
+ SparkCounters sparks = { 0, 0, 0, 0, 0, 0 };
+ StgWord64 remaining = 0;
+ nat i;
+
+ for (i = 0; i < n_capabilities; i++) {
+ sparks.created += capabilities[i].spark_stats.created;
+ sparks.dud += capabilities[i].spark_stats.dud;
+ sparks.overflowed+= capabilities[i].spark_stats.overflowed;
+ sparks.converted += capabilities[i].spark_stats.converted;
+ sparks.gcd += capabilities[i].spark_stats.gcd;
+ sparks.fizzled += capabilities[i].spark_stats.fizzled;
+ remaining += sparkPoolSize(capabilities[i].sparks);
+ }
+
+ /* The invariant is
+ * created = converted + remaining + gcd + fizzled
+ */
+ debugTrace(DEBUG_sparks,"spark invariant: %ld == %ld + %ld + %ld + %ld "
+ "(created == converted + remaining + gcd + fizzled)",
+ sparks.created, sparks.converted, remaining,
+ sparks.gcd, sparks.fizzled);
+
+ return (sparks.created ==
+ sparks.converted + remaining + sparks.gcd + sparks.fizzled);
+
+}
+#endif
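
The invariant checked by checkSparkCountInvariant above is

    created == converted + remaining + gcd + fizzled

where remaining is the total number of sparks still sitting in the capabilities' pools. A purely illustrative set of figures (not taken from any real run): created = 1000, converted = 600, remaining = 50, gcd = 250, fizzled = 100, and indeed 600 + 50 + 250 + 100 == 1000. Dud and overflowed sparks never make it into a pool (see the new newSpark in rts/Sparks.c below), so they are counted separately and do not appear in the invariant.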
diff --git a/rts/Capability.h b/rts/Capability.h
index d380af9cff..10c7c496e4 100644
--- a/rts/Capability.h
+++ b/rts/Capability.h
@@ -98,11 +98,7 @@ struct Capability_ {
SparkPool *sparks;
// Stats on spark creation/conversion
- nat sparks_created;
- nat sparks_dud;
- nat sparks_converted;
- nat sparks_gcd;
- nat sparks_fizzled;
+ SparkCounters spark_stats;
#endif
// Per-capability STM-related data
@@ -143,6 +139,10 @@ struct Capability_ {
ASSERT(myTask() == task); \
ASSERT_TASK_ID(task);
+#if defined(THREADED_RTS)
+rtsBool checkSparkCountInvariant (void);
+#endif
+
// Converts a *StgRegTable into a *Capability.
//
INLINE_HEADER Capability *
diff --git a/rts/Globals.c b/rts/Globals.c
index 7b8967f685..06b2f9721f 100644
--- a/rts/Globals.c
+++ b/rts/Globals.c
@@ -19,7 +19,6 @@
#include "Stable.h"
typedef enum {
- TypeableStore,
GHCConcSignalSignalHandlerStore,
GHCConcWindowsPendingDelaysStore,
GHCConcWindowsIOManagerThreadStore,
@@ -80,13 +79,6 @@ static StgStablePtr getOrSetKey(StoreKey key, StgStablePtr ptr)
return ret;
}
-
-StgStablePtr
-getOrSetTypeableStore(StgStablePtr ptr)
-{
- return getOrSetKey(TypeableStore,ptr);
-}
-
StgStablePtr
getOrSetGHCConcSignalSignalHandlerStore(StgStablePtr ptr)
{
diff --git a/rts/Linker.c b/rts/Linker.c
index 6d29ce7409..781f705536 100644
--- a/rts/Linker.c
+++ b/rts/Linker.c
@@ -787,7 +787,6 @@ typedef struct _RtsSymbolVal {
SymI_HasProto(forkProcess) \
SymI_HasProto(forkOS_createThread) \
SymI_HasProto(freeHaskellFunctionPtr) \
- SymI_HasProto(getOrSetTypeableStore) \
SymI_HasProto(getOrSetGHCConcSignalSignalHandlerStore) \
SymI_HasProto(getOrSetGHCConcWindowsPendingDelaysStore) \
SymI_HasProto(getOrSetGHCConcWindowsIOManagerThreadStore) \
diff --git a/rts/RtsFlags.c b/rts/RtsFlags.c
index 24181d32b0..fcc1f49a36 100644
--- a/rts/RtsFlags.c
+++ b/rts/RtsFlags.c
@@ -163,6 +163,9 @@ void initRtsFlagsDefaults(void)
RtsFlags.TraceFlags.tracing = TRACE_NONE;
RtsFlags.TraceFlags.timestamp = rtsFalse;
RtsFlags.TraceFlags.scheduler = rtsFalse;
+ RtsFlags.TraceFlags.gc = rtsFalse;
+ RtsFlags.TraceFlags.sparks_sampled= rtsFalse;
+ RtsFlags.TraceFlags.sparks_full = rtsFalse;
#endif
RtsFlags.MiscFlags.tickInterval = 20; /* In milliseconds */
@@ -288,9 +291,15 @@ usage_text[] = {
# endif
" where [flags] can contain:",
" s scheduler events",
+" g GC events",
+" p par spark events (sampled)",
+" f par spark events (full detail)",
# ifdef DEBUG
" t add time stamps (only useful with -v)",
# endif
+" a all event classes above",
+" -x disable an event class, for any flag above",
+" the initial enabled event classes are 'sgp'",
#endif
#if !defined(PROFILING)
@@ -1429,19 +1438,64 @@ decodeSize(const char *flag, nat offset, StgWord64 min, StgWord64 max)
static void read_trace_flags(char *arg)
{
char *c;
+ rtsBool enabled = rtsTrue;
+ /* Syntax for tracing flags currently looks like:
+ *
+ * -l To turn on eventlog tracing with default trace classes
+ * -lx Turn on class 'x' (for some class listed below)
+ * -l-x Turn off class 'x'
+ * -la Turn on all classes
+ * -l-a Turn off all classes
+ *
+ * This lets users say things like:
+ * -la-p "all but sparks"
+ * -l-ap "only sparks"
+ */
+
+ /* Start by turning on the default tracing flags.
+ *
+ * Currently this is all the trace classes, except full-detail sparks.
+ * Similarly, in future we might default to slightly less verbose
+ * scheduler or GC tracing.
+ */
+ RtsFlags.TraceFlags.scheduler = rtsTrue;
+ RtsFlags.TraceFlags.gc = rtsTrue;
+ RtsFlags.TraceFlags.sparks_sampled = rtsTrue;
for (c = arg; *c != '\0'; c++) {
switch(*c) {
case '\0':
break;
+ case '-':
+ enabled = rtsFalse;
+ break;
+ case 'a':
+ RtsFlags.TraceFlags.scheduler = enabled;
+ RtsFlags.TraceFlags.gc = enabled;
+ RtsFlags.TraceFlags.sparks_sampled = enabled;
+ RtsFlags.TraceFlags.sparks_full = enabled;
+ enabled = rtsTrue;
+ break;
+
case 's':
- RtsFlags.TraceFlags.scheduler = rtsTrue;
+ RtsFlags.TraceFlags.scheduler = enabled;
+ enabled = rtsTrue;
+ break;
+ case 'p':
+ RtsFlags.TraceFlags.sparks_sampled = enabled;
+ enabled = rtsTrue;
+ break;
+ case 'f':
+ RtsFlags.TraceFlags.sparks_full = enabled;
+ enabled = rtsTrue;
break;
case 't':
- RtsFlags.TraceFlags.timestamp = rtsTrue;
+ RtsFlags.TraceFlags.timestamp = enabled;
+ enabled = rtsTrue;
break;
case 'g':
- // ignored for backwards-compat
+ RtsFlags.TraceFlags.gc = enabled;
+ enabled = rtsTrue;
break;
default:
errorBelch("unknown trace option: %c",*c);
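
Putting the flag syntax from the comment above into concrete terms (./prog is a placeholder program name, assumed to be built so that the -l eventlog option is accepted, e.g. linked with -eventlog):

    ./prog +RTS -l       # default trace classes: scheduler, GC, sampled sparks ('sgp')
    ./prog +RTS -lf      # defaults plus full-detail spark events
    ./prog +RTS -la      # all event classes
    ./prog +RTS -la-p    # the comment's "all but sparks": enable all, then disable 'p'
    ./prog +RTS -l-ap    # the comment's "only sparks": disable all, then enable 'p'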
diff --git a/rts/RtsProbes.d b/rts/RtsProbes.d
index bd32fca385..755b25525b 100644
--- a/rts/RtsProbes.d
+++ b/rts/RtsProbes.d
@@ -43,8 +43,6 @@ provider HaskellEvent {
probe stop__thread (EventCapNo, EventThreadID, EventThreadStatus, EventThreadID);
probe thread__runnable (EventCapNo, EventThreadID);
probe migrate__thread (EventCapNo, EventThreadID, EventCapNo);
- probe run__spark (EventCapNo, EventThreadID);
- probe steal__spark (EventCapNo, EventThreadID, EventCapNo);
probe shutdown (EventCapNo);
probe thread_wakeup (EventCapNo, EventThreadID, EventCapNo);
probe gc__start (EventCapNo);
@@ -67,4 +65,16 @@ provider HaskellEvent {
probe capset__assign__cap(EventCapsetID, EventCapNo);
probe capset__remove__cap(EventCapsetID, EventCapNo);
+ probe spark__counters(EventCapNo,
+                        StgWord, StgWord, StgWord,
+                        StgWord, StgWord, StgWord,
+ StgWord);
+
+ probe spark__create (EventCapNo);
+ probe spark__dud (EventCapNo);
+ probe spark__overflow (EventCapNo);
+ probe spark__run (EventCapNo);
+ probe spark__steal (EventCapNo, EventCapNo);
+ probe spark__fizzle (EventCapNo);
+ probe spark__gc (EventCapNo);
};
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 45959a92eb..2a2cc22a66 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -581,6 +581,10 @@ static void
schedulePreLoop(void)
{
// initialisation for scheduler - what cannot go into initScheduler()
+
+#if defined(mingw32_HOST_OS)
+ win32AllocStack();
+#endif
}
/* -----------------------------------------------------------------------------
@@ -779,6 +783,10 @@ schedulePushWork(Capability *cap USED_IF_THREADS,
if (emptySparkPoolCap(free_caps[i])) {
spark = tryStealSpark(cap->sparks);
if (spark != NULL) {
+ /* TODO: if anyone wants to re-enable this code then
+ * they must consider the fizzledSpark(spark) case
+ * and update the per-cap spark statistics.
+ */
debugTrace(DEBUG_sched, "pushing spark %p to capability %d", spark, free_caps[i]->no);
traceEventStealSpark(free_caps[i], t, cap->no);
@@ -1406,6 +1414,11 @@ scheduleDoGC (Capability *cap, Task *task USED_IF_THREADS, rtsBool force_major)
// multi-threaded GC: make sure all the Capabilities donate one
// GC thread each.
waitForGcThreads(cap);
+
+#if defined(THREADED_RTS)
+ // Stable point where we can do a global check on our spark counters
+ ASSERT(checkSparkCountInvariant());
+#endif
}
#endif
@@ -1436,6 +1449,8 @@ delete_threads_and_gc:
#endif
traceEventGcEnd(cap);
+ traceSparkCounters(cap);
+
if (recent_activity == ACTIVITY_INACTIVE && force_major)
{
// We are doing a GC because the system has been idle for a
@@ -1453,6 +1468,11 @@ delete_threads_and_gc:
recent_activity = ACTIVITY_YES;
}
+#if defined(THREADED_RTS)
+ // Stable point where we can do a global check on our spark counters
+ ASSERT(checkSparkCountInvariant());
+#endif
+
if (heap_census) {
debugTrace(DEBUG_sched, "performing heap census");
heapCensus();
@@ -1992,10 +2012,6 @@ initScheduler(void)
initTaskManager();
-#if defined(THREADED_RTS)
- initSparkPools();
-#endif
-
RELEASE_LOCK(&sched_mutex);
#if defined(THREADED_RTS)
diff --git a/rts/Sparks.c b/rts/Sparks.c
index a826190941..4241656795 100644
--- a/rts/Sparks.c
+++ b/rts/Sparks.c
@@ -17,14 +17,10 @@
#if defined(THREADED_RTS)
-void
-initSparkPools( void )
+SparkPool *
+allocSparkPool( void )
{
- /* walk over the capabilities, allocating a spark pool for each one */
- nat i;
- for (i = 0; i < n_capabilities; i++) {
- capabilities[i].sparks = newWSDeque(RtsFlags.ParFlags.maxLocalSparks);
- }
+ return newWSDeque(RtsFlags.ParFlags.maxLocalSparks);
}
void
@@ -63,48 +59,23 @@ newSpark (StgRegTable *reg, StgClosure *p)
Capability *cap = regTableToCapability(reg);
SparkPool *pool = cap->sparks;
- /* I am not sure whether this is the right thing to do.
- * Maybe it is better to exploit the tag information
- * instead of throwing it away?
- */
- p = UNTAG_CLOSURE(p);
-
- if (closure_SHOULD_SPARK(p)) {
- pushWSDeque(pool,p);
- cap->sparks_created++;
+ if (!fizzledSpark(p)) {
+ if (pushWSDeque(pool,p)) {
+ cap->spark_stats.created++;
+ traceEventSparkCreate(cap);
+ } else {
+ /* overflowing the spark pool */
+ cap->spark_stats.overflowed++;
+ traceEventSparkOverflow(cap);
+ }
} else {
- cap->sparks_dud++;
+ cap->spark_stats.dud++;
+ traceEventSparkDud(cap);
}
return 1;
}
-/* -----------------------------------------------------------------------------
- *
- * tryStealSpark: try to steal a spark from a Capability.
- *
- * Returns a valid spark, or NULL if the pool was empty, and can
- * occasionally return NULL if there was a race with another thread
- * stealing from the same pool. In this case, try again later.
- *
- -------------------------------------------------------------------------- */
-
-StgClosure *
-tryStealSpark (Capability *cap)
-{
- SparkPool *pool = cap->sparks;
- StgClosure *stolen;
-
- do {
- stolen = stealWSDeque_(pool);
- // use the no-loopy version, stealWSDeque_(), since if we get a
- // spurious NULL here the caller may want to try stealing from
- // other pools before trying again.
- } while (stolen != NULL && !closure_SHOULD_SPARK(stolen));
-
- return stolen;
-}
-
/* --------------------------------------------------------------------------
* Remove all sparks from the spark queues which should not spark any
* more. Called after GC. We assume exclusive access to the structure
@@ -205,7 +176,8 @@ pruneSparkQueue (Capability *cap)
// evaluated, but it doesn't hurt to have this check for
// robustness.
pruned_sparks++;
- cap->sparks_fizzled++;
+ cap->spark_stats.fizzled++;
+ traceEventSparkFizzle(cap);
} else {
info = spark->header.info;
if (IS_FORWARDING_PTR(info)) {
@@ -217,7 +189,8 @@ pruneSparkQueue (Capability *cap)
n++;
} else {
pruned_sparks++; // discard spark
- cap->sparks_fizzled++;
+ cap->spark_stats.fizzled++;
+ traceEventSparkFizzle(cap);
}
} else if (HEAP_ALLOCED(spark)) {
if ((Bdescr((P_)spark)->flags & BF_EVACUATED)) {
@@ -227,11 +200,13 @@ pruneSparkQueue (Capability *cap)
n++;
} else {
pruned_sparks++; // discard spark
- cap->sparks_fizzled++;
+ cap->spark_stats.fizzled++;
+ traceEventSparkFizzle(cap);
}
} else {
pruned_sparks++; // discard spark
- cap->sparks_gcd++;
+ cap->spark_stats.gcd++;
+ traceEventSparkGC(cap);
}
} else {
if (INFO_PTR_TO_STRUCT(info)->type == THUNK_STATIC) {
@@ -241,11 +216,13 @@ pruneSparkQueue (Capability *cap)
n++;
} else {
pruned_sparks++; // discard spark
- cap->sparks_gcd++;
+ cap->spark_stats.gcd++;
+ traceEventSparkGC(cap);
}
} else {
pruned_sparks++; // discard spark
- cap->sparks_fizzled++;
+ cap->spark_stats.fizzled++;
+ traceEventSparkFizzle(cap);
}
}
}
diff --git a/rts/Sparks.h b/rts/Sparks.h
index cffe99dd39..e381dd540f 100644
--- a/rts/Sparks.h
+++ b/rts/Sparks.h
@@ -15,12 +15,22 @@
/* typedef for SparkPool in RtsTypes.h */
+/* Stats on spark creation/conversion */
+typedef struct {
+ StgWord created;
+ StgWord dud;
+ StgWord overflowed;
+ StgWord converted;
+ StgWord gcd;
+ StgWord fizzled;
+} SparkCounters;
+
#if defined(THREADED_RTS)
typedef WSDeque SparkPool;
// Initialisation
-void initSparkPools (void);
+SparkPool *allocSparkPool (void);
// Take a spark from the "write" end of the pool. Can be called
// by the pool owner only.
@@ -30,7 +40,9 @@ INLINE_HEADER StgClosure* reclaimSpark(SparkPool *pool);
// if the pool is almost empty).
INLINE_HEADER rtsBool looksEmpty(SparkPool* deque);
-StgClosure * tryStealSpark (Capability *cap);
+INLINE_HEADER StgClosure * tryStealSpark (SparkPool *pool);
+INLINE_HEADER rtsBool fizzledSpark (StgClosure *);
+
void freeSparkPool (SparkPool *pool);
void createSparkThread (Capability *cap);
void traverseSparkQueue(evac_fn evac, void *user, Capability *cap);
@@ -63,6 +75,32 @@ INLINE_HEADER void discardSparks (SparkPool *pool)
discardElements(pool);
}
+/* ----------------------------------------------------------------------------
+ *
+ * tryStealSpark: try to steal a spark from a Capability.
+ *
+ * Returns either:
+ * (a) a useful spark;
+ * (b) a fizzled spark (use fizzledSpark to check);
+ * (c) or NULL if the pool was empty, and can occasionally return NULL
+ * if there was a race with another thread stealing from the same
+ * pool. In this case, try again later.
+ *
+ -------------------------------------------------------------------------- */
+
+INLINE_HEADER StgClosure * tryStealSpark (SparkPool *pool)
+{
+ return stealWSDeque_(pool);
+ // use the no-loopy version, stealWSDeque_(), since if we get a
+ // spurious NULL here the caller may want to try stealing from
+ // other pools before trying again.
+}
+
+INLINE_HEADER rtsBool fizzledSpark (StgClosure *spark)
+{
+ return (GET_CLOSURE_TAG(spark) != 0 || !closure_SHOULD_SPARK(spark));
+}
+
#endif // THREADED_RTS
#include "EndPrivate.h"
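
A minimal sketch of the intended caller pattern for the new tryStealSpark/fizzledSpark pair, mirroring the loop added to findSpark in rts/Capability.c above (cap and pool are assumed to be in scope; this is illustration, not a new API):

    StgClosure *spark = tryStealSpark(pool);
    while (spark != NULL && fizzledSpark(spark)) {
        // a fizzled spark is discarded, but still accounted for
        cap->spark_stats.fizzled++;
        traceEventSparkFizzle(cap);
        spark = tryStealSpark(pool);
    }
    // spark is now either a useful spark, or NULL -- and NULL may mean
    // either an empty pool or a race with another thief (try again later)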
diff --git a/rts/Stats.c b/rts/Stats.c
index 9fc702a2a3..7c02b5a7d9 100644
--- a/rts/Stats.c
+++ b/rts/Stats.c
@@ -629,21 +629,20 @@ stat_exit(int alloc)
{
nat i;
- lnat sparks_created = 0;
- lnat sparks_dud = 0;
- lnat sparks_converted = 0;
- lnat sparks_gcd = 0;
- lnat sparks_fizzled = 0;
+ SparkCounters sparks = { 0, 0, 0, 0, 0, 0};
for (i = 0; i < n_capabilities; i++) {
- sparks_created += capabilities[i].sparks_created;
- sparks_dud += capabilities[i].sparks_dud;
- sparks_converted += capabilities[i].sparks_converted;
- sparks_gcd += capabilities[i].sparks_gcd;
- sparks_fizzled += capabilities[i].sparks_fizzled;
+ sparks.created += capabilities[i].spark_stats.created;
+ sparks.dud += capabilities[i].spark_stats.dud;
+ sparks.overflowed+= capabilities[i].spark_stats.overflowed;
+ sparks.converted += capabilities[i].spark_stats.converted;
+ sparks.gcd += capabilities[i].spark_stats.gcd;
+ sparks.fizzled += capabilities[i].spark_stats.fizzled;
}
- statsPrintf(" SPARKS: %ld (%ld converted, %ld dud, %ld GC'd, %ld fizzled)\n\n",
- sparks_created + sparks_dud, sparks_converted, sparks_dud, sparks_gcd, sparks_fizzled);
+ statsPrintf(" SPARKS: %ld (%ld converted, %ld overflowed, %ld dud, %ld GC'd, %ld fizzled)\n\n",
+ sparks.created + sparks.dud + sparks.overflowed,
+ sparks.converted, sparks.overflowed, sparks.dud,
+ sparks.gcd, sparks.fizzled);
}
#endif
diff --git a/rts/StgCRun.c b/rts/StgCRun.c
index 54ac04151c..69d9549f6e 100644
--- a/rts/StgCRun.c
+++ b/rts/StgCRun.c
@@ -192,6 +192,18 @@ StgRunIsImplementedInAssembler(void)
);
}
+#if defined(mingw32_HOST_OS)
+// On windows the stack has to be allocated 4k at a time, otherwise
+// we get a segfault. The C compiler knows how to do this (it calls
+// _alloca()), so we make sure that we can allocate as much stack as
+// we need:
+StgWord8 *win32AllocStack(void)
+{
+ StgWord8 stack[RESERVED_C_STACK_BYTES + 16 + 12];
+ return stack;
+}
+#endif
+
#endif
/* ----------------------------------------------------------------------------
diff --git a/rts/StgRun.h b/rts/StgRun.h
index f277097df7..71b92e2d88 100644
--- a/rts/StgRun.h
+++ b/rts/StgRun.h
@@ -11,4 +11,8 @@
RTS_PRIVATE StgRegTable * StgRun (StgFunPtr f, StgRegTable *basereg);
+#if defined(mingw32_HOST_OS)
+StgWord8 *win32AllocStack(void);
+#endif
+
#endif /* STGRUN_H */
diff --git a/rts/Trace.c b/rts/Trace.c
index 70f4a39742..1dce968490 100644
--- a/rts/Trace.c
+++ b/rts/Trace.c
@@ -47,6 +47,9 @@ int DEBUG_sparks;
// events
int TRACE_sched;
+int TRACE_gc;
+int TRACE_spark_sampled;
+int TRACE_spark_full;
#ifdef THREADED_RTS
static Mutex trace_utx;
@@ -90,8 +93,25 @@ void initTracing (void)
RtsFlags.TraceFlags.scheduler ||
RtsFlags.DebugFlags.scheduler;
+ // -Dg turns on gc tracing too
+ TRACE_gc =
+ RtsFlags.TraceFlags.gc ||
+ RtsFlags.DebugFlags.gc;
+
+ TRACE_spark_sampled =
+ RtsFlags.TraceFlags.sparks_sampled;
+
+ // -Dr turns on full spark tracing
+ TRACE_spark_full =
+ RtsFlags.TraceFlags.sparks_full ||
+ RtsFlags.DebugFlags.sparks;
+
eventlog_enabled = RtsFlags.TraceFlags.tracing == TRACE_EVENTLOG;
+ /* Note: we can have TRACE_sched or TRACE_spark turned on even when
+ eventlog_enabled is off. In the DEBUG way we may be tracing to stderr.
+ */
+
if (eventlog_enabled) {
initEventLogging();
}
@@ -179,22 +199,10 @@ static void traceSchedEvent_stderr (Capability *cap, EventTypeNum tag,
debugBelch("cap %d: thread %lu appended to run queue\n",
cap->no, (lnat)tso->id);
break;
- case EVENT_RUN_SPARK: // (cap, thread)
- debugBelch("cap %d: thread %lu running a spark\n",
- cap->no, (lnat)tso->id);
- break;
- case EVENT_CREATE_SPARK_THREAD: // (cap, spark_thread)
- debugBelch("cap %d: creating spark thread %lu\n",
- cap->no, (long)info1);
- break;
case EVENT_MIGRATE_THREAD: // (cap, thread, new_cap)
debugBelch("cap %d: thread %lu migrating to cap %d\n",
cap->no, (lnat)tso->id, (int)info1);
break;
- case EVENT_STEAL_SPARK: // (cap, thread, victim_cap)
- debugBelch("cap %d: thread %lu stealing a spark from cap %d\n",
- cap->no, (lnat)tso->id, (int)info1);
- break;
case EVENT_THREAD_WAKEUP: // (cap, thread, info1_cap)
debugBelch("cap %d: waking up thread %lu on cap %d\n",
cap->no, (lnat)tso->id, (int)info1);
@@ -212,27 +220,6 @@ static void traceSchedEvent_stderr (Capability *cap, EventTypeNum tag,
case EVENT_SHUTDOWN: // (cap)
debugBelch("cap %d: shutting down\n", cap->no);
break;
- case EVENT_REQUEST_SEQ_GC: // (cap)
- debugBelch("cap %d: requesting sequential GC\n", cap->no);
- break;
- case EVENT_REQUEST_PAR_GC: // (cap)
- debugBelch("cap %d: requesting parallel GC\n", cap->no);
- break;
- case EVENT_GC_START: // (cap)
- debugBelch("cap %d: starting GC\n", cap->no);
- break;
- case EVENT_GC_END: // (cap)
- debugBelch("cap %d: finished GC\n", cap->no);
- break;
- case EVENT_GC_IDLE: // (cap)
- debugBelch("cap %d: GC idle\n", cap->no);
- break;
- case EVENT_GC_WORK: // (cap)
- debugBelch("cap %d: GC working\n", cap->no);
- break;
- case EVENT_GC_DONE: // (cap)
- debugBelch("cap %d: GC done\n", cap->no);
- break;
default:
debugBelch("cap %d: thread %lu: event %d\n\n",
cap->no, (lnat)tso->id, tag);
@@ -256,6 +243,56 @@ void traceSchedEvent_ (Capability *cap, EventTypeNum tag,
}
}
+#ifdef DEBUG
+static void traceGcEvent_stderr (Capability *cap, EventTypeNum tag)
+{
+ ACQUIRE_LOCK(&trace_utx);
+
+ tracePreface();
+ switch (tag) {
+ case EVENT_REQUEST_SEQ_GC: // (cap)
+ debugBelch("cap %d: requesting sequential GC\n", cap->no);
+ break;
+ case EVENT_REQUEST_PAR_GC: // (cap)
+ debugBelch("cap %d: requesting parallel GC\n", cap->no);
+ break;
+ case EVENT_GC_START: // (cap)
+ debugBelch("cap %d: starting GC\n", cap->no);
+ break;
+ case EVENT_GC_END: // (cap)
+ debugBelch("cap %d: finished GC\n", cap->no);
+ break;
+ case EVENT_GC_IDLE: // (cap)
+ debugBelch("cap %d: GC idle\n", cap->no);
+ break;
+ case EVENT_GC_WORK: // (cap)
+ debugBelch("cap %d: GC working\n", cap->no);
+ break;
+ case EVENT_GC_DONE: // (cap)
+ debugBelch("cap %d: GC done\n", cap->no);
+ break;
+ default:
+ barf("traceGcEvent: unknown event tag %d", tag);
+ break;
+ }
+
+ RELEASE_LOCK(&trace_utx);
+}
+#endif
+
+void traceGcEvent_ (Capability *cap, EventTypeNum tag)
+{
+#ifdef DEBUG
+ if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
+ traceGcEvent_stderr(cap, tag);
+ } else
+#endif
+ {
+ /* currently all GC events are nullary events */
+ postEvent(cap, tag);
+ }
+}
+
void traceCapsetModify_ (EventTypeNum tag,
CapsetID capset,
StgWord32 other)
@@ -335,15 +372,80 @@ void traceOSProcessInfo_(void) {
}
}
-void traceEvent_ (Capability *cap, EventTypeNum tag)
+#ifdef DEBUG
+static void traceSparkEvent_stderr (Capability *cap, EventTypeNum tag,
+ StgWord info1)
+{
+ ACQUIRE_LOCK(&trace_utx);
+
+ tracePreface();
+ switch (tag) {
+
+ case EVENT_CREATE_SPARK_THREAD: // (cap, spark_thread)
+ debugBelch("cap %d: creating spark thread %lu\n",
+ cap->no, (long)info1);
+ break;
+ case EVENT_SPARK_CREATE: // (cap)
+ debugBelch("cap %d: added spark to pool\n",
+ cap->no);
+ break;
+ case EVENT_SPARK_DUD: // (cap)
+ debugBelch("cap %d: discarded dud spark\n",
+ cap->no);
+ break;
+ case EVENT_SPARK_OVERFLOW: // (cap)
+ debugBelch("cap %d: discarded overflowed spark\n",
+ cap->no);
+ break;
+ case EVENT_SPARK_RUN: // (cap)
+ debugBelch("cap %d: running a spark\n",
+ cap->no);
+ break;
+ case EVENT_SPARK_STEAL: // (cap, victim_cap)
+ debugBelch("cap %d: stealing a spark from cap %d\n",
+ cap->no, (int)info1);
+ break;
+ case EVENT_SPARK_FIZZLE: // (cap)
+ debugBelch("cap %d: fizzled spark removed from pool\n",
+ cap->no);
+ break;
+ case EVENT_SPARK_GC: // (cap)
+ debugBelch("cap %d: GCd spark removed from pool\n",
+ cap->no);
+ break;
+ default:
+ barf("traceSparkEvent: unknown event tag %d", tag);
+ break;
+ }
+
+ RELEASE_LOCK(&trace_utx);
+}
+#endif
+
+void traceSparkEvent_ (Capability *cap, EventTypeNum tag, StgWord info1)
+{
+#ifdef DEBUG
+ if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
+ traceSparkEvent_stderr(cap, tag, info1);
+ } else
+#endif
+ {
+ postSparkEvent(cap,tag,info1);
+ }
+}
+
+void traceSparkCounters_ (Capability *cap,
+ SparkCounters counters,
+ StgWord remaining)
{
#ifdef DEBUG
if (RtsFlags.TraceFlags.tracing == TRACE_STDERR) {
- traceSchedEvent_stderr(cap, tag, 0, 0, 0);
+ /* we currently don't do debug tracing of spark stats but we must
+ test for TRACE_STDERR because of the !eventlog_enabled case. */
} else
#endif
{
- postEvent(cap,tag);
+ postSparkCountersEvent(cap, counters, remaining);
}
}
diff --git a/rts/Trace.h b/rts/Trace.h
index dd396904e7..40a4522a26 100644
--- a/rts/Trace.h
+++ b/rts/Trace.h
@@ -62,6 +62,9 @@ extern int DEBUG_sparks;
// events
extern int TRACE_sched;
+extern int TRACE_gc;
+extern int TRACE_spark_sampled;
+extern int TRACE_spark_full;
// -----------------------------------------------------------------------------
// Posting events
@@ -96,16 +99,30 @@ void traceEnd (void);
void traceSchedEvent_ (Capability *cap, EventTypeNum tag,
StgTSO *tso, StgWord info1, StgWord info2);
+/*
+ * Record a GC event
+ */
+#define traceGcEvent(cap, tag) \
+ if (RTS_UNLIKELY(TRACE_gc)) { \
+ traceGcEvent_(cap, tag); \
+ }
-/*
- * Record a nullary event
+void traceGcEvent_ (Capability *cap, EventTypeNum tag);
+
+/*
+ * Record a spark event
*/
-#define traceEvent(cap, tag) \
- if (RTS_UNLIKELY(TRACE_sched)) { \
- traceEvent_(cap, tag); \
+#define traceSparkEvent(cap, tag) \
+ if (RTS_UNLIKELY(TRACE_spark_full)) { \
+ traceSparkEvent_(cap, tag, 0); \
+ }
+
+#define traceSparkEvent2(cap, tag, other) \
+ if (RTS_UNLIKELY(TRACE_spark_full)) { \
+ traceSparkEvent_(cap, tag, other); \
}
-void traceEvent_ (Capability *cap, EventTypeNum tag);
+void traceSparkEvent_ (Capability *cap, EventTypeNum tag, StgWord info1);
// variadic macros are C99, and supported by gcc. However, the
// ##__VA_ARGS syntax is a gcc extension, which allows the variable
@@ -184,11 +201,17 @@ void traceCapsetModify_ (EventTypeNum tag,
void traceOSProcessInfo_ (void);
+void traceSparkCounters_ (Capability *cap,
+ SparkCounters counters,
+ StgWord remaining);
+
#else /* !TRACING */
#define traceSchedEvent(cap, tag, tso, other) /* nothing */
#define traceSchedEvent2(cap, tag, tso, other, info) /* nothing */
-#define traceEvent(cap, tag) /* nothing */
+#define traceGcEvent(cap, tag) /* nothing */
+#define traceSparkEvent(cap, tag) /* nothing */
+#define traceSparkEvent2(cap, tag, other) /* nothing */
#define traceCap(class, cap, msg, ...) /* nothing */
#define trace(class, msg, ...) /* nothing */
#define debugTrace(class, str, ...) /* nothing */
@@ -197,6 +220,7 @@ void traceOSProcessInfo_ (void);
INLINE_HEADER void traceEventStartup_ (int n_caps STG_UNUSED) {};
#define traceCapsetModify_(tag, capset, other) /* nothing */
#define traceOSProcessInfo_() /* nothing */
+#define traceSparkCounters_(cap, counters, remaining) /* nothing */
#endif /* TRACING */
@@ -225,10 +249,6 @@ void dtraceUserMsgWrapper(Capability *cap, char *msg);
HASKELLEVENT_THREAD_RUNNABLE(cap, tid)
#define dtraceMigrateThread(cap, tid, new_cap) \
HASKELLEVENT_MIGRATE_THREAD(cap, tid, new_cap)
-#define dtraceRunSpark(cap, tid) \
- HASKELLEVENT_RUN_SPARK(cap, tid)
-#define dtraceStealSpark(cap, tid, victim_cap) \
- HASKELLEVENT_STEAL_SPARK(cap, tid, victim_cap)
#define dtraceShutdown(cap) \
HASKELLEVENT_SHUTDOWN(cap)
#define dtraceThreadWakeup(cap, tid, other_cap) \
@@ -262,6 +282,22 @@ INLINE_HEADER void dtraceStartup (int num_caps) {
HASKELLEVENT_CAPSET_ASSIGN_CAP(capset, capno)
#define dtraceCapsetRemoveCap(capset, capno) \
HASKELLEVENT_CAPSET_REMOVE_CAP(capset, capno)
+#define dtraceSparkCounters(cap, a, b, c, d, e, f, g) \
+ HASKELLEVENT_SPARK_COUNTERS(cap, a, b, c, d, e, f, g)
+#define dtraceSparkCreate(cap) \
+ HASKELLEVENT_SPARK_CREATE(cap)
+#define dtraceSparkDud(cap) \
+ HASKELLEVENT_SPARK_DUD(cap)
+#define dtraceSparkOverflow(cap) \
+ HASKELLEVENT_SPARK_OVERFLOW(cap)
+#define dtraceSparkRun(cap) \
+ HASKELLEVENT_SPARK_RUN(cap)
+#define dtraceSparkSteal(cap, victim_cap) \
+ HASKELLEVENT_SPARK_STEAL(cap, victim_cap)
+#define dtraceSparkFizzle(cap) \
+ HASKELLEVENT_SPARK_FIZZLE(cap)
+#define dtraceSparkGc(cap) \
+ HASKELLEVENT_SPARK_GC(cap)
#else /* !defined(DTRACE) */
@@ -270,8 +306,6 @@ INLINE_HEADER void dtraceStartup (int num_caps) {
#define dtraceStopThread(cap, tid, status, info) /* nothing */
#define dtraceThreadRunnable(cap, tid) /* nothing */
#define dtraceMigrateThread(cap, tid, new_cap) /* nothing */
-#define dtraceRunSpark(cap, tid) /* nothing */
-#define dtraceStealSpark(cap, tid, victim_cap) /* nothing */
#define dtraceShutdown(cap) /* nothing */
#define dtraceThreadWakeup(cap, tid, other_cap) /* nothing */
#define dtraceGcStart(cap) /* nothing */
@@ -288,6 +322,14 @@ INLINE_HEADER void dtraceStartup (int num_caps STG_UNUSED) {};
#define dtraceCapsetDelete(capset) /* nothing */
#define dtraceCapsetAssignCap(capset, capno) /* nothing */
#define dtraceCapsetRemoveCap(capset, capno) /* nothing */
+#define dtraceSparkCounters(cap, a, b, c, d, e, f, g) /* nothing */
+#define dtraceSparkCreate(cap) /* nothing */
+#define dtraceSparkDud(cap) /* nothing */
+#define dtraceSparkOverflow(cap) /* nothing */
+#define dtraceSparkRun(cap) /* nothing */
+#define dtraceSparkSteal(cap, victim_cap) /* nothing */
+#define dtraceSparkFizzle(cap) /* nothing */
+#define dtraceSparkGc(cap) /* nothing */
#endif
@@ -352,22 +394,6 @@ INLINE_HEADER void traceEventMigrateThread(Capability *cap STG_UNUSED,
(EventCapNo)new_cap);
}
-INLINE_HEADER void traceEventRunSpark(Capability *cap STG_UNUSED,
- StgTSO *tso STG_UNUSED)
-{
- traceSchedEvent(cap, EVENT_RUN_SPARK, tso, 0);
- dtraceRunSpark((EventCapNo)cap->no, (EventThreadID)tso->id);
-}
-
-INLINE_HEADER void traceEventStealSpark(Capability *cap STG_UNUSED,
- StgTSO *tso STG_UNUSED,
- nat victim_cap STG_UNUSED)
-{
- traceSchedEvent(cap, EVENT_STEAL_SPARK, tso, victim_cap);
- dtraceStealSpark((EventCapNo)cap->no, (EventThreadID)tso->id,
- (EventCapNo)victim_cap);
-}
-
INLINE_HEADER void traceEventShutdown(Capability *cap STG_UNUSED)
{
traceSchedEvent(cap, EVENT_SHUTDOWN, 0, 0);
@@ -385,33 +411,44 @@ INLINE_HEADER void traceEventThreadWakeup(Capability *cap STG_UNUSED,
INLINE_HEADER void traceEventGcStart(Capability *cap STG_UNUSED)
{
- traceSchedEvent(cap, EVENT_GC_START, 0, 0);
+ traceGcEvent(cap, EVENT_GC_START);
dtraceGcStart((EventCapNo)cap->no);
}
INLINE_HEADER void traceEventGcEnd(Capability *cap STG_UNUSED)
{
- traceSchedEvent(cap, EVENT_GC_END, 0, 0);
+ traceGcEvent(cap, EVENT_GC_END);
dtraceGcEnd((EventCapNo)cap->no);
}
INLINE_HEADER void traceEventRequestSeqGc(Capability *cap STG_UNUSED)
{
- traceSchedEvent(cap, EVENT_REQUEST_SEQ_GC, 0, 0);
+ traceGcEvent(cap, EVENT_REQUEST_SEQ_GC);
dtraceRequestSeqGc((EventCapNo)cap->no);
}
INLINE_HEADER void traceEventRequestParGc(Capability *cap STG_UNUSED)
{
- traceSchedEvent(cap, EVENT_REQUEST_PAR_GC, 0, 0);
+ traceGcEvent(cap, EVENT_REQUEST_PAR_GC);
dtraceRequestParGc((EventCapNo)cap->no);
}
-INLINE_HEADER void traceEventCreateSparkThread(Capability *cap STG_UNUSED,
- StgThreadID spark_tid STG_UNUSED)
+INLINE_HEADER void traceEventGcIdle(Capability *cap STG_UNUSED)
{
- traceSchedEvent(cap, EVENT_CREATE_SPARK_THREAD, 0, spark_tid);
- dtraceCreateSparkThread((EventCapNo)cap->no, (EventThreadID)spark_tid);
+ traceGcEvent(cap, EVENT_GC_IDLE);
+ dtraceGcIdle((EventCapNo)cap->no);
+}
+
+INLINE_HEADER void traceEventGcWork(Capability *cap STG_UNUSED)
+{
+ traceGcEvent(cap, EVENT_GC_WORK);
+ dtraceGcWork((EventCapNo)cap->no);
+}
+
+INLINE_HEADER void traceEventGcDone(Capability *cap STG_UNUSED)
+{
+ traceGcEvent(cap, EVENT_GC_DONE);
+ dtraceGcDone((EventCapNo)cap->no);
}
INLINE_HEADER void traceEventStartup(void)
@@ -428,24 +465,6 @@ INLINE_HEADER void traceEventStartup(void)
dtraceStartup(n_caps);
}
-INLINE_HEADER void traceEventGcIdle(Capability *cap STG_UNUSED)
-{
- traceEvent(cap, EVENT_GC_IDLE);
- dtraceGcIdle((EventCapNo)cap->no);
-}
-
-INLINE_HEADER void traceEventGcWork(Capability *cap STG_UNUSED)
-{
- traceEvent(cap, EVENT_GC_WORK);
- dtraceGcWork((EventCapNo)cap->no);
-}
-
-INLINE_HEADER void traceEventGcDone(Capability *cap STG_UNUSED)
-{
- traceEvent(cap, EVENT_GC_DONE);
- dtraceGcDone((EventCapNo)cap->no);
-}
-
INLINE_HEADER void traceCapsetCreate(CapsetID capset STG_UNUSED,
CapsetType capset_type STG_UNUSED)
{
@@ -480,6 +499,73 @@ INLINE_HEADER void traceOSProcessInfo(void)
* is available to DTrace directly */
}
+INLINE_HEADER void traceEventCreateSparkThread(Capability *cap STG_UNUSED,
+ StgThreadID spark_tid STG_UNUSED)
+{
+ traceSparkEvent2(cap, EVENT_CREATE_SPARK_THREAD, spark_tid);
+ dtraceCreateSparkThread((EventCapNo)cap->no, (EventThreadID)spark_tid);
+}
+
+INLINE_HEADER void traceSparkCounters(Capability *cap STG_UNUSED)
+{
+#ifdef THREADED_RTS
+ if (RTS_UNLIKELY(TRACE_spark_sampled)) {
+ traceSparkCounters_(cap, cap->spark_stats, sparkPoolSize(cap->sparks));
+ }
+#endif
+ dtraceSparkCounters((EventCapNo)cap->no,
+ cap->spark_stats.created,
+ cap->spark_stats.dud,
+ cap->spark_stats.overflowed,
+ cap->spark_stats.converted,
+ cap->spark_stats.gcd,
+ cap->spark_stats.fizzled,
+ sparkPoolSize(cap->sparks));
+}
+
+INLINE_HEADER void traceEventSparkCreate(Capability *cap STG_UNUSED)
+{
+ traceSparkEvent(cap, EVENT_SPARK_CREATE);
+ dtraceSparkCreate((EventCapNo)cap->no);
+}
+
+INLINE_HEADER void traceEventSparkDud(Capability *cap STG_UNUSED)
+{
+ traceSparkEvent(cap, EVENT_SPARK_DUD);
+ dtraceSparkDud((EventCapNo)cap->no);
+}
+
+INLINE_HEADER void traceEventSparkOverflow(Capability *cap STG_UNUSED)
+{
+ traceSparkEvent(cap, EVENT_SPARK_OVERFLOW);
+ dtraceSparkOverflow((EventCapNo)cap->no);
+}
+
+INLINE_HEADER void traceEventSparkRun(Capability *cap STG_UNUSED)
+{
+ traceSparkEvent(cap, EVENT_SPARK_RUN);
+ dtraceSparkRun((EventCapNo)cap->no);
+}
+
+INLINE_HEADER void traceEventSparkSteal(Capability *cap STG_UNUSED,
+ nat victim_cap STG_UNUSED)
+{
+ traceSparkEvent2(cap, EVENT_SPARK_STEAL, victim_cap);
+ dtraceSparkSteal((EventCapNo)cap->no, (EventCapNo)victim_cap);
+}
+
+INLINE_HEADER void traceEventSparkFizzle(Capability *cap STG_UNUSED)
+{
+ traceSparkEvent(cap, EVENT_SPARK_FIZZLE);
+ dtraceSparkFizzle((EventCapNo)cap->no);
+}
+
+INLINE_HEADER void traceEventSparkGC(Capability *cap STG_UNUSED)
+{
+ traceSparkEvent(cap, EVENT_SPARK_GC);
+ dtraceSparkGc((EventCapNo)cap->no);
+}
+
#include "EndPrivate.h"
#endif /* TRACE_H */
diff --git a/rts/eventlog/EventLog.c b/rts/eventlog/EventLog.c
index cea313e660..db0f3e4ad5 100644
--- a/rts/eventlog/EventLog.c
+++ b/rts/eventlog/EventLog.c
@@ -60,8 +60,6 @@ char *EventDesc[] = {
[EVENT_STOP_THREAD] = "Stop thread",
[EVENT_THREAD_RUNNABLE] = "Thread runnable",
[EVENT_MIGRATE_THREAD] = "Migrate thread",
- [EVENT_RUN_SPARK] = "Run spark",
- [EVENT_STEAL_SPARK] = "Steal spark",
[EVENT_SHUTDOWN] = "Shutdown",
[EVENT_THREAD_WAKEUP] = "Wakeup thread",
[EVENT_GC_START] = "Starting GC",
@@ -84,7 +82,15 @@ char *EventDesc[] = {
[EVENT_PROGRAM_ARGS] = "Program arguments",
[EVENT_PROGRAM_ENV] = "Program environment variables",
[EVENT_OSPROCESS_PID] = "Process ID",
- [EVENT_OSPROCESS_PPID] = "Parent process ID"
+ [EVENT_OSPROCESS_PPID] = "Parent process ID",
+ [EVENT_SPARK_COUNTERS] = "Spark counters",
+ [EVENT_SPARK_CREATE] = "Spark create",
+ [EVENT_SPARK_DUD] = "Spark dud",
+ [EVENT_SPARK_OVERFLOW] = "Spark overflow",
+ [EVENT_SPARK_RUN] = "Spark run",
+ [EVENT_SPARK_STEAL] = "Spark steal",
+ [EVENT_SPARK_FIZZLE] = "Spark fizzle",
+ [EVENT_SPARK_GC] = "Spark GC",
};
// Event type.
@@ -95,7 +101,7 @@ typedef struct _EventType {
char *desc; // Description
} EventType;
-EventType eventTypes[NUM_EVENT_TAGS];
+EventType eventTypes[NUM_GHC_EVENT_TAGS];
static void initEventsBuf(EventsBuf* eb, StgWord64 size, EventCapNo capno);
static void resetEventsBuf(EventsBuf* eb);
@@ -194,7 +200,7 @@ initEventLogging(void)
+ 10 /* .eventlog */,
"initEventLogging");
- if (sizeof(EventDesc) / sizeof(char*) != NUM_EVENT_TAGS) {
+ if (sizeof(EventDesc) / sizeof(char*) != NUM_GHC_EVENT_TAGS) {
barf("EventDesc array has the wrong number of elements");
}
@@ -244,7 +250,7 @@ initEventLogging(void)
// Mark beginning of event types in the header.
postInt32(&eventBuf, EVENT_HET_BEGIN);
- for (t = 0; t < NUM_EVENT_TAGS; ++t) {
+ for (t = 0; t < NUM_GHC_EVENT_TAGS; ++t) {
eventTypes[t].etNum = t;
eventTypes[t].desc = EventDesc[t];
@@ -253,13 +259,11 @@ initEventLogging(void)
case EVENT_CREATE_THREAD: // (cap, thread)
case EVENT_RUN_THREAD: // (cap, thread)
case EVENT_THREAD_RUNNABLE: // (cap, thread)
- case EVENT_RUN_SPARK: // (cap, thread)
case EVENT_CREATE_SPARK_THREAD: // (cap, spark_thread)
eventTypes[t].size = sizeof(EventThreadID);
break;
case EVENT_MIGRATE_THREAD: // (cap, thread, new_cap)
- case EVENT_STEAL_SPARK: // (cap, thread, victim_cap)
case EVENT_THREAD_WAKEUP: // (cap, thread, other_cap)
eventTypes[t].size =
sizeof(EventThreadID) + sizeof(EventCapNo);
@@ -295,6 +299,11 @@ initEventLogging(void)
sizeof(EventCapsetID) + sizeof(StgWord32);
break;
+ case EVENT_SPARK_STEAL: // (cap, victim_cap)
+ eventTypes[t].size =
+ sizeof(EventCapNo);
+ break;
+
case EVENT_SHUTDOWN: // (cap)
case EVENT_REQUEST_SEQ_GC: // (cap)
case EVENT_REQUEST_PAR_GC: // (cap)
@@ -303,6 +312,12 @@ initEventLogging(void)
case EVENT_GC_IDLE:
case EVENT_GC_WORK:
case EVENT_GC_DONE:
+ case EVENT_SPARK_CREATE: // (cap)
+ case EVENT_SPARK_DUD: // (cap)
+ case EVENT_SPARK_OVERFLOW: // (cap)
+ case EVENT_SPARK_RUN: // (cap)
+ case EVENT_SPARK_FIZZLE: // (cap)
+ case EVENT_SPARK_GC: // (cap)
eventTypes[t].size = 0;
break;
@@ -314,6 +329,10 @@ initEventLogging(void)
eventTypes[t].size = 0xffff;
break;
+ case EVENT_SPARK_COUNTERS: // (cap, 7*counter)
+ eventTypes[t].size = 7 * sizeof(StgWord64);
+ break;
+
case EVENT_BLOCK_MARKER:
eventTypes[t].size = sizeof(StgWord32) + sizeof(EventTimestamp) +
sizeof(EventCapNo);
@@ -435,7 +454,6 @@ postSchedEvent (Capability *cap,
case EVENT_CREATE_THREAD: // (cap, thread)
case EVENT_RUN_THREAD: // (cap, thread)
case EVENT_THREAD_RUNNABLE: // (cap, thread)
- case EVENT_RUN_SPARK: // (cap, thread)
{
postThreadID(eb,thread);
break;
@@ -448,7 +466,6 @@ postSchedEvent (Capability *cap,
}
case EVENT_MIGRATE_THREAD: // (cap, thread, new_cap)
- case EVENT_STEAL_SPARK: // (cap, thread, victim_cap)
case EVENT_THREAD_WAKEUP: // (cap, thread, other_cap)
{
postThreadID(eb,thread);
@@ -465,19 +482,83 @@ postSchedEvent (Capability *cap,
}
case EVENT_SHUTDOWN: // (cap)
- case EVENT_REQUEST_SEQ_GC: // (cap)
- case EVENT_REQUEST_PAR_GC: // (cap)
- case EVENT_GC_START: // (cap)
- case EVENT_GC_END: // (cap)
{
break;
}
default:
- barf("postEvent: unknown event tag %d", tag);
+ barf("postSchedEvent: unknown event tag %d", tag);
+ }
+}
+
+void
+postSparkEvent (Capability *cap,
+ EventTypeNum tag,
+ StgWord info1)
+{
+ EventsBuf *eb;
+
+ eb = &capEventBuf[cap->no];
+
+ if (!hasRoomForEvent(eb, tag)) {
+ // Flush event buffer to make room for new event.
+ printAndClearEventBuf(eb);
+ }
+
+ postEventHeader(eb, tag);
+
+ switch (tag) {
+ case EVENT_CREATE_SPARK_THREAD: // (cap, spark_thread)
+ {
+ postThreadID(eb,info1 /* spark_thread */);
+ break;
+ }
+
+ case EVENT_SPARK_STEAL: // (cap, victim_cap)
+ {
+ postCapNo(eb,info1 /* victim_cap */);
+ break;
+ }
+
+ case EVENT_SPARK_CREATE: // (cap)
+ case EVENT_SPARK_DUD: // (cap)
+ case EVENT_SPARK_OVERFLOW: // (cap)
+ case EVENT_SPARK_RUN: // (cap)
+ case EVENT_SPARK_FIZZLE: // (cap)
+ case EVENT_SPARK_GC: // (cap)
+ {
+ break;
+ }
+
+ default:
+ barf("postSparkEvent: unknown event tag %d", tag);
}
}
+void
+postSparkCountersEvent (Capability *cap,
+ SparkCounters counters,
+ StgWord remaining)
+{
+ EventsBuf *eb;
+
+ eb = &capEventBuf[cap->no];
+
+ if (!hasRoomForEvent(eb, EVENT_SPARK_COUNTERS)) {
+ // Flush event buffer to make room for new event.
+ printAndClearEventBuf(eb);
+ }
+
+ postEventHeader(eb, EVENT_SPARK_COUNTERS);
+ postWord64(eb,counters.created);
+ postWord64(eb,counters.dud);
+ postWord64(eb,counters.overflowed);
+ postWord64(eb,counters.converted);
+ postWord64(eb,counters.gcd);
+ postWord64(eb,counters.fizzled);
+ postWord64(eb,remaining);
+}
+
void postCapsetModifyEvent (EventTypeNum tag,
EventCapsetID capset,
StgWord32 other)
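
For reference, the EVENT_SPARK_COUNTERS payload posted by postSparkCountersEvent above is seven 64-bit words, matching the 7 * sizeof(StgWord64) size registered in initEventLogging (layout restated from the code, not from a separate specification):

    /* after the usual event header: */
    StgWord64 created;
    StgWord64 dud;
    StgWord64 overflowed;
    StgWord64 converted;
    StgWord64 gcd;
    StgWord64 fizzled;
    StgWord64 remaining;   /* current size of this capability's spark pool */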
diff --git a/rts/eventlog/EventLog.h b/rts/eventlog/EventLog.h
index 602ac2c87b..6bb1404e92 100644
--- a/rts/eventlog/EventLog.h
+++ b/rts/eventlog/EventLog.h
@@ -69,6 +69,18 @@ void postCapsetVecEvent (EventTypeNum tag,
int argc,
char *msg[]);
+/*
+ * Post a `par` spark event
+ */
+void postSparkEvent(Capability *cap, EventTypeNum tag, StgWord info1);
+
+/*
+ * Post an event with several counters relating to `par` sparks.
+ */
+void postSparkCountersEvent (Capability *cap,
+ SparkCounters counters,
+ StgWord remaining);
+
#else /* !TRACING */
INLINE_HEADER void postSchedEvent (Capability *cap STG_UNUSED,