author      Ben Gamari <ben@smart-cactus.org>        2017-04-21 09:16:48 -0400
committer   Ben Gamari <ben@smart-cactus.org>        2017-04-28 22:35:04 -0400
commit      945c45ad50ed31e3acb96fdaafb21640c4669f12 (patch)
tree        ae2e59ba8d3a49bbd3c3dcece39d53aef691ed44 /rts/Schedule.c
parent      e5b3492f23c2296d0d8221e1787ee585331f726e (diff)
download    haskell-945c45ad50ed31e3acb96fdaafb21640c4669f12.tar.gz
Prefer #if defined to #ifdef
Our new CPP linter enforces this.
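
Why the linter prefers the second form: #ifdef can only test a single macro, while the defined() operator participates in ordinary preprocessor expressions, so positive and negative tests can be combined and every conditional reads the same way. A minimal sketch, reusing macros that appear in this diff (the combined condition at the end is hypothetical, purely illustrative):

    /* Old style: tests one macro; cannot be combined or negated inline. */
    #ifdef HAVE_UNISTD_H
    #include <unistd.h>
    #endif

    /* Preferred style: equivalent on its own... */
    #if defined(HAVE_UNISTD_H)
    #include <unistd.h>
    #endif

    /* ...and it composes with && and ! (hypothetical combination). */
    #if defined(HAVE_UNISTD_H) && !defined(mingw32_HOST_OS)
    #include <unistd.h>
    #endif
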
Diffstat (limited to 'rts/Schedule.c')
-rw-r--r--  rts/Schedule.c  62
1 file changed, 31 insertions, 31 deletions
diff --git a/rts/Schedule.c b/rts/Schedule.c
index 296461b38d..0a1d761b6a 100644
--- a/rts/Schedule.c
+++ b/rts/Schedule.c
@@ -44,10 +44,10 @@
#include "Stable.h"
#include "TopHandler.h"
-#ifdef HAVE_SYS_TYPES_H
+#if defined(HAVE_SYS_TYPES_H)
#include <sys/types.h>
#endif
-#ifdef HAVE_UNISTD_H
+#if defined(HAVE_UNISTD_H)
#include <unistd.h>
#endif
@@ -55,11 +55,11 @@
#include <stdlib.h>
#include <stdarg.h>
-#ifdef HAVE_ERRNO_H
+#if defined(HAVE_ERRNO_H)
#include <errno.h>
#endif
-#ifdef TRACING
+#if defined(TRACING)
#include "eventlog/EventLog.h"
#endif
/* -----------------------------------------------------------------------------
@@ -154,7 +154,7 @@ static void scheduleDoGC(Capability **pcap, Task *task, bool force_major);
static void deleteThread (Capability *cap, StgTSO *tso);
static void deleteAllThreads (Capability *cap);
-#ifdef FORKPROCESS_PRIMOP_SUPPORTED
+#if defined(FORKPROCESS_PRIMOP_SUPPORTED)
static void deleteThread_(Capability *cap, StgTSO *tso);
#endif
@@ -375,7 +375,7 @@ schedule (Capability *initialCapability, Task *task)
// them back if it rises again. Presumably we should, but after
// the thread has been migrated we no longer know what capability
// it was originally on.
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
if (cap->disabled && !t->bound) {
Capability *dest_cap = capabilities[cap->no % enabled_capabilities];
migrateThread(cap, t, dest_cap);
@@ -411,7 +411,7 @@ run_thread:
prev_what_next = t->what_next;
errno = t->saved_errno;
-#ifdef mingw32_HOST_OS
+#if defined(mingw32_HOST_OS)
SetLastError(t->saved_winerror);
#endif
@@ -432,7 +432,7 @@ run_thread:
uint32_t prev;
prev = xchg((P_)&recent_activity, ACTIVITY_YES);
if (prev == ACTIVITY_DONE_GC) {
-#ifndef PROFILING
+#if !defined(PROFILING)
startTimer();
#endif
}
@@ -490,7 +490,7 @@ run_thread:
// XXX: possibly bogus for SMP because this thread might already
// be running again, see code below.
t->saved_errno = errno;
-#ifdef mingw32_HOST_OS
+#if defined(mingw32_HOST_OS)
// Similarly for Windows error code
t->saved_winerror = GetLastError();
#endif
@@ -1267,7 +1267,7 @@ scheduleHandleThreadBlocked( StgTSO *t
// threadPaused() might have raised a blocked throwTo
// exception, see maybePerformBlockedException().
-#ifdef DEBUG
+#if defined(DEBUG)
traceThreadStatus(DEBUG_sched, t);
#endif
}
@@ -1341,7 +1341,7 @@ scheduleHandleThreadFinished (Capability *cap STG_UNUSED, Task *task, StgTSO *t)
task->incall->rstat = Killed;
}
}
-#ifdef DEBUG
+#if defined(DEBUG)
removeThreadLabel((StgWord)task->incall->tso->id);
#endif
@@ -1471,7 +1471,7 @@ static bool requestSync (
* ensue if another thread is trying to synchronise.
* -------------------------------------------------------------------------- */
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
static void acquireAllCapabilities(Capability *cap, Task *task)
{
Capability *tmpcap;
@@ -1506,7 +1506,7 @@ static void acquireAllCapabilities(Capability *cap, Task *task)
* the one passed in as cap.
* -------------------------------------------------------------------------- */
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
static void releaseAllCapabilities(uint32_t n, Capability *cap, Task *task)
{
uint32_t i;
@@ -1533,7 +1533,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
bool heap_census;
uint32_t collect_gen;
bool major_gc;
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
uint32_t gc_type;
uint32_t i;
uint32_t need_idle;
@@ -1560,7 +1560,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
collect_gen = calcNeeded(force_major || heap_census, NULL);
major_gc = (collect_gen == RtsFlags.GcFlags.generations-1);
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
if (sched_state < SCHED_INTERRUPTING
&& RtsFlags.ParFlags.parGcEnabled
&& collect_gen >= RtsFlags.ParFlags.parGcGen
@@ -1673,7 +1673,7 @@ scheduleDoGC (Capability **pcap, Task *task USED_IF_THREADS,
stat_startGCSync(gc_threads[cap->no]);
-#ifdef DEBUG
+#if defined(DEBUG)
unsigned int old_n_capabilities = n_capabilities;
#endif
@@ -1833,7 +1833,7 @@ delete_threads_and_gc:
// fact that we've done a GC and turn off the timer signal;
// it will get re-enabled if we run any threads after the GC.
recent_activity = ACTIVITY_DONE_GC;
-#ifndef PROFILING
+#if !defined(PROFILING)
stopTimer();
#endif
break;
@@ -1934,7 +1934,7 @@ delete_threads_and_gc:
throwToSelf(cap, main_thread, heapOverflow_closure);
}
}
-#ifdef SPARKBALANCE
+#if defined(SPARKBALANCE)
/* JB
Once we are all together... this would be the place to balance all
spark pools. No concurrent stealing or adding of new sparks can
@@ -1960,12 +1960,12 @@ delete_threads_and_gc:
pid_t
forkProcess(HsStablePtr *entry
-#ifndef FORKPROCESS_PRIMOP_SUPPORTED
+#if !defined(FORKPROCESS_PRIMOP_SUPPORTED)
STG_UNUSED
#endif
)
{
-#ifdef FORKPROCESS_PRIMOP_SUPPORTED
+#if defined(FORKPROCESS_PRIMOP_SUPPORTED)
pid_t pid;
StgTSO* t,*next;
Capability *cap;
@@ -1980,7 +1980,7 @@ forkProcess(HsStablePtr *entry
cap = NULL;
waitForCapability(&cap, task);
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
stopAllCapabilities(&cap, task);
#endif
@@ -1997,7 +1997,7 @@ forkProcess(HsStablePtr *entry
ACQUIRE_LOCK(&capabilities[i]->lock);
}
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
ACQUIRE_LOCK(&all_tasks_mutex);
#endif
@@ -2023,7 +2023,7 @@ forkProcess(HsStablePtr *entry
RELEASE_LOCK(&capabilities[i]->lock);
}
-#ifdef THREADED_RTS
+#if defined(THREADED_RTS)
RELEASE_LOCK(&all_tasks_mutex);
#endif
@@ -2047,7 +2047,7 @@ forkProcess(HsStablePtr *entry
initMutex(&all_tasks_mutex);
#endif
-#ifdef TRACING
+#if defined(TRACING)
resetTracing();
#endif
@@ -2374,12 +2374,12 @@ suspendThread (StgRegTable *reg, bool interruptible)
int saved_errno;
StgTSO *tso;
Task *task;
-#ifdef mingw32_HOST_OS
+#if defined(mingw32_HOST_OS)
StgWord32 saved_winerror;
#endif
saved_errno = errno;
-#ifdef mingw32_HOST_OS
+#if defined(mingw32_HOST_OS)
saved_winerror = GetLastError();
#endif
@@ -2419,7 +2419,7 @@ suspendThread (StgRegTable *reg, bool interruptible)
RELEASE_LOCK(&cap->lock);
errno = saved_errno;
-#ifdef mingw32_HOST_OS
+#if defined(mingw32_HOST_OS)
SetLastError(saved_winerror);
#endif
return task;
@@ -2433,12 +2433,12 @@ resumeThread (void *task_)
Capability *cap;
Task *task = task_;
int saved_errno;
-#ifdef mingw32_HOST_OS
+#if defined(mingw32_HOST_OS)
StgWord32 saved_winerror;
#endif
saved_errno = errno;
-#ifdef mingw32_HOST_OS
+#if defined(mingw32_HOST_OS)
saved_winerror = GetLastError();
#endif
@@ -2475,7 +2475,7 @@ resumeThread (void *task_)
cap->r.rCurrentTSO = tso;
cap->in_haskell = true;
errno = saved_errno;
-#ifdef mingw32_HOST_OS
+#if defined(mingw32_HOST_OS)
SetLastError(saved_winerror);
#endif
@@ -2816,7 +2816,7 @@ deleteThread (Capability *cap STG_UNUSED, StgTSO *tso)
}
}
-#ifdef FORKPROCESS_PRIMOP_SUPPORTED
+#if defined(FORKPROCESS_PRIMOP_SUPPORTED)
static void
deleteThread_(Capability *cap, StgTSO *tso)
{ // for forkProcess only: