path: root/ghc/rts/StgMiscClosures.cmm
author      simonmar <unknown>  2004-08-13 13:11:23 +0000
committer   simonmar <unknown>  2004-08-13 13:11:23 +0000
commit      423d477bfecd490de1449c59325c8776f91d7aac (patch)
tree        2fe481e38a21be66b17539de24a4fe56daf80642 /ghc/rts/StgMiscClosures.cmm
parent      553e90d9a32ee1b1809430f260c401cc4169c6c7 (diff)
download    haskell-423d477bfecd490de1449c59325c8776f91d7aac.tar.gz
[project @ 2004-08-13 13:04:50 by simonmar]
Merge backend-hacking-branch onto HEAD. Yay!
Diffstat (limited to 'ghc/rts/StgMiscClosures.cmm')
-rw-r--r--    ghc/rts/StgMiscClosures.cmm    984
1 files changed, 984 insertions, 0 deletions
diff --git a/ghc/rts/StgMiscClosures.cmm b/ghc/rts/StgMiscClosures.cmm
new file mode 100644
index 0000000000..78eef91e9f
--- /dev/null
+++ b/ghc/rts/StgMiscClosures.cmm
@@ -0,0 +1,984 @@
+/* ----------------------------------------------------------------------------
+ *
+ * (c) The GHC Team, 1998-2004
+ *
+ * Entry code for various built-in closure types.
+ *
+ * This file is written in a subset of C--, extended with various
+ * features specific to GHC. It is compiled by GHC directly. For the
+ * syntax of .cmm files, see the parser in ghc/compiler/cmm/CmmParse.y.
+ *
+ * --------------------------------------------------------------------------*/
+
+#include "Cmm.h"
+
+/* ----------------------------------------------------------------------------
+ Support for the bytecode interpreter.
+ ------------------------------------------------------------------------- */
+
+/* Nine variants of return code (one direct, eight vectored) for constructors
+   created by the interpreter. */
+stg_interp_constr_entry
+{
+ /* R1 points at the constructor */
+ jump %ENTRY_CODE(Sp(0));
+}
+
+stg_interp_constr1_entry { jump %RET_VEC(Sp(0),0); }
+stg_interp_constr2_entry { jump %RET_VEC(Sp(0),1); }
+stg_interp_constr3_entry { jump %RET_VEC(Sp(0),2); }
+stg_interp_constr4_entry { jump %RET_VEC(Sp(0),3); }
+stg_interp_constr5_entry { jump %RET_VEC(Sp(0),4); }
+stg_interp_constr6_entry { jump %RET_VEC(Sp(0),5); }
+stg_interp_constr7_entry { jump %RET_VEC(Sp(0),6); }
+stg_interp_constr8_entry { jump %RET_VEC(Sp(0),7); }
+
+/* Some info tables to be used when compiled code returns a value to
+ the interpreter, i.e. the interpreter pushes one of these onto the
+ stack before entering a value. What the code does is to
+ impedance-match the compiled return convention (in R1p/R1n/F1/D1 etc) to
+ the interpreter's convention (returned value is on top of stack),
+ and then cause the scheduler to enter the interpreter.
+
+ On entry, the stack (growing down) looks like this:
+
+ ptr to BCO holding return continuation
+ ptr to one of these info tables.
+
+ The info table code, both direct and vectored, must:
+ * push R1/F1/D1 on the stack, and its tag if necessary
+ * push the BCO (so it's now on the stack twice)
+ * Yield, ie, go to the scheduler.
+
+ Scheduler examines the t.o.s, discovers it is a BCO, and proceeds
+ directly to the bytecode interpreter. That pops the top element
+ (the BCO, containing the return continuation), and interprets it.
+ Net result: return continuation gets interpreted, with the
+ following stack:
+
+ ptr to this BCO
+ ptr to the info table just jumped thru
+ return value
+
+ which is just what we want -- the "standard" return layout for the
+ interpreter. Hurrah!
+
+ Don't ask me how unboxed tuple returns are supposed to work. We
+ haven't got a good story about that yet.
+*/
+
+INFO_TABLE_RET( stg_ctoi_R1p,
+ 0/*size*/, 0/*bitmap*/, /* special layout! */
+ RET_BCO,
+ RET_LBL(stg_ctoi_R1p),
+ RET_LBL(stg_ctoi_R1p),
+ RET_LBL(stg_ctoi_R1p),
+ RET_LBL(stg_ctoi_R1p),
+ RET_LBL(stg_ctoi_R1p),
+ RET_LBL(stg_ctoi_R1p),
+ RET_LBL(stg_ctoi_R1p),
+ RET_LBL(stg_ctoi_R1p))
+{
+ Sp_adj(-2);
+ Sp(1) = R1;
+ Sp(0) = stg_enter_info;
+ jump stg_yield_to_interpreter;
+}
+
+#if MAX_VECTORED_RTN != 8
+#error MAX_VECTORED_RTN has changed: please modify stg_ctoi_R1p too.
+#endif
+
+/*
+ * When the returned value is a pointer, but unlifted, in R1 ...
+ */
+INFO_TABLE_RET( stg_ctoi_R1unpt,
+ 0/*size*/, 0/*bitmap*/, /* special layout! */
+ RET_BCO )
+{
+ Sp_adj(-2);
+ Sp(1) = R1;
+ Sp(0) = stg_gc_unpt_r1_info;
+ jump stg_yield_to_interpreter;
+}
+
+/*
+ * When the returned value is a non-pointer in R1 ...
+ */
+INFO_TABLE_RET( stg_ctoi_R1n,
+ 0/*size*/, 0/*bitmap*/, /* special layout! */
+ RET_BCO )
+{
+ Sp_adj(-2);
+ Sp(1) = R1;
+ Sp(0) = stg_gc_unbx_r1_info;
+ jump stg_yield_to_interpreter;
+}
+
+/*
+ * When the returned value is in F1
+ */
+INFO_TABLE_RET( stg_ctoi_F1,
+ 0/*size*/, 0/*bitmap*/, /* special layout! */
+ RET_BCO )
+{
+ Sp_adj(-2);
+ F_[Sp + WDS(1)] = F1;
+ Sp(0) = stg_gc_f1_info;
+ jump stg_yield_to_interpreter;
+}
+
+/*
+ * When the returned value is in D1
+ */
+INFO_TABLE_RET( stg_ctoi_D1,
+ 0/*size*/, 0/*bitmap*/, /* special layout! */
+ RET_BCO )
+{
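+  /* lower Sp by one word for the frame's info pointer, plus SIZEOF_DOUBLE
+     bytes of room for the double itself (stored at Sp + WDS(1) below) */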
+ Sp_adj(-1) - SIZEOF_DOUBLE;
+ D_[Sp + WDS(1)] = D1;
+ Sp(0) = stg_gc_d1_info;
+ jump stg_yield_to_interpreter;
+}
+
+/*
+ * When the returned value is in L1
+ */
+INFO_TABLE_RET( stg_ctoi_L1,
+ 0/*size*/, 0/*bitmap*/, /* special layout! */
+ RET_BCO )
+{
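+  /* lower Sp by one word for the frame's info pointer, plus 8 bytes of room
+     for the 64-bit value in L1 (stored at Sp + WDS(1) below) */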
+ Sp_adj(-1) - 8;
+ L_[Sp + WDS(1)] = L1;
+ Sp(0) = stg_gc_l1_info;
+ jump stg_yield_to_interpreter;
+}
+
+/*
+ * When the returned value is a void
+ */
+INFO_TABLE_RET( stg_ctoi_V,
+ 0/*size*/, 0/*bitmap*/, /* special layout! */
+ RET_BCO )
+{
+ Sp_adj(-1);
+ Sp(0) = stg_gc_void_info;
+ jump stg_yield_to_interpreter;
+}
+
+/*
+ * Dummy info table pushed on the top of the stack when the interpreter
+ * should apply the BCO on the stack to its arguments, also on the
+ * stack.
+ */
+INFO_TABLE_RET( stg_apply_interp,
+ 0/*size*/, 0/*bitmap*/, /* special layout! */
+ RET_BCO )
+{
+ /* Just in case we end up in here... (we shouldn't) */
+ jump stg_yield_to_interpreter;
+}
+
+/* ----------------------------------------------------------------------------
+ Entry code for a BCO
+ ------------------------------------------------------------------------- */
+
+INFO_TABLE_FUN( stg_BCO, 4, 0, BCO, "BCO", "BCO", ARG_BCO )
+{
+ /* entering a BCO means "apply it", same as a function */
+ Sp_adj(-2);
+ Sp(1) = R1;
+ Sp(0) = stg_apply_interp_info;
+ jump stg_yield_to_interpreter;
+}
+
+/* ----------------------------------------------------------------------------
+ Info tables for indirections.
+
+ SPECIALISED INDIRECTIONS: we have a specialised indirection for each
+ kind of return (direct, vectored 0-7), so that we can avoid entering
+ the object when we know what kind of return it will do. The update
+ code (Updates.hc) updates objects with the appropriate kind of
+ indirection. We only do this for young-gen indirections.
+ ------------------------------------------------------------------------- */
+
+INFO_TABLE(stg_IND,1,0,IND,"IND","IND")
+{
+ TICK_ENT_DYN_IND(); /* tick */
+ R1 = StgInd_indirectee(R1);
+ TICK_ENT_VIA_NODE();
+ jump %GET_ENTRY(R1);
+}
+
+#define IND_SPEC(label,ret) \
+INFO_TABLE(label,1,0,IND,"IND","IND") \
+{ \
+ TICK_ENT_DYN_IND(); /* tick */ \
+ R1 = StgInd_indirectee(R1); \
+ TICK_ENT_VIA_NODE(); \
+ jump ret; \
+}
+
+IND_SPEC(stg_IND_direct, %ENTRY_CODE(Sp(0)))
+IND_SPEC(stg_IND_0, %RET_VEC(Sp(0),0))
+IND_SPEC(stg_IND_1, %RET_VEC(Sp(0),1))
+IND_SPEC(stg_IND_2, %RET_VEC(Sp(0),2))
+IND_SPEC(stg_IND_3, %RET_VEC(Sp(0),3))
+IND_SPEC(stg_IND_4, %RET_VEC(Sp(0),4))
+IND_SPEC(stg_IND_5, %RET_VEC(Sp(0),5))
+IND_SPEC(stg_IND_6, %RET_VEC(Sp(0),6))
+IND_SPEC(stg_IND_7, %RET_VEC(Sp(0),7))
+
+INFO_TABLE(stg_IND_STATIC,1,0,IND_STATIC,"IND_STATIC","IND_STATIC")
+{
+ TICK_ENT_STATIC_IND(); /* tick */
+ R1 = StgInd_indirectee(R1);
+ TICK_ENT_VIA_NODE();
+ jump %GET_ENTRY(R1);
+}
+
+INFO_TABLE(stg_IND_PERM,1,1,IND_PERM,"IND_PERM","IND_PERM")
+{
+ /* Don't add INDs to granularity cost */
+
+ /* Don't: TICK_ENT_STATIC_IND(Node); for ticky-ticky; this ind is
+ here only to help profiling */
+
+#if defined(TICKY_TICKY) && !defined(PROFILING)
+ /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND, rather than
+ being extra */
+ TICK_ENT_PERM_IND();
+#endif
+
+ LDV_ENTER(R1);
+
+ /* Enter PAP cost centre */
+ ENTER_CCS_PAP_CL(R1);
+
+ /* For ticky-ticky, change the perm_ind to a normal ind on first
+ * entry, so the number of ent_perm_inds is the number of *thunks*
+ * entered again, not the number of subsequent entries.
+ *
+ * Since this screws up cost centres, we die if profiling and
+ * ticky_ticky are on at the same time. KSW 1999-01.
+ */
+#ifdef TICKY_TICKY
+# ifdef PROFILING
+# error Profiling and ticky-ticky do not mix at present!
+# endif /* PROFILING */
+ StgHeader_info(R1) = stg_IND_info;
+#endif /* TICKY_TICKY */
+
+ R1 = StgInd_indirectee(R1);
+
+#if defined(TICKY_TICKY) && !defined(PROFILING)
+ TICK_ENT_VIA_NODE();
+#endif
+
+ jump %GET_ENTRY(R1);
+}
+
+
+INFO_TABLE(stg_IND_OLDGEN,1,1,IND_OLDGEN,"IND_OLDGEN","IND_OLDGEN")
+{
+ TICK_ENT_STATIC_IND(); /* tick */
+ R1 = StgInd_indirectee(R1);
+ TICK_ENT_VIA_NODE();
+ jump %GET_ENTRY(R1);
+}
+
+INFO_TABLE(stg_IND_OLDGEN_PERM,1,1,IND_OLDGEN_PERM,"IND_OLDGEN_PERM","IND_OLDGEN_PERM")
+{
+ /* Don't: TICK_ENT_STATIC_IND(Node); for ticky-ticky;
+ this ind is here only to help profiling */
+
+#if defined(TICKY_TICKY) && !defined(PROFILING)
+ /* TICKY_TICKY && !PROFILING means PERM_IND *replaces* an IND,
+ rather than being extra */
+ TICK_ENT_PERM_IND(R1); /* tick */
+#endif
+
+ LDV_ENTER(R1);
+
+ /* Enter PAP cost centre -- lexical scoping only */
+ ENTER_CCS_PAP_CL(R1);
+
+ /* see comment in IND_PERM */
+#ifdef TICKY_TICKY
+# ifdef PROFILING
+# error Profiling and ticky-ticky do not mix at present!
+# endif /* PROFILING */
+ StgHeader_info(R1) = stg_IND_OLDGEN_info;
+#endif /* TICKY_TICKY */
+
+ R1 = StgInd_indirectee(R1);
+
+ TICK_ENT_VIA_NODE();
+ jump %GET_ENTRY(R1);
+}
+
+/* ----------------------------------------------------------------------------
+ Black holes.
+
+ Entering a black hole normally causes a cyclic data dependency, but
+ in the concurrent world, black holes are synchronization points,
+ and they are turned into blocking queues when there are threads
+ waiting for the evaluation of the closure to finish.
+ ------------------------------------------------------------------------- */
+
+/* Note: a BLACKHOLE and BLACKHOLE_BQ must be big enough to be
+ * overwritten with an indirection/evacuee/catch. Thus we claim it
+ * has 1 non-pointer word of payload (in addition to the pointer word
+ * for the blocking queue in a BQ), which should be big enough for an
+ * old-generation indirection.
+ */
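+
+/* Illustrative C sketch (not part of this commit; the authoritative
+ * definitions live in the RTS headers, and the field names here should be
+ * treated as assumptions): a blocking queue and an old-generation
+ * indirection each fit in header + 2 payload words, which is why a
+ * BLACKHOLE(_BQ) can later be overwritten in place by an indirection.
+ *
+ *   typedef struct {
+ *       StgHeader       header;
+ *       struct StgTSO_ *blocking_queue;  // accessed via StgBlockingQueue_blocking_queue below
+ *       StgMutClosure  *mut_link;        // assumed: chains the closure onto a mutable list
+ *   } StgBlockingQueue;
+ */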
+INFO_TABLE(stg_BLACKHOLE,0,2,BLACKHOLE,"BLACKHOLE","BLACKHOLE")
+{
+#if defined(GRAN)
+ /* Before overwriting TSO_LINK */
+ STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1 /*Node*/);
+#endif
+
+ TICK_ENT_BH();
+
+ /* Actually this is not necessary because R1 is about to be destroyed. */
+ LDV_ENTER(R1);
+
+ /* Put ourselves on the blocking queue for this black hole */
+ StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
+ StgBlockingQueue_blocking_queue(R1) = CurrentTSO;
+
+ /* jot down why and on what closure we are blocked */
+ StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
+ StgTSO_block_info(CurrentTSO) = R1;
+
+ /* Change the BLACKHOLE into a BLACKHOLE_BQ */
+#ifdef PROFILING
+ /* The size remains the same, so we call LDV_recordDead() -
+ no need to fill slop. */
+ foreign "C" LDV_recordDead(R1 "ptr", BYTES_TO_WDS(SIZEOF_StgBlockingQueue));
+#endif
+ /*
+ * Todo: maybe use SET_HDR() and remove LDV_RECORD_CREATE()?
+ */
+ StgHeader_info(R1) = stg_BLACKHOLE_BQ_info;
+#ifdef PROFILING
+ foreign "C" LDV_RECORD_CREATE(R1);
+#endif
+
+ /* closure is mutable since something has just been added to its BQ */
+ foreign "C" recordMutable(R1 "ptr");
+
+ /* PAR: dumping of event now done in blockThread -- HWL */
+
+ /* stg_gen_block is too heavyweight, use a specialised one */
+ jump stg_block_1;
+}
+
+INFO_TABLE(stg_BLACKHOLE_BQ,1,1,BLACKHOLE_BQ,"BLACKHOLE_BQ","BLACKHOLE_BQ")
+{
+#if defined(GRAN)
+ /* Before overwriting TSO_LINK */
+ STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1 /*Node*/);
+#endif
+
+ TICK_ENT_BH();
+ LDV_ENTER(R1);
+
+ /* Put ourselves on the blocking queue for this black hole */
+ StgTSO_link(CurrentTSO) = StgBlockingQueue_blocking_queue(R1);
+ StgBlockingQueue_blocking_queue(R1) = CurrentTSO;
+
+ /* jot down why and on what closure we are blocked */
+ StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
+ StgTSO_block_info(CurrentTSO) = R1;
+
+ /* PAR: dumping of event now done in blockThread -- HWL */
+
+ /* stg_gen_block is too heavyweight, use a specialised one */
+ jump stg_block_1;
+}
+
+/*
+ Revertible black holes are needed in the parallel world, to handle
+ negative acknowledgements of messages containing updatable closures.
+ The idea is that when the original message is transmitted, the closure
+ is turned into a revertible black hole...an object which acts like a
+ black hole when local threads try to enter it, but which can be reverted
+ back to the original closure if necessary.
+
+ It's actually a lot like a blocking queue (BQ) entry, because revertible
+ black holes are initially set up with an empty blocking queue.
+*/
+
+#if defined(PAR) || defined(GRAN)
+
+INFO_TABLE(stg_RBH,1,1,RBH,"RBH","RBH")
+{
+# if defined(GRAN)
+ /* mainly statistics gathering for GranSim simulation */
+ STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1 /*Node*/);
+# endif
+
+ /* exactly the same as a BLACKHOLE_BQ_entry -- HWL */
+ /* Put ourselves on the blocking queue for this black hole */
+  StgTSO_link(CurrentTSO) = StgBlockingQueue_blocking_queue(R1);
+  StgBlockingQueue_blocking_queue(R1) = CurrentTSO;
+  /* jot down why and on what closure we are blocked */
+  StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
+  StgTSO_block_info(CurrentTSO) = R1;
+
+ /* PAR: dumping of event now done in blockThread -- HWL */
+
+ /* stg_gen_block is too heavyweight, use a specialised one */
+ jump stg_block_1;
+}
+
+INFO_TABLE(stg_RBH_Save_0,0,2,CONSTR,"RBH_Save_0","RBH_Save_0")
+{ foreign "C" barf("RBH_Save_0 object entered!"); }
+
+INFO_TABLE(stg_RBH_Save_1,1,1,CONSTR,"RBH_Save_1","RBH_Save_1")
+{ foreign "C" barf("RBH_Save_1 object entered!"); }
+
+INFO_TABLE(stg_RBH_Save_2,2,0,CONSTR,"RBH_Save_2","RBH_Save_2")
+{ foreign "C" barf("RBH_Save_2 object entered!"); }
+
+#endif /* defined(PAR) || defined(GRAN) */
+
+/* identical to BLACKHOLEs except for the infotag */
+INFO_TABLE(stg_CAF_BLACKHOLE,0,2,CAF_BLACKHOLE,"CAF_BLACKHOLE","CAF_BLACKHOLE")
+{
+#if defined(GRAN)
+ /* mainly statistics gathering for GranSim simulation */
+ STGCALL3(GranSimBlock,CurrentTSO,CurrentProc,(StgClosure *)R1 /*Node*/);
+#endif
+
+ TICK_ENT_BH();
+ LDV_ENTER(R1);
+
+ /* Put ourselves on the blocking queue for this black hole */
+ StgTSO_link(CurrentTSO) = stg_END_TSO_QUEUE_closure;
+ StgBlockingQueue_blocking_queue(R1) = CurrentTSO;
+
+ /* jot down why and on what closure we are blocked */
+ StgTSO_why_blocked(CurrentTSO) = BlockedOnBlackHole::I16;
+ StgTSO_block_info(CurrentTSO) = R1;
+
+ /* Change the CAF_BLACKHOLE into a BLACKHOLE_BQ_STATIC */
+ StgHeader_info(R1) = stg_BLACKHOLE_BQ_info;
+
+ /* closure is mutable since something has just been added to its BQ */
+ foreign "C" recordMutable(R1 "ptr");
+
+ /* PAR: dumping of event now done in blockThread -- HWL */
+
+ /* stg_gen_block is too heavyweight, use a specialised one */
+ jump stg_block_1;
+}
+
+#ifdef EAGER_BLACKHOLING
+INFO_TABLE(stg_SE_BLACKHOLE,0,2,SE_BLACKHOLE,"SE_BLACKHOLE","SE_BLACKHOLE")
+{ foreign "C" barf("SE_BLACKHOLE object entered!"); }
+
+INFO_TABLE(stg_SE_CAF_BLACKHOLE,0,2,SE_CAF_BLACKHOLE,"SE_CAF_BLACKHOLE","SE_CAF_BLACKHOLE")
+{ foreign "C" barf("SE_CAF_BLACKHOLE object entered!"); }
+#endif
+
+/* ----------------------------------------------------------------------------
+ Some static info tables for things that don't get entered, and
+ therefore don't need entry code (i.e. boxed but unpointed objects)
+   Entering one of these is a bug, so the entry code simply calls barf().
+ ------------------------------------------------------------------------- */
+
+INFO_TABLE(stg_TSO, 0,0,TSO, "TSO", "TSO")
+{ foreign "C" barf("TSO object entered!"); }
+
+/* ----------------------------------------------------------------------------
+ Evacuees are left behind by the garbage collector. Any attempt to enter
+ one is a real bug.
+ ------------------------------------------------------------------------- */
+
+INFO_TABLE(stg_EVACUATED,1,0,EVACUATED,"EVACUATED","EVACUATED")
+{ foreign "C" barf("EVACUATED object entered!"); }
+
+/* ----------------------------------------------------------------------------
+ Weak pointers
+
+ Live weak pointers have a special closure type. Dead ones are just
+ nullary constructors (although they live on the heap - we overwrite
+ live weak pointers with dead ones).
+ ------------------------------------------------------------------------- */
+
+INFO_TABLE(stg_WEAK,0,4,WEAK,"WEAK","WEAK")
+{ foreign "C" barf("WEAK object entered!"); }
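+
+/* Illustrative C sketch (not part of this commit): the four payload fields
+ * behind the 0,4 layout above. Field names follow the usual RTS headers but
+ * should be treated as assumptions here.
+ *
+ *   typedef struct StgWeak_ {
+ *       StgHeader        header;
+ *       StgClosure      *key;        // the key the weak pointer watches
+ *       StgClosure      *value;      // kept alive as long as the key is alive
+ *       StgClosure      *finalizer;  // run when the key dies
+ *       struct StgWeak_ *link;       // next weak pointer on the RTS's list
+ *   } StgWeak;
+ */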
+
+/*
+ * It's important when turning an existing WEAK into a DEAD_WEAK
+ * (which is what finalizeWeak# does) that we don't lose the link
+ * field and break the linked list of weak pointers. Hence, we give
+ * DEAD_WEAK 4 non-pointer fields, the same as WEAK.
+ */
+INFO_TABLE_CONSTR(stg_DEAD_WEAK,0,4,0,CONSTR,"DEAD_WEAK","DEAD_WEAK")
+{ foreign "C" barf("DEAD_WEAK object entered!"); }
+
+/* ----------------------------------------------------------------------------
+ NO_FINALIZER
+
+ This is a static nullary constructor (like []) that we use to mark an empty
+ finalizer in a weak pointer object.
+ ------------------------------------------------------------------------- */
+
+INFO_TABLE_CONSTR(stg_NO_FINALIZER,0,0,0,CONSTR_NOCAF_STATIC,"NO_FINALIZER","NO_FINALIZER")
+{ foreign "C" barf("NO_FINALIZER object entered!"); }
+
+CLOSURE(stg_NO_FINALIZER_closure,stg_NO_FINALIZER);
+
+/* ----------------------------------------------------------------------------
+ Foreign Objects are unlifted and therefore never entered.
+ ------------------------------------------------------------------------- */
+
+INFO_TABLE(stg_FOREIGN,0,1,FOREIGN,"FOREIGN","FOREIGN")
+{ foreign "C" barf("FOREIGN object entered!"); }
+
+/* ----------------------------------------------------------------------------
+ Stable Names are unlifted too.
+ ------------------------------------------------------------------------- */
+
+INFO_TABLE(stg_STABLE_NAME,0,1,STABLE_NAME,"STABLE_NAME","STABLE_NAME")
+{ foreign "C" barf("STABLE_NAME object entered!"); }
+
+/* ----------------------------------------------------------------------------
+ MVars
+
+ There are two kinds of these: full and empty. We need an info table
+ and entry code for each type.
+ ------------------------------------------------------------------------- */
+
+INFO_TABLE(stg_FULL_MVAR,4,0,MVAR,"MVAR","MVAR")
+{ foreign "C" barf("FULL_MVAR object entered!"); }
+
+INFO_TABLE(stg_EMPTY_MVAR,4,0,MVAR,"MVAR","MVAR")
+{ foreign "C" barf("EMPTY_MVAR object entered!"); }
+
+/* ----------------------------------------------------------------------------
+ END_TSO_QUEUE
+
+ This is a static nullary constructor (like []) that we use to mark the
+ end of a linked TSO queue.
+ ------------------------------------------------------------------------- */
+
+INFO_TABLE_CONSTR(stg_END_TSO_QUEUE,0,0,0,CONSTR_NOCAF_STATIC,"END_TSO_QUEUE","END_TSO_QUEUE")
+{ foreign "C" barf("END_TSO_QUEUE object entered!"); }
+
+CLOSURE(stg_END_TSO_QUEUE_closure,stg_END_TSO_QUEUE);
+
+/* ----------------------------------------------------------------------------
+ Mutable lists
+
+ Mutable lists (used by the garbage collector) consist of a chain of
+ StgMutClosures connected through their mut_link fields, ending in
+ an END_MUT_LIST closure.
+ ------------------------------------------------------------------------- */
+
+INFO_TABLE_CONSTR(stg_END_MUT_LIST,0,0,0,CONSTR_NOCAF_STATIC,"END_MUT_LIST","END_MUT_LIST")
+{ foreign "C" barf("END_MUT_LIST object entered!"); }
+
+CLOSURE(stg_END_MUT_LIST_closure,stg_END_MUT_LIST);
+
+INFO_TABLE(stg_MUT_CONS, 1, 1, MUT_CONS, "MUT_CONS", "MUT_CONS")
+{ foreign "C" barf("MUT_CONS object entered!"); }
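+
+/* Hypothetical sketch (not from this commit) of how the collector walks such
+ * a list: follow mut_link until the END_MUT_LIST closure is reached. Written
+ * in C for illustration only; the helper name is made up.
+ *
+ *   static void walk_mut_list( StgMutClosure *p )
+ *   {
+ *       while (p != (StgMutClosure *)&stg_END_MUT_LIST_closure) {
+ *           // ... scavenge the closure pointed to by p ...
+ *           p = p->mut_link;
+ *       }
+ *   }
+ */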
+
+/* ----------------------------------------------------------------------------
+ Exception lists
+ ------------------------------------------------------------------------- */
+
+INFO_TABLE_CONSTR(stg_END_EXCEPTION_LIST,0,0,0,CONSTR_NOCAF_STATIC,"END_EXCEPTION_LIST","END_EXCEPTION_LIST")
+{ foreign "C" barf("END_EXCEPTION_LIST object entered!"); }
+
+CLOSURE(stg_END_EXCEPTION_LIST_closure,stg_END_EXCEPTION_LIST);
+
+INFO_TABLE(stg_EXCEPTION_CONS,1,1,CONSTR,"EXCEPTION_CONS","EXCEPTION_CONS")
+{ foreign "C" barf("EXCEPTION_CONS object entered!"); }
+
+/* ----------------------------------------------------------------------------
+ Arrays
+
+ These come in two basic flavours: arrays of data (StgArrWords) and arrays of
+   pointers (StgMutArrPtrs). They all have a similar layout:
+
+ ___________________________
+ | Info | No. of | data....
+ | Ptr | Words |
+ ---------------------------
+
+ These are *unpointed* objects: i.e. they cannot be entered.
+
+ ------------------------------------------------------------------------- */
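+
+/* Illustrative C sketch (not part of this commit; the authoritative
+ * definitions live in the RTS headers, so treat the field names here as
+ * assumptions) of the two flavours described above:
+ *
+ *   typedef struct {
+ *       StgHeader  header;
+ *       StgWord    words;       // number of payload words
+ *       StgWord    payload[];   // raw data
+ *   } StgArrWords;
+ *
+ *   typedef struct {
+ *       StgHeader   header;
+ *       StgWord     ptrs;       // number of pointers
+ *       StgClosure *payload[];  // the pointers themselves
+ *   } StgMutArrPtrs;
+ */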
+
+INFO_TABLE(stg_ARR_WORDS, 0, 0, ARR_WORDS, "ARR_WORDS", "ARR_WORDS")
+{ foreign "C" barf("ARR_WORDS object entered!"); }
+
+INFO_TABLE(stg_MUT_ARR_PTRS, 0, 0, MUT_ARR_PTRS, "MUT_ARR_PTRS", "MUT_ARR_PTRS")
+{ foreign "C" barf("MUT_ARR_PTRS object entered!"); }
+
+INFO_TABLE(stg_MUT_ARR_PTRS_FROZEN, 0, 0, MUT_ARR_PTRS_FROZEN, "MUT_ARR_PTRS_FROZEN", "MUT_ARR_PTRS_FROZEN")
+{ foreign "C" barf("MUT_ARR_PTRS_FROZEN object entered!"); }
+
+/* ----------------------------------------------------------------------------
+ Mutable Variables
+ ------------------------------------------------------------------------- */
+
+INFO_TABLE(stg_MUT_VAR, 1, 1, MUT_VAR, "MUT_VAR", "MUT_VAR")
+{ foreign "C" barf("MUT_VAR object entered!"); }
+
+/* ----------------------------------------------------------------------------
+ Dummy return closure
+
+ Entering this closure will just return to the address on the top of the
+ stack. Useful for getting a thread in a canonical form where we can
+ just enter the top stack word to start the thread. (see deleteThread)
+   ------------------------------------------------------------------------- */
+
+INFO_TABLE( stg_dummy_ret, 0, 0, CONSTR_NOCAF_STATIC, "DUMMY_RET", "DUMMY_RET")
+{
+ jump %ENTRY_CODE(Sp(0));
+}
+CLOSURE(stg_dummy_ret_closure,stg_dummy_ret);
+
+/* ----------------------------------------------------------------------------
+ CHARLIKE and INTLIKE closures.
+
+ These are static representations of Chars and small Ints, so that
+ we can remove dynamic Chars and Ints during garbage collection and
+ replace them with references to the static objects.
+ ------------------------------------------------------------------------- */
+
+#if defined(ENABLE_WIN32_DLL_SUPPORT)
+/*
+ * When sticking the RTS in a DLL, we delay populating the
+ * Charlike and Intlike tables until load-time, which is only
+ * when we've got the real addresses to the C# and I# closures.
+ *
+ */
+static INFO_TBL_CONST StgInfoTable czh_static_info;
+static INFO_TBL_CONST StgInfoTable izh_static_info;
+#define Char_hash_static_info czh_static_info
+#define Int_hash_static_info izh_static_info
+#else
+#define Char_hash_static_info GHCziBase_Czh_static
+#define Int_hash_static_info GHCziBase_Izh_static
+#endif
+
+
+#define CHARLIKE_HDR(n) CLOSURE(Char_hash_static_info, n)
+#define INTLIKE_HDR(n) CLOSURE(Int_hash_static_info, n)
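+
+/* Illustrative sketch (not part of this commit; the macro names below are
+ * assumptions): the garbage collector can swap a dynamic C#/I# closure for
+ * its static counterpart by indexing into these tables, roughly:
+ *
+ *   #define CHARLIKE_CLOSURE(n) ((P_)&stg_CHARLIKE_closure[(n) - MIN_CHARLIKE])
+ *   #define INTLIKE_CLOSURE(n)  ((P_)&stg_INTLIKE_closure[(n) - MIN_INTLIKE])
+ *
+ * with MIN_CHARLIKE == 0 and MIN_INTLIKE == -16 (see the tables below).
+ */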
+
+/* put these in the *data* section, since the garbage collector relies
+ * on the fact that static closures live in the data section.
+ */
+
+/* end the name with _closure, to convince the mangler this is a closure */
+
+section "data" {
+ stg_CHARLIKE_closure:
+ CHARLIKE_HDR(0)
+ CHARLIKE_HDR(1)
+ CHARLIKE_HDR(2)
+ CHARLIKE_HDR(3)
+ CHARLIKE_HDR(4)
+ CHARLIKE_HDR(5)
+ CHARLIKE_HDR(6)
+ CHARLIKE_HDR(7)
+ CHARLIKE_HDR(8)
+ CHARLIKE_HDR(9)
+ CHARLIKE_HDR(10)
+ CHARLIKE_HDR(11)
+ CHARLIKE_HDR(12)
+ CHARLIKE_HDR(13)
+ CHARLIKE_HDR(14)
+ CHARLIKE_HDR(15)
+ CHARLIKE_HDR(16)
+ CHARLIKE_HDR(17)
+ CHARLIKE_HDR(18)
+ CHARLIKE_HDR(19)
+ CHARLIKE_HDR(20)
+ CHARLIKE_HDR(21)
+ CHARLIKE_HDR(22)
+ CHARLIKE_HDR(23)
+ CHARLIKE_HDR(24)
+ CHARLIKE_HDR(25)
+ CHARLIKE_HDR(26)
+ CHARLIKE_HDR(27)
+ CHARLIKE_HDR(28)
+ CHARLIKE_HDR(29)
+ CHARLIKE_HDR(30)
+ CHARLIKE_HDR(31)
+ CHARLIKE_HDR(32)
+ CHARLIKE_HDR(33)
+ CHARLIKE_HDR(34)
+ CHARLIKE_HDR(35)
+ CHARLIKE_HDR(36)
+ CHARLIKE_HDR(37)
+ CHARLIKE_HDR(38)
+ CHARLIKE_HDR(39)
+ CHARLIKE_HDR(40)
+ CHARLIKE_HDR(41)
+ CHARLIKE_HDR(42)
+ CHARLIKE_HDR(43)
+ CHARLIKE_HDR(44)
+ CHARLIKE_HDR(45)
+ CHARLIKE_HDR(46)
+ CHARLIKE_HDR(47)
+ CHARLIKE_HDR(48)
+ CHARLIKE_HDR(49)
+ CHARLIKE_HDR(50)
+ CHARLIKE_HDR(51)
+ CHARLIKE_HDR(52)
+ CHARLIKE_HDR(53)
+ CHARLIKE_HDR(54)
+ CHARLIKE_HDR(55)
+ CHARLIKE_HDR(56)
+ CHARLIKE_HDR(57)
+ CHARLIKE_HDR(58)
+ CHARLIKE_HDR(59)
+ CHARLIKE_HDR(60)
+ CHARLIKE_HDR(61)
+ CHARLIKE_HDR(62)
+ CHARLIKE_HDR(63)
+ CHARLIKE_HDR(64)
+ CHARLIKE_HDR(65)
+ CHARLIKE_HDR(66)
+ CHARLIKE_HDR(67)
+ CHARLIKE_HDR(68)
+ CHARLIKE_HDR(69)
+ CHARLIKE_HDR(70)
+ CHARLIKE_HDR(71)
+ CHARLIKE_HDR(72)
+ CHARLIKE_HDR(73)
+ CHARLIKE_HDR(74)
+ CHARLIKE_HDR(75)
+ CHARLIKE_HDR(76)
+ CHARLIKE_HDR(77)
+ CHARLIKE_HDR(78)
+ CHARLIKE_HDR(79)
+ CHARLIKE_HDR(80)
+ CHARLIKE_HDR(81)
+ CHARLIKE_HDR(82)
+ CHARLIKE_HDR(83)
+ CHARLIKE_HDR(84)
+ CHARLIKE_HDR(85)
+ CHARLIKE_HDR(86)
+ CHARLIKE_HDR(87)
+ CHARLIKE_HDR(88)
+ CHARLIKE_HDR(89)
+ CHARLIKE_HDR(90)
+ CHARLIKE_HDR(91)
+ CHARLIKE_HDR(92)
+ CHARLIKE_HDR(93)
+ CHARLIKE_HDR(94)
+ CHARLIKE_HDR(95)
+ CHARLIKE_HDR(96)
+ CHARLIKE_HDR(97)
+ CHARLIKE_HDR(98)
+ CHARLIKE_HDR(99)
+ CHARLIKE_HDR(100)
+ CHARLIKE_HDR(101)
+ CHARLIKE_HDR(102)
+ CHARLIKE_HDR(103)
+ CHARLIKE_HDR(104)
+ CHARLIKE_HDR(105)
+ CHARLIKE_HDR(106)
+ CHARLIKE_HDR(107)
+ CHARLIKE_HDR(108)
+ CHARLIKE_HDR(109)
+ CHARLIKE_HDR(110)
+ CHARLIKE_HDR(111)
+ CHARLIKE_HDR(112)
+ CHARLIKE_HDR(113)
+ CHARLIKE_HDR(114)
+ CHARLIKE_HDR(115)
+ CHARLIKE_HDR(116)
+ CHARLIKE_HDR(117)
+ CHARLIKE_HDR(118)
+ CHARLIKE_HDR(119)
+ CHARLIKE_HDR(120)
+ CHARLIKE_HDR(121)
+ CHARLIKE_HDR(122)
+ CHARLIKE_HDR(123)
+ CHARLIKE_HDR(124)
+ CHARLIKE_HDR(125)
+ CHARLIKE_HDR(126)
+ CHARLIKE_HDR(127)
+ CHARLIKE_HDR(128)
+ CHARLIKE_HDR(129)
+ CHARLIKE_HDR(130)
+ CHARLIKE_HDR(131)
+ CHARLIKE_HDR(132)
+ CHARLIKE_HDR(133)
+ CHARLIKE_HDR(134)
+ CHARLIKE_HDR(135)
+ CHARLIKE_HDR(136)
+ CHARLIKE_HDR(137)
+ CHARLIKE_HDR(138)
+ CHARLIKE_HDR(139)
+ CHARLIKE_HDR(140)
+ CHARLIKE_HDR(141)
+ CHARLIKE_HDR(142)
+ CHARLIKE_HDR(143)
+ CHARLIKE_HDR(144)
+ CHARLIKE_HDR(145)
+ CHARLIKE_HDR(146)
+ CHARLIKE_HDR(147)
+ CHARLIKE_HDR(148)
+ CHARLIKE_HDR(149)
+ CHARLIKE_HDR(150)
+ CHARLIKE_HDR(151)
+ CHARLIKE_HDR(152)
+ CHARLIKE_HDR(153)
+ CHARLIKE_HDR(154)
+ CHARLIKE_HDR(155)
+ CHARLIKE_HDR(156)
+ CHARLIKE_HDR(157)
+ CHARLIKE_HDR(158)
+ CHARLIKE_HDR(159)
+ CHARLIKE_HDR(160)
+ CHARLIKE_HDR(161)
+ CHARLIKE_HDR(162)
+ CHARLIKE_HDR(163)
+ CHARLIKE_HDR(164)
+ CHARLIKE_HDR(165)
+ CHARLIKE_HDR(166)
+ CHARLIKE_HDR(167)
+ CHARLIKE_HDR(168)
+ CHARLIKE_HDR(169)
+ CHARLIKE_HDR(170)
+ CHARLIKE_HDR(171)
+ CHARLIKE_HDR(172)
+ CHARLIKE_HDR(173)
+ CHARLIKE_HDR(174)
+ CHARLIKE_HDR(175)
+ CHARLIKE_HDR(176)
+ CHARLIKE_HDR(177)
+ CHARLIKE_HDR(178)
+ CHARLIKE_HDR(179)
+ CHARLIKE_HDR(180)
+ CHARLIKE_HDR(181)
+ CHARLIKE_HDR(182)
+ CHARLIKE_HDR(183)
+ CHARLIKE_HDR(184)
+ CHARLIKE_HDR(185)
+ CHARLIKE_HDR(186)
+ CHARLIKE_HDR(187)
+ CHARLIKE_HDR(188)
+ CHARLIKE_HDR(189)
+ CHARLIKE_HDR(190)
+ CHARLIKE_HDR(191)
+ CHARLIKE_HDR(192)
+ CHARLIKE_HDR(193)
+ CHARLIKE_HDR(194)
+ CHARLIKE_HDR(195)
+ CHARLIKE_HDR(196)
+ CHARLIKE_HDR(197)
+ CHARLIKE_HDR(198)
+ CHARLIKE_HDR(199)
+ CHARLIKE_HDR(200)
+ CHARLIKE_HDR(201)
+ CHARLIKE_HDR(202)
+ CHARLIKE_HDR(203)
+ CHARLIKE_HDR(204)
+ CHARLIKE_HDR(205)
+ CHARLIKE_HDR(206)
+ CHARLIKE_HDR(207)
+ CHARLIKE_HDR(208)
+ CHARLIKE_HDR(209)
+ CHARLIKE_HDR(210)
+ CHARLIKE_HDR(211)
+ CHARLIKE_HDR(212)
+ CHARLIKE_HDR(213)
+ CHARLIKE_HDR(214)
+ CHARLIKE_HDR(215)
+ CHARLIKE_HDR(216)
+ CHARLIKE_HDR(217)
+ CHARLIKE_HDR(218)
+ CHARLIKE_HDR(219)
+ CHARLIKE_HDR(220)
+ CHARLIKE_HDR(221)
+ CHARLIKE_HDR(222)
+ CHARLIKE_HDR(223)
+ CHARLIKE_HDR(224)
+ CHARLIKE_HDR(225)
+ CHARLIKE_HDR(226)
+ CHARLIKE_HDR(227)
+ CHARLIKE_HDR(228)
+ CHARLIKE_HDR(229)
+ CHARLIKE_HDR(230)
+ CHARLIKE_HDR(231)
+ CHARLIKE_HDR(232)
+ CHARLIKE_HDR(233)
+ CHARLIKE_HDR(234)
+ CHARLIKE_HDR(235)
+ CHARLIKE_HDR(236)
+ CHARLIKE_HDR(237)
+ CHARLIKE_HDR(238)
+ CHARLIKE_HDR(239)
+ CHARLIKE_HDR(240)
+ CHARLIKE_HDR(241)
+ CHARLIKE_HDR(242)
+ CHARLIKE_HDR(243)
+ CHARLIKE_HDR(244)
+ CHARLIKE_HDR(245)
+ CHARLIKE_HDR(246)
+ CHARLIKE_HDR(247)
+ CHARLIKE_HDR(248)
+ CHARLIKE_HDR(249)
+ CHARLIKE_HDR(250)
+ CHARLIKE_HDR(251)
+ CHARLIKE_HDR(252)
+ CHARLIKE_HDR(253)
+ CHARLIKE_HDR(254)
+ CHARLIKE_HDR(255)
+}
+
+section "data" {
+ stg_INTLIKE_closure:
+ INTLIKE_HDR(-16) /* MIN_INTLIKE == -16 */
+ INTLIKE_HDR(-15)
+ INTLIKE_HDR(-14)
+ INTLIKE_HDR(-13)
+ INTLIKE_HDR(-12)
+ INTLIKE_HDR(-11)
+ INTLIKE_HDR(-10)
+ INTLIKE_HDR(-9)
+ INTLIKE_HDR(-8)
+ INTLIKE_HDR(-7)
+ INTLIKE_HDR(-6)
+ INTLIKE_HDR(-5)
+ INTLIKE_HDR(-4)
+ INTLIKE_HDR(-3)
+ INTLIKE_HDR(-2)
+ INTLIKE_HDR(-1)
+ INTLIKE_HDR(0)
+ INTLIKE_HDR(1)
+ INTLIKE_HDR(2)
+ INTLIKE_HDR(3)
+ INTLIKE_HDR(4)
+ INTLIKE_HDR(5)
+ INTLIKE_HDR(6)
+ INTLIKE_HDR(7)
+ INTLIKE_HDR(8)
+ INTLIKE_HDR(9)
+ INTLIKE_HDR(10)
+ INTLIKE_HDR(11)
+ INTLIKE_HDR(12)
+ INTLIKE_HDR(13)
+ INTLIKE_HDR(14)
+ INTLIKE_HDR(15)
+ INTLIKE_HDR(16) /* MAX_INTLIKE == 16 */
+}