summaryrefslogtreecommitdiff
path: root/ghc/compiler/codeGen
diff options
context:
space:
mode:
Diffstat (limited to 'ghc/compiler/codeGen')
-rw-r--r--ghc/compiler/codeGen/CgBindery.hi88
-rw-r--r--ghc/compiler/codeGen/CgBindery.lhs416
-rw-r--r--ghc/compiler/codeGen/CgCase.hi25
-rw-r--r--ghc/compiler/codeGen/CgCase.lhs1107
-rw-r--r--ghc/compiler/codeGen/CgClosure.hi32
-rw-r--r--ghc/compiler/codeGen/CgClosure.lhs1014
-rw-r--r--ghc/compiler/codeGen/CgCompInfo.hi94
-rw-r--r--ghc/compiler/codeGen/CgCompInfo.lhs189
-rw-r--r--ghc/compiler/codeGen/CgCon.hi35
-rw-r--r--ghc/compiler/codeGen/CgCon.lhs515
-rw-r--r--ghc/compiler/codeGen/CgConTbls.hi24
-rw-r--r--ghc/compiler/codeGen/CgConTbls.lhs430
-rw-r--r--ghc/compiler/codeGen/CgExpr.hi24
-rw-r--r--ghc/compiler/codeGen/CgExpr.lhs414
-rw-r--r--ghc/compiler/codeGen/CgHeapery.hi33
-rw-r--r--ghc/compiler/codeGen/CgHeapery.lhs278
-rw-r--r--ghc/compiler/codeGen/CgLetNoEscape.hi12
-rw-r--r--ghc/compiler/codeGen/CgLetNoEscape.lhs202
-rw-r--r--ghc/compiler/codeGen/CgMonad.hi209
-rw-r--r--ghc/compiler/codeGen/CgMonad.lhs914
-rw-r--r--ghc/compiler/codeGen/CgRetConv.hi39
-rw-r--r--ghc/compiler/codeGen/CgRetConv.lhs436
-rw-r--r--ghc/compiler/codeGen/CgStackery.hi35
-rw-r--r--ghc/compiler/codeGen/CgStackery.lhs264
-rw-r--r--ghc/compiler/codeGen/CgTailCall.hi44
-rw-r--r--ghc/compiler/codeGen/CgTailCall.lhs548
-rw-r--r--ghc/compiler/codeGen/CgUpdate.hi7
-rw-r--r--ghc/compiler/codeGen/CgUpdate.lhs155
-rw-r--r--ghc/compiler/codeGen/CgUsages.hi39
-rw-r--r--ghc/compiler/codeGen/CgUsages.lhs152
-rw-r--r--ghc/compiler/codeGen/ClosureInfo.hi169
-rw-r--r--ghc/compiler/codeGen/ClosureInfo.lhs1328
-rw-r--r--ghc/compiler/codeGen/CodeGen.hi27
-rw-r--r--ghc/compiler/codeGen/CodeGen.lhs177
-rw-r--r--ghc/compiler/codeGen/Jmakefile19
-rw-r--r--ghc/compiler/codeGen/SMRep.hi37
-rw-r--r--ghc/compiler/codeGen/SMRep.lhs208
-rw-r--r--ghc/compiler/codeGen/cgintro.lit783
38 files changed, 10522 insertions, 0 deletions
diff --git a/ghc/compiler/codeGen/CgBindery.hi b/ghc/compiler/codeGen/CgBindery.hi
new file mode 100644
index 0000000000..7d11d51c7b
--- /dev/null
+++ b/ghc/compiler/codeGen/CgBindery.hi
@@ -0,0 +1,88 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgBindery where
+import AbsCSyn(AbstractC, CAddrMode, CExprMacro, CStmtMacro, MagicId, RegRelative, ReturnInfo)
+import BasicLit(BasicLit)
+import CLabelInfo(CLabel)
+import CgMonad(CgInfoDownwards, CgState, StubFlag)
+import ClosureInfo(ClosureInfo, LambdaFormInfo, StandardFormInfo)
+import CostCentre(CostCentre)
+import HeapOffs(HeapOffset)
+import Id(Id, IdDetails)
+import IdEnv(IdEnv(..))
+import IdInfo(IdInfo)
+import Maybes(Labda)
+import PreludePS(_PackedString)
+import PreludeRatio(Ratio(..))
+import PrimKind(PrimKind)
+import PrimOps(PrimOp)
+import StgSyn(StgAtom)
+import UniType(UniType)
+import UniqFM(UniqFM)
+import UniqSet(UniqSet(..))
+import Unique(Unique)
+data AbstractC {-# GHC_PRAGMA AbsCNop | AbsCStmts AbstractC AbstractC | CAssign CAddrMode CAddrMode | CJump CAddrMode | CFallThrough CAddrMode | CReturn CAddrMode ReturnInfo | CSwitch CAddrMode [(BasicLit, AbstractC)] AbstractC | CCodeBlock CLabel AbstractC | CInitHdr ClosureInfo RegRelative CAddrMode Bool | COpStmt [CAddrMode] PrimOp [CAddrMode] Int [MagicId] | CSimultaneous AbstractC | CMacroStmt CStmtMacro [CAddrMode] | CCallProfCtrMacro _PackedString [CAddrMode] | CCallProfCCMacro _PackedString [CAddrMode] | CStaticClosure CLabel ClosureInfo CAddrMode [CAddrMode] | CClosureInfoAndCode ClosureInfo AbstractC (Labda AbstractC) CAddrMode [Char] | CRetVector CLabel [Labda CAddrMode] AbstractC | CRetUnVector CLabel CAddrMode | CFlatRetVector CLabel [CAddrMode] | CCostCentreDecl Bool CostCentre | CClosureUpdInfo AbstractC | CSplitMarker #-}
+data CAddrMode {-# GHC_PRAGMA CVal RegRelative PrimKind | CAddr RegRelative | CReg MagicId | CTableEntry CAddrMode CAddrMode PrimKind | CTemp Unique PrimKind | CLbl CLabel PrimKind | CUnVecLbl CLabel CLabel | CCharLike CAddrMode | CIntLike CAddrMode | CString _PackedString | CLit BasicLit | CLitLit _PackedString PrimKind | COffset HeapOffset | CCode AbstractC | CLabelledCode CLabel AbstractC | CJoinPoint Int Int | CMacroExpr PrimKind CExprMacro [CAddrMode] | CCostCentre CostCentre Bool #-}
+data MagicId {-# GHC_PRAGMA BaseReg | StkOReg | VanillaReg PrimKind Int# | FloatReg Int# | DoubleReg Int# | TagReg | RetReg | SpA | SuA | SpB | SuB | Hp | HpLim | LivenessReg | ActivityReg | StdUpdRetVecReg | StkStubReg | CurCostCentre | VoidReg #-}
+data BasicLit {-# GHC_PRAGMA MachChar Char | MachStr _PackedString | MachAddr Integer | MachInt Integer Bool | MachFloat (Ratio Integer) | MachDouble (Ratio Integer) | MachLitLit _PackedString PrimKind | NoRepStr _PackedString | NoRepInteger Integer | NoRepRational (Ratio Integer) #-}
+data CLabel
+type CgBindings = UniqFM CgIdInfo
+data CgIdInfo = MkCgIdInfo Id VolatileLoc StableLoc LambdaFormInfo
+data CgState {-# GHC_PRAGMA MkCgState AbstractC (UniqFM CgIdInfo) ((Int, [(Int, StubFlag)], Int, Int), (Int, [Int], Int, Int), (HeapOffset, HeapOffset)) #-}
+data LambdaFormInfo {-# GHC_PRAGMA LFReEntrant Bool Int Bool | LFCon Id Bool | LFTuple Id Bool | LFThunk Bool Bool Bool StandardFormInfo | LFArgument | LFImported | LFLetNoEscape Int (UniqFM Id) | LFBlackHole | LFIndirection #-}
+data HeapOffset
+data Id {-# GHC_PRAGMA Id Unique UniType IdInfo IdDetails #-}
+type IdEnv a = UniqFM a
+data Labda a {-# GHC_PRAGMA Hamna | Ni a #-}
+data StableLoc {-# GHC_PRAGMA NoStableLoc | VirAStkLoc Int | VirBStkLoc Int | LitLoc BasicLit | StableAmodeLoc CAddrMode #-}
+data StgAtom a {-# GHC_PRAGMA StgVarAtom a | StgLitAtom BasicLit #-}
+data UniqFM a {-# GHC_PRAGMA EmptyUFM | LeafUFM Int# a | NodeUFM Int# Int# (UniqFM a) (UniqFM a) #-}
+type UniqSet a = UniqFM a
+data Unique {-# GHC_PRAGMA MkUnique Int# #-}
+data VolatileLoc {-# GHC_PRAGMA NoVolatileLoc | TempVarLoc Unique | RegLoc MagicId | VirHpLoc HeapOffset | VirNodeLoc HeapOffset #-}
+bindArgsToRegs :: [Id] -> [MagicId] -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 2 _U_ 1122 _N_ _S_ "SL" _N_ _N_ #-}
+bindNewPrimToAmode :: Id -> CAddrMode -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 2 _U_ 2122 _N_ _S_ "LS" _N_ _N_ #-}
+bindNewToAStack :: (Id, Int) -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 101 _N_ _S_ "U(LL)AU(LLL)" {_A_ 5 _U_ 22222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+bindNewToBStack :: (Id, Int) -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 101 _N_ _S_ "U(LL)AU(LLL)" {_A_ 5 _U_ 22222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+bindNewToNode :: Id -> HeapOffset -> LambdaFormInfo -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 5 _U_ 22201 _N_ _S_ "LLLAU(LLL)" {_A_ 4 _U_ 2221 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+bindNewToReg :: Id -> MagicId -> LambdaFormInfo -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 5 _U_ 22201 _N_ _S_ "LLLAU(LLL)" {_A_ 4 _U_ 2221 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+bindNewToTemp :: Id -> CgInfoDownwards -> CgState -> (CAddrMode, CgState)
+ {-# GHC_PRAGMA _A_ 1 _U_ 201 _N_ _N_ _N_ _N_ #-}
+getAtomAmode :: StgAtom Id -> CgInfoDownwards -> CgState -> (CAddrMode, CgState)
+ {-# GHC_PRAGMA _A_ 1 _U_ 122 _N_ _S_ "S" _N_ _N_ #-}
+getAtomAmodes :: [StgAtom Id] -> CgInfoDownwards -> CgState -> ([CAddrMode], CgState)
+ {-# GHC_PRAGMA _A_ 1 _U_ 122 _N_ _S_ "S" _N_ _N_ #-}
+getCAddrMode :: Id -> CgInfoDownwards -> CgState -> (CAddrMode, CgState)
+ {-# GHC_PRAGMA _A_ 1 _U_ 122 _N_ _S_ "U(LLLS)" {_A_ 4 _U_ 222222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+getCAddrModeAndInfo :: Id -> CgInfoDownwards -> CgState -> ((CAddrMode, LambdaFormInfo), CgState)
+ {-# GHC_PRAGMA _A_ 1 _U_ 122 _N_ _S_ "U(LLLS)" {_A_ 4 _U_ 222222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+getCAddrModeIfVolatile :: Id -> CgInfoDownwards -> CgState -> (Labda CAddrMode, CgState)
+ {-# GHC_PRAGMA _A_ 1 _U_ 122 _N_ _S_ "U(LLLS)" {_A_ 4 _U_ 222222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+getVolatileRegs :: UniqFM Id -> CgInfoDownwards -> CgState -> ([MagicId], CgState)
+ {-# GHC_PRAGMA _A_ 1 _U_ 222 _N_ _N_ _N_ _N_ #-}
+heapIdInfo :: Id -> HeapOffset -> LambdaFormInfo -> CgIdInfo
+ {-# GHC_PRAGMA _A_ 3 _U_ 222 _N_ _N_ _N_ _N_ #-}
+idInfoToAmode :: PrimKind -> CgIdInfo -> CgInfoDownwards -> CgState -> (CAddrMode, CgState)
+ {-# GHC_PRAGMA _A_ 2 _U_ 2122 _N_ _S_ "LU(ASLA)" {_A_ 5 _U_ 21122 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+letNoEscapeIdInfo :: Id -> Int -> Int -> LambdaFormInfo -> CgIdInfo
+ {-# GHC_PRAGMA _A_ 4 _U_ 2222 _N_ _N_ _N_ _N_ #-}
+maybeAStkLoc :: StableLoc -> Labda Int
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _F_ _IF_ARGS_ 0 1 C 8 \ (u0 :: StableLoc) -> case u0 of { _ALG_ _ORIG_ CgBindery VirAStkLoc (u1 :: Int) -> _!_ _ORIG_ Maybes Ni [Int] [u1]; (u2 :: StableLoc) -> _!_ _ORIG_ Maybes Hamna [Int] [] } _N_ #-}
+maybeBStkLoc :: StableLoc -> Labda Int
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _F_ _IF_ARGS_ 0 1 C 8 \ (u0 :: StableLoc) -> case u0 of { _ALG_ _ORIG_ CgBindery VirBStkLoc (u1 :: Int) -> _!_ _ORIG_ Maybes Ni [Int] [u1]; (u2 :: StableLoc) -> _!_ _ORIG_ Maybes Hamna [Int] [] } _N_ #-}
+newTempAmodeAndIdInfo :: Id -> LambdaFormInfo -> (CAddrMode, CgIdInfo)
+ {-# GHC_PRAGMA _A_ 2 _U_ 22 _N_ _N_ _N_ _N_ #-}
+nukeVolatileBinds :: UniqFM CgIdInfo -> UniqFM CgIdInfo
+ {-# GHC_PRAGMA _A_ 1 _U_ 2 _N_ _S_ "S" _N_ _N_ #-}
+rebindToAStack :: Id -> Int -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 4 _U_ 2201 _N_ _S_ "LLAU(LLL)" {_A_ 5 _U_ 22222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+rebindToBStack :: Id -> Int -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 4 _U_ 2201 _N_ _S_ "LLAU(LLL)" {_A_ 5 _U_ 22222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+stableAmodeIdInfo :: Id -> CAddrMode -> LambdaFormInfo -> CgIdInfo
+ {-# GHC_PRAGMA _A_ 3 _U_ 222 _N_ _N_ _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgBindery.lhs b/ghc/compiler/codeGen/CgBindery.lhs
new file mode 100644
index 0000000000..fbc2fc9e21
--- /dev/null
+++ b/ghc/compiler/codeGen/CgBindery.lhs
@@ -0,0 +1,416 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1995
+%
+\section[CgBindery]{Utility functions related to doing @CgBindings@}
+
+\begin{code}
+#include "HsVersions.h"
+
+module CgBindery (
+ CgBindings(..), CgIdInfo(..){-dubiously concrete-},
+ StableLoc, VolatileLoc, LambdaFormInfo{-re-exported-},
+
+ maybeAStkLoc, maybeBStkLoc,
+
+ stableAmodeIdInfo, heapIdInfo, newTempAmodeAndIdInfo,
+ letNoEscapeIdInfo, idInfoToAmode,
+
+ nukeVolatileBinds,
+
+ bindNewToAStack, bindNewToBStack,
+ bindNewToNode, bindNewToReg, bindArgsToRegs,
+--UNUSED: bindNewToSameAsOther,
+ bindNewToTemp, bindNewPrimToAmode,
+ getAtomAmode, getAtomAmodes,
+ getCAddrModeAndInfo, getCAddrMode,
+ getCAddrModeIfVolatile, getVolatileRegs,
+ rebindToAStack, rebindToBStack,
+--UNUSED: rebindToTemp,
+
+ -- and to make a self-sufficient interface...
+ AbstractC, CAddrMode, HeapOffset, MagicId, CLabel, CgState,
+ BasicLit, IdEnv(..), UniqFM,
+ Id, Maybe, Unique, StgAtom, UniqSet(..)
+ ) where
+
+IMPORT_Trace -- ToDo: rm (debugging only)
+import Outputable
+import Unpretty
+import PprAbsC
+
+import AbsCSyn
+import CgMonad
+
+import CgUsages ( getHpRelOffset, getSpARelOffset, getSpBRelOffset )
+import CLabelInfo ( mkClosureLabel, CLabel )
+import ClosureInfo
+import Id ( getIdKind, toplevelishId, isDataCon, Id )
+import IdEnv -- used to build CgBindings
+import Maybes ( catMaybes, Maybe(..) )
+import UniqSet -- ( setToList )
+import StgSyn
+import Util
+\end{code}
+
+
+%************************************************************************
+%* *
+\subsection[Bindery-datatypes]{Data types}
+%* *
+%************************************************************************
+
+@(CgBinding a b)@ is a type of finite maps from a to b.
+
+The assumption used to be that @lookupCgBind@ must get exactly one
+match. This is {\em completely wrong} in the case of compiling
+letrecs (where knot-tying is used). An initial binding is fed in (and
+never evaluated); eventually, a correct binding is put into the
+environment. So there can be two bindings for a given name.
+
+\begin{code}
+type CgBindings = IdEnv CgIdInfo
+
+data CgIdInfo
+ = MkCgIdInfo Id -- Id that this is the info for
+ VolatileLoc
+ StableLoc
+ LambdaFormInfo
+
+data VolatileLoc
+ = NoVolatileLoc
+ | TempVarLoc Unique
+
+ | RegLoc MagicId -- in one of the magic registers
+ -- (probably {Int,Float,Char,etc}Reg
+
+ | VirHpLoc VirtualHeapOffset -- Hp+offset (address of closure)
+
+ | VirNodeLoc VirtualHeapOffset -- Cts of offset indirect from Node
+ -- ie *(Node+offset)
+
+data StableLoc
+ = NoStableLoc
+ | VirAStkLoc VirtualSpAOffset
+ | VirBStkLoc VirtualSpBOffset
+ | LitLoc BasicLit
+ | StableAmodeLoc CAddrMode
+
+-- these are so StableLoc can be abstract:
+
+maybeAStkLoc (VirAStkLoc offset) = Just offset
+maybeAStkLoc _ = Nothing
+
+maybeBStkLoc (VirBStkLoc offset) = Just offset
+maybeBStkLoc _ = Nothing
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[Bindery-idInfo]{Manipulating IdInfo}
+%* *
+%************************************************************************
+
+\begin{code}
+stableAmodeIdInfo i amode lf_info = MkCgIdInfo i NoVolatileLoc (StableAmodeLoc amode) lf_info
+heapIdInfo i offset lf_info = MkCgIdInfo i (VirHpLoc offset) NoStableLoc lf_info
+tempIdInfo i uniq lf_info = MkCgIdInfo i (TempVarLoc uniq) NoStableLoc lf_info
+
+letNoEscapeIdInfo i spa spb lf_info
+ = MkCgIdInfo i NoVolatileLoc (StableAmodeLoc (CJoinPoint spa spb)) lf_info
+
+newTempAmodeAndIdInfo :: Id -> LambdaFormInfo -> (CAddrMode, CgIdInfo)
+
+newTempAmodeAndIdInfo name lf_info
+ = (temp_amode, temp_idinfo)
+ where
+ uniq = getTheUnique name
+ temp_amode = CTemp uniq (getIdKind name)
+ temp_idinfo = tempIdInfo name uniq lf_info
+
+idInfoToAmode :: PrimKind -> CgIdInfo -> FCode CAddrMode
+idInfoToAmode kind (MkCgIdInfo _ vol stab _) = idInfoPiecesToAmode kind vol stab
+
+idInfoPiecesToAmode :: PrimKind -> VolatileLoc -> StableLoc -> FCode CAddrMode
+
+idInfoPiecesToAmode kind (TempVarLoc uniq) stable_loc = returnFC (CTemp uniq kind)
+idInfoPiecesToAmode kind (RegLoc magic_id) stable_loc = returnFC (CReg magic_id)
+
+idInfoPiecesToAmode kind NoVolatileLoc (LitLoc lit) = returnFC (CLit lit)
+idInfoPiecesToAmode kind NoVolatileLoc (StableAmodeLoc amode) = returnFC amode
+
+idInfoPiecesToAmode kind (VirNodeLoc nd_off) stable_loc
+ = returnFC (CVal (NodeRel nd_off) kind)
+ -- Virtual offsets from Node increase into the closures,
+ -- and so do Node-relative offsets (which we want in the CVal),
+ -- so there is no mucking about to do to the offset.
+
+idInfoPiecesToAmode kind (VirHpLoc hp_off) stable_loc
+ = getHpRelOffset hp_off `thenFC` \ rel_hp ->
+ returnFC (CAddr rel_hp)
+
+idInfoPiecesToAmode kind NoVolatileLoc (VirAStkLoc i)
+ = getSpARelOffset i `thenFC` \ rel_spA ->
+ returnFC (CVal rel_spA kind)
+
+idInfoPiecesToAmode kind NoVolatileLoc (VirBStkLoc i)
+ = getSpBRelOffset i `thenFC` \ rel_spB ->
+ returnFC (CVal rel_spB kind)
+
+idInfoPiecesToAmode kind NoVolatileLoc NoStableLoc = panic "idInfoPiecesToAmode: no loc"
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[Bindery-nuke-volatile]{Nuking volatile bindings}
+%* *
+%************************************************************************
+
+We sometimes want to nuke all the volatile bindings; we must be sure
+we don't leave any (NoVolatile, NoStable) binds around...
+
+\begin{code}
+nukeVolatileBinds :: CgBindings -> CgBindings
+nukeVolatileBinds binds
+ = mkIdEnv (foldr keep_if_stable [] (rngIdEnv binds))
+ where
+ keep_if_stable (MkCgIdInfo i _ NoStableLoc entry_info) acc = acc
+ keep_if_stable (MkCgIdInfo i _ stable_loc entry_info) acc
+ = (i, MkCgIdInfo i NoVolatileLoc stable_loc entry_info) : acc
+\end{code}
+
+
+%************************************************************************
+%* *
+\subsection[lookup-interface]{Interface functions to looking up bindings}
+%* *
+%************************************************************************
+
+I {\em think} all looking-up is done through @getCAddrMode(s)@.
+
+\begin{code}
+getCAddrModeAndInfo :: Id -> FCode (CAddrMode, LambdaFormInfo)
+
+getCAddrModeAndInfo name
+ | not (isLocallyDefined name)
+ = returnFC (global_amode, mkLFImported name)
+
+ | isDataCon name
+ = returnFC (global_amode, mkConLFInfo name)
+
+ | otherwise = -- *might* be a nested defn: in any case, it's something whose
+ -- definition we will know about...
+ lookupBindC name `thenFC` \ (MkCgIdInfo _ volatile_loc stable_loc lf_info) ->
+ idInfoPiecesToAmode kind volatile_loc stable_loc `thenFC` \ amode ->
+ returnFC (amode, lf_info)
+ where
+ global_amode = CLbl (mkClosureLabel name) kind
+ kind = getIdKind name
+
+getCAddrMode :: Id -> FCode CAddrMode
+getCAddrMode name
+ = getCAddrModeAndInfo name `thenFC` \ (amode, _) ->
+ returnFC amode
+\end{code}
+
+\begin{code}
+getCAddrModeIfVolatile :: Id -> FCode (Maybe CAddrMode)
+getCAddrModeIfVolatile name
+ | toplevelishId name = returnFC Nothing
+ | otherwise
+ = lookupBindC name `thenFC` \ ~(MkCgIdInfo _ volatile_loc stable_loc lf_info) ->
+ case stable_loc of
+ NoStableLoc -> -- Aha! So it is volatile!
+ idInfoPiecesToAmode (getIdKind name) volatile_loc NoStableLoc `thenFC` \ amode ->
+ returnFC (Just amode)
+
+ a_stable_loc -> returnFC Nothing
+\end{code}
+
+@getVolatileRegs@ gets a set of live variables, and returns a list of
+all registers on which these variables depend. These are the regs
+which must be saved and restored across any C calls. If a variable is
+both in a volatile location (depending on a register) {\em and} a
+stable one (notably, on the stack), we modify the current bindings to
+forget the volatile one.
+
+\begin{code}
+getVolatileRegs :: PlainStgLiveVars -> FCode [MagicId]
+
+getVolatileRegs vars
+ = mapFCs snaffle_it (uniqSetToList vars) `thenFC` \ stuff ->
+ returnFC (catMaybes stuff)
+ where
+ snaffle_it var
+ = lookupBindC var `thenFC` \ (MkCgIdInfo _ volatile_loc stable_loc lf_info) ->
+ let
+ -- commoned-up code...
+ consider_reg reg
+ = if not (isVolatileReg reg) then
+ -- Potentially dies across C calls
+ -- For now, that's everything; we leave
+ -- it to the save-macros to decide which
+ -- regs *really* need to be saved.
+ returnFC Nothing
+ else
+ case stable_loc of
+ NoStableLoc -> returnFC (Just reg) -- got one!
+ is_a_stable_loc ->
+ -- has both volatile & stable locations;
+ -- force it to rely on the stable location
+ modifyBindC var nuke_vol_bind `thenC`
+ returnFC Nothing
+ in
+ case volatile_loc of
+ RegLoc reg -> consider_reg reg
+ VirHpLoc _ -> consider_reg Hp
+ VirNodeLoc _ -> consider_reg node
+ non_reg_loc -> returnFC Nothing
+
+ nuke_vol_bind (MkCgIdInfo i _ stable_loc lf_info)
+ = MkCgIdInfo i NoVolatileLoc stable_loc lf_info
+\end{code}
+
+\begin{code}
+getAtomAmodes :: [PlainStgAtom] -> FCode [CAddrMode]
+getAtomAmodes [] = returnFC []
+getAtomAmodes (atom:atoms)
+ = getAtomAmode atom `thenFC` \ amode ->
+ getAtomAmodes atoms `thenFC` \ amodes ->
+ returnFC ( amode : amodes )
+
+getAtomAmode :: PlainStgAtom -> FCode CAddrMode
+
+getAtomAmode (StgVarAtom var) = getCAddrMode var
+getAtomAmode (StgLitAtom lit) = returnFC (CLit lit)
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[binding-and-rebinding-interface]{Interface functions for binding and re-binding names}
+%* *
+%************************************************************************
+
+\begin{code}
+bindNewToAStack :: (Id, VirtualSpAOffset) -> Code
+bindNewToAStack (name, offset)
+ = addBindC name info
+ where
+ info = MkCgIdInfo name NoVolatileLoc (VirAStkLoc offset) mkLFArgument
+
+bindNewToBStack :: (Id, VirtualSpBOffset) -> Code
+bindNewToBStack (name, offset)
+ = addBindC name info
+ where
+ info = MkCgIdInfo name NoVolatileLoc (VirBStkLoc offset) (panic "bindNewToBStack")
+ -- B-stack things shouldn't need lambda-form info!
+
+bindNewToNode :: Id -> VirtualHeapOffset -> LambdaFormInfo -> Code
+bindNewToNode name offset lf_info
+ = addBindC name info
+ where
+ info = MkCgIdInfo name (VirNodeLoc offset) NoStableLoc lf_info
+
+-- Create a new temporary whose unique is that in the id,
+-- bind the id to it, and return the addressing mode for the
+-- temporary.
+bindNewToTemp :: Id -> FCode CAddrMode
+bindNewToTemp name
+ = let (temp_amode, id_info) = newTempAmodeAndIdInfo name mkLFArgument
+ -- This is used only for things we don't know
+ -- anything about; values returned by a case statement,
+ -- for example.
+ in
+ addBindC name id_info `thenC`
+ returnFC temp_amode
+
+bindNewToReg :: Id -> MagicId -> LambdaFormInfo -> Code
+bindNewToReg name magic_id lf_info
+ = addBindC name info
+ where
+ info = MkCgIdInfo name (RegLoc magic_id) NoStableLoc lf_info
+
+bindNewToLit name lit
+ = addBindC name info
+ where
+ info = MkCgIdInfo name NoVolatileLoc (LitLoc lit) (error "bindNewToLit")
+
+bindArgsToRegs :: [Id] -> [MagicId] -> Code
+bindArgsToRegs args regs
+ = listCs (zipWith bind args regs)
+ where
+ arg `bind` reg = bindNewToReg arg reg mkLFArgument
+
+{- UNUSED:
+bindNewToSameAsOther :: Id -> PlainStgAtom -> Code
+bindNewToSameAsOther name (StgVarAtom old_name)
+#ifdef DEBUG
+ | toplevelishId old_name = panic "bindNewToSameAsOther: global old name"
+ | otherwise
+#endif
+ = lookupBindC old_name `thenFC` \ old_stuff ->
+ addBindC name old_stuff
+
+bindNewToSameAsOther name (StgLitAtom lit)
+ = addBindC name info
+ where
+ info = MkCgIdInfo name NoVolatileLoc (LitLoc lit) (panic "bindNewToSameAsOther")
+-}
+\end{code}
+
+@bindNewPrimToAmode@ works only for certain addressing modes, because
+those are the only ones we've needed so far!
+
+\begin{code}
+bindNewPrimToAmode :: Id -> CAddrMode -> Code
+bindNewPrimToAmode name (CReg reg) = bindNewToReg name reg (panic "bindNewPrimToAmode")
+ -- was: mkLFArgument
+ -- LFinfo is irrelevant for primitives
+bindNewPrimToAmode name (CTemp uniq kind)
+ = addBindC name (tempIdInfo name uniq (panic "bindNewPrimToAmode"))
+ -- LFinfo is irrelevant for primitives
+
+bindNewPrimToAmode name (CLit lit) = bindNewToLit name lit
+
+bindNewPrimToAmode name (CVal (SpBRel _ offset) _)
+ = bindNewToBStack (name, offset)
+
+bindNewPrimToAmode name (CVal (NodeRel offset) _)
+ = bindNewToNode name offset (panic "bindNewPrimToAmode node")
+ -- See comment on idInfoPiecesToAmode for VirNodeLoc
+
+#ifdef DEBUG
+bindNewPrimToAmode name amode
+ = panic ("bindNew...:"++(uppShow 80 (pprAmode PprDebug amode)))
+#endif
+\end{code}
+
+\begin{code}
+rebindToAStack :: Id -> VirtualSpAOffset -> Code
+rebindToAStack name offset
+ = modifyBindC name replace_stable_fn
+ where
+ replace_stable_fn (MkCgIdInfo i vol stab einfo)
+ = MkCgIdInfo i vol (VirAStkLoc offset) einfo
+
+rebindToBStack :: Id -> VirtualSpBOffset -> Code
+rebindToBStack name offset
+ = modifyBindC name replace_stable_fn
+ where
+ replace_stable_fn (MkCgIdInfo i vol stab einfo)
+ = MkCgIdInfo i vol (VirBStkLoc offset) einfo
+
+{- UNUSED:
+rebindToTemp :: Id -> FCode CAddrMode
+rebindToTemp name
+ = let
+ (temp_amode, MkCgIdInfo _ new_vol _ _ {-LF info discarded-})
+ = newTempAmodeAndIdInfo name (panic "rebindToTemp")
+ in
+ modifyBindC name (replace_volatile_fn new_vol) `thenC`
+ returnFC temp_amode
+ where
+ replace_volatile_fn new_vol (MkCgIdInfo i vol stab einfo)
+ = MkCgIdInfo i new_vol stab einfo
+-}
+\end{code}
+
diff --git a/ghc/compiler/codeGen/CgCase.hi b/ghc/compiler/codeGen/CgCase.hi
new file mode 100644
index 0000000000..9a2ce69973
--- /dev/null
+++ b/ghc/compiler/codeGen/CgCase.hi
@@ -0,0 +1,25 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgCase where
+import AbsCSyn(AbstractC)
+import BasicLit(BasicLit)
+import CgBindery(CgIdInfo)
+import CgMonad(CgInfoDownwards, CgState, EndOfBlockInfo, StubFlag)
+import CostCentre(CostCentre)
+import HeapOffs(HeapOffset)
+import Id(Id, IdDetails)
+import IdInfo(IdInfo)
+import Maybes(Labda)
+import PrimOps(PrimOp)
+import StgSyn(StgAtom, StgBinding, StgCaseAlternatives, StgCaseDefault, StgExpr)
+import UniType(UniType)
+import UniqFM(UniqFM)
+import Unique(Unique)
+data CgState {-# GHC_PRAGMA MkCgState AbstractC (UniqFM CgIdInfo) ((Int, [(Int, StubFlag)], Int, Int), (Int, [Int], Int, Int), (HeapOffset, HeapOffset)) #-}
+data Id {-# GHC_PRAGMA Id Unique UniType IdInfo IdDetails #-}
+data StgCaseAlternatives a b {-# GHC_PRAGMA StgAlgAlts UniType [(Id, [a], [Bool], StgExpr a b)] (StgCaseDefault a b) | StgPrimAlts UniType [(BasicLit, StgExpr a b)] (StgCaseDefault a b) #-}
+data StgExpr a b {-# GHC_PRAGMA StgApp (StgAtom b) [StgAtom b] (UniqFM b) | StgConApp Id [StgAtom b] (UniqFM b) | StgPrimApp PrimOp [StgAtom b] (UniqFM b) | StgCase (StgExpr a b) (UniqFM b) (UniqFM b) Unique (StgCaseAlternatives a b) | StgLet (StgBinding a b) (StgExpr a b) | StgLetNoEscape (UniqFM b) (UniqFM b) (StgBinding a b) (StgExpr a b) | StgSCC UniType CostCentre (StgExpr a b) #-}
+cgCase :: StgExpr Id Id -> UniqFM Id -> UniqFM Id -> Unique -> StgCaseAlternatives Id Id -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 5 _U_ 2222222 _N_ _S_ "SLLLL" _N_ _N_ #-}
+saveVolatileVarsAndRegs :: UniqFM Id -> CgInfoDownwards -> CgState -> ((AbstractC, EndOfBlockInfo, Labda Int), CgState)
+ {-# GHC_PRAGMA _A_ 1 _U_ 222 _N_ _N_ _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgCase.lhs b/ghc/compiler/codeGen/CgCase.lhs
new file mode 100644
index 0000000000..1cd7696a11
--- /dev/null
+++ b/ghc/compiler/codeGen/CgCase.lhs
@@ -0,0 +1,1107 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1995
+%
+%********************************************************
+%* *
+\section[CgCase]{Converting @StgCase@ expressions}
+%* *
+%********************************************************
+
+\begin{code}
+#include "HsVersions.h"
+
+module CgCase (
+ cgCase,
+ saveVolatileVarsAndRegs,
+
+ -- and to make the interface self-sufficient...
+ StgExpr, Id, StgCaseAlternatives, CgState
+ ) where
+
+IMPORT_Trace -- ToDo: rm (debugging)
+import Outputable
+import Pretty
+
+import StgSyn
+import CgMonad
+import AbsCSyn
+
+import AbsPrel ( PrimOp(..), primOpCanTriggerGC
+ IF_ATTACK_PRAGMAS(COMMA tagOf_PrimOp)
+ IF_ATTACK_PRAGMAS(COMMA pprPrimOp)
+ )
+import AbsUniType ( kindFromType, getTyConDataCons,
+ getUniDataSpecTyCon, getUniDataSpecTyCon_maybe,
+ isEnumerationTyCon,
+ UniType
+ )
+import CgBindery -- all of it
+import CgCon ( buildDynCon, bindConArgs )
+import CgExpr ( cgExpr, getPrimOpArgAmodes )
+import CgHeapery ( heapCheck )
+import CgRetConv -- lots of stuff
+import CgStackery -- plenty
+import CgTailCall ( tailCallBusiness, performReturn )
+import CgUsages -- and even more
+import CLabelInfo -- bunches of things...
+import ClosureInfo {-( blackHoleClosureInfo, mkConLFInfo, mkLFArgument,
+ layOutDynCon
+ )-}
+import CmdLineOpts ( GlobalSwitch(..) )
+import CostCentre ( useCurrentCostCentre, CostCentre )
+import BasicLit ( kindOfBasicLit )
+import Id ( getDataConTag, getIdKind, fIRST_TAG, isDataCon,
+ toplevelishId, getInstantiatedDataConSig,
+ ConTag(..), DataCon(..)
+ )
+import Maybes ( catMaybes, Maybe(..) )
+import PrimKind ( getKindSize, isFollowableKind, retKindSize, PrimKind(..) )
+import UniqSet -- ( uniqSetToList, UniqSet(..) )
+import Util
+\end{code}
+
+\begin{code}
+data GCFlag
+ = GCMayHappen -- The scrutinee may involve GC, so everything must be
+ -- tidy before the code for the scrutinee.
+
+ | NoGC -- The scrutinee is a primitive value, or a call to a
+ -- primitive op which does no GC. Hence the case can
+ -- be done inline, without tidying up first.
+\end{code}
+
+It is quite interesting to decide whether to put a heap-check
+at the start of each alternative. Of course we certainly have
+to do so if the case forces an evaluation, or if there is a primitive
+op which can trigger GC.
+
+A more interesting situation is this:
+
+\begin{verbatim}
+ !A!;
+ ...A...
+ case x# of
+ 0# -> !B!; ...B...
+ default -> !C!; ...C...
+\end{verbatim}
+
+where \tr{!x!} indicates a possible heap-check point. The heap checks
+in the alternatives {\em can} be omitted, in which case the topmost
+heapcheck will take their worst case into account.
+
+In favour of omitting \tr{!B!}, \tr{!C!}:
+
+\begin{itemize}
+\item
+{\em May} save a heap overflow test,
+ if ...A... allocates anything. The other advantage
+ of this is that we can use relative addressing
+ from a single Hp to get at all the closures so allocated.
+\item
+ No need to save volatile vars etc across the case
+\end{itemize}
+
+Against:
+
+\begin{itemize}
+\item
+ May do more allocation than reqd. This sometimes bites us
+ badly. For example, nfib (ha!) allocates about 30\% more space if the
+ worst-casing is done, because many many calls to nfib are leaf calls
+ which don't need to allocate anything.
+
+ This never hurts us if there is only one alternative.
+\end{itemize}
+
+
+*** NOT YET DONE *** The difficulty is that \tr{!B!}, \tr{!C!} need
+to take account of what is live, and that includes all live volatile
+variables, even if they also have stable analogues. Furthermore, the
+stack pointers must be lined up properly so that GC sees tidy stacks.
+If these things are done, then the heap checks can be done at \tr{!B!} and
+\tr{!C!} without a full save-volatile-vars sequence.
+
+\begin{code}
+cgCase :: PlainStgExpr
+ -> PlainStgLiveVars
+ -> PlainStgLiveVars
+ -> Unique
+ -> PlainStgCaseAlternatives
+ -> Code
+\end{code}
+
+Several special cases for primitive operations.
+
+******* TO DO TO DO: fix what follows
+
+Special case for
+
+ case (op x1 ... xn) of
+ y -> e
+
+where the type of the case scrutinee is a multi-constuctor algebraic type.
+Then we simply compile code for
+
+ let y = op x1 ... xn
+ in
+ e
+
+In this case:
+
+ case (op x1 ... xn) of
+ C a b -> ...
+ y -> e
+
+where the type of the case scrutinee is a multi-constuctor algebraic type.
+we just bomb out at the moment. It never happens in practice.
+
+**** END OF TO DO TO DO
+
+\begin{code}
+cgCase scrut@(StgPrimApp op args _) live_in_whole_case live_in_alts uniq
+ (StgAlgAlts _ alts (StgBindDefault id _ deflt_rhs))
+ = if not (null alts) then
+ panic "cgCase: case on PrimOp with default *and* alts\n"
+ -- For now, die if alts are non-empty
+ else
+#if 0
+ pprTrace "cgCase:prim app returning alg data type: bad code!" (ppr PprDebug scrut) $
+ -- See above TO DO TO DO
+#endif
+ cgExpr (StgLet (StgNonRec id scrut_rhs) deflt_rhs)
+ where
+ scrut_rhs = StgRhsClosure useCurrentCostCentre stgArgOcc{-safe-} scrut_free_vars
+ Updatable [] scrut
+ scrut_free_vars = [ fv | StgVarAtom fv <- args, not (toplevelishId fv) ]
+ -- Hack, hack
+\end{code}
+
+
+\begin{code}
+-- cgCase for a primitive-op scrutinee.  Two sub-cases, split on whether
+-- the primop can trigger GC.  No-GC case: perform the op straight into
+-- the result temporaries and scrutinise them inline.  May-GC case: put
+-- the args in registers, save volatile vars, and build a return
+-- continuation before performing the op.
+cgCase (StgPrimApp op args _) live_in_whole_case live_in_alts uniq alts
+ | not (primOpCanTriggerGC op)
+ =
+ -- Get amodes for the arguments and results
+ getPrimOpArgAmodes op args `thenFC` \ arg_amodes ->
+ let
+ result_amodes = getPrimAppResultAmodes uniq alts
+ -- liveness is never consulted for a non-GC primop; panic if touched
+ liveness_mask = panic "cgCase: liveness of non-GC-ing primop touched\n"
+ in
+ -- Perform the operation
+ getVolatileRegs live_in_alts `thenFC` \ vol_regs ->
+
+ profCtrC SLIT("SET_ACTIVITY") [CLitLit SLIT("ACT_PRIM") IntKind] `thenC`
+
+ absC (COpStmt result_amodes op
+ arg_amodes -- note: no liveness arg
+ liveness_mask vol_regs) `thenC`
+
+ profCtrC SLIT("SET_ACTIVITY") [CLitLit SLIT("ACT_PRIM_STOP") IntKind] `thenC`
+
+ -- Scrutinise the result
+ cgInlineAlts NoGC uniq alts
+
+ | otherwise -- *Can* trigger GC
+ = getPrimOpArgAmodes op args `thenFC` \ arg_amodes ->
+
+ -- Get amodes for the arguments and results, and assign to regs
+ -- (Can-trigger-gc primops guarantee to have their (nonRobust)
+ -- args in regs)
+ let
+ op_result_regs = assignPrimOpResultRegs op
+
+ op_result_amodes = map CReg op_result_regs
+
+ (op_arg_amodes, liveness_mask, arg_assts)
+ = makePrimOpArgsRobust op arg_amodes
+
+ liveness_arg = mkIntCLit liveness_mask
+ in
+ -- Tidy up in case GC happens...
+
+ -- Nota Bene the use of live_in_whole_case in nukeDeadBindings.
+ -- Reason: the arg_assts computed above may refer to some stack slots
+ -- which are not live in the alts. So we mustn't use those slots
+ -- to save volatile vars in!
+ nukeDeadBindings live_in_whole_case `thenC`
+ saveVolatileVars live_in_alts `thenFC` \ volatile_var_save_assts ->
+
+ getEndOfBlockInfo `thenFC` \ eob_info ->
+ forkEval eob_info nopC
+ (getAbsC (cgInlineAlts GCMayHappen uniq alts) `thenFC` \ abs_c ->
+ absC (CRetUnVector vtbl_label (CLabelledCode return_label abs_c))
+ `thenC`
+ returnFC (CaseAlts (CUnVecLbl return_label vtbl_label)
+ Nothing{-no semi-tagging-}))
+ `thenFC` \ new_eob_info ->
+
+ -- Record the continuation info
+ setEndOfBlockInfo new_eob_info (
+
+ -- Now "return" to the inline alternatives; this will get
+ -- compiled to a fall-through.
+ let
+ simultaneous_assts = arg_assts `mkAbsCStmts` volatile_var_save_assts
+
+ -- do_op_and_continue will be passed an amode for the continuation
+ do_op_and_continue sequel
+ = profCtrC SLIT("SET_ACTIVITY") [CLitLit SLIT("ACT_PRIM") IntKind] `thenC`
+
+ absC (COpStmt op_result_amodes
+ op
+ (pin_liveness op liveness_arg op_arg_amodes)
+ liveness_mask
+ [{-no vol_regs-}])
+ `thenC`
+
+ profCtrC SLIT("SET_ACTIVITY") [CLitLit SLIT("ACT_PRIM_STOP") IntKind] `thenC`
+
+ sequelToAmode sequel `thenFC` \ dest_amode ->
+ absC (CReturn dest_amode DirectReturn)
+
+ -- Note: we CJump even for algebraic data types,
+ -- because cgInlineAlts always generates code, never a
+ -- vector.
+ in
+ performReturn simultaneous_assts do_op_and_continue live_in_alts
+ )
+ where
+ -- for all PrimOps except ccalls, we pin the liveness info
+ -- on as the first "argument"
+ -- ToDo: un-duplicate?
+
+ pin_liveness (CCallOp _ _ _ _ _) _ args = args
+ pin_liveness other_op liveness_arg args
+ = liveness_arg :args
+
+ vtbl_label = mkVecTblLabel uniq
+ return_label = mkReturnPtLabel uniq
+
+\end{code}
+
+Another special case: scrutinising a primitive-typed variable. No
+evaluation required. We don't save volatile variables, nor do we do a
+heap-check in the alternatives. Instead, the heap usage of the
+alternatives is worst-cased and passed upstream. This can result in
+allocating more heap than strictly necessary, but it will sometimes
+eliminate a heap check altogether.
+
+\begin{code}
+-- Scrutinising a primitive-typed variable: no evaluation needed, so no
+-- volatile-var saving and no heap check here (the alternatives' heap
+-- usage is worst-cased and passed upstream -- see the prose above).
+cgCase (StgApp v [] _) live_in_whole_case live_in_alts uniq (StgPrimAlts ty alts deflt)
+ = getAtomAmode v `thenFC` \ amode ->
+ cgPrimAltsGivenScrutinee NoGC amode alts deflt
+\end{code}
+
+Special case: scrutinising a non-primitive variable.
+This can be done a little better than the general case, because
+we can reuse/trim the stack slot holding the variable (if it is in one).
+
+\begin{code}
+-- Scrutinising a non-primitive variable (possibly applied to args):
+-- compile the scrutinee as a tail call, with the alternatives as the
+-- return continuation.  Lets us reuse/trim the variable's stack slot.
+cgCase (StgApp (StgVarAtom fun) args _ {-lvs must be same as live_in_alts-})
+ live_in_whole_case live_in_alts uniq alts@(StgAlgAlts _ _ _)
+ =
+ getCAddrModeAndInfo fun `thenFC` \ (fun_amode, lf_info) ->
+ getAtomAmodes args `thenFC` \ arg_amodes ->
+
+ -- Squish the environment
+ nukeDeadBindings live_in_alts `thenC`
+ saveVolatileVarsAndRegs live_in_alts
+ `thenFC` \ (save_assts, alts_eob_info, maybe_cc_slot) ->
+
+ forkEval alts_eob_info
+ nopC (cgEvalAlts maybe_cc_slot uniq alts) `thenFC` \ scrut_eob_info ->
+ setEndOfBlockInfo scrut_eob_info (
+ tailCallBusiness fun fun_amode lf_info arg_amodes live_in_alts save_assts
+ )
+
+\end{code}
+
+Finally, here is the general case.
+
+\begin{code}
+-- General case: save volatile vars/registers, compile the alternatives
+-- as a return continuation, then evaluate the scrutinee under that
+-- end-of-block info.
+cgCase expr live_in_whole_case live_in_alts uniq alts
+ = -- Figure out what volatile variables to save
+ nukeDeadBindings live_in_whole_case `thenC`
+ saveVolatileVarsAndRegs live_in_alts
+ `thenFC` \ (save_assts, alts_eob_info, maybe_cc_slot) ->
+
+ -- Save those variables right now!
+ absC save_assts `thenC`
+
+ forkEval alts_eob_info
+ (nukeDeadBindings live_in_alts)
+ (cgEvalAlts maybe_cc_slot uniq alts) `thenFC` \ scrut_eob_info ->
+
+ setEndOfBlockInfo scrut_eob_info (cgExpr expr)
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgCase-primops]{Primitive applications}
+%* *
+%************************************************************************
+
+Get result amodes for a primitive operation, in the case where GC can't happen.
+The amodes are returned in canonical order, ready for the prim-op!
+
+ Alg case: temporaries named as in the alternatives,
+ plus (CTemp u) for the tag (if needed)
+ Prim case: (CTemp u)
+
+This is all disgusting, because these amodes must be consistent with those
+invented by CgAlgAlts.
+
+\begin{code}
+-- See the commentary above: result amodes are returned in canonical
+-- order, and must stay consistent with those invented by CgAlgAlts.
+getPrimAppResultAmodes
+ :: Unique
+ -> PlainStgCaseAlternatives
+ -> [CAddrMode]
+\end{code}
+
+\begin{code}
+-- If there's an StgBindDefault which does use the bound
+-- variable, then we can only handle it if the type involved is
+-- an enumeration type. That's important in the case
+-- of comparisions:
+--
+-- case x ># y of
+-- r -> f r
+--
+-- The only reason for the restriction to *enumeration* types is our
+-- inability to invent suitable temporaries to hold the results;
+-- Elaborating the CTemp addr mode to have a second uniq field
+-- (which would simply count from 1) would solve the problem.
+-- Anyway, cgInlineAlts is now capable of handling all cases;
+-- it's only this function which is being wimpish.
+
+getPrimAppResultAmodes uniq (StgAlgAlts ty alts (StgBindDefault _ True {- used -} _))
+ | isEnumerationTyCon spec_tycon = [tag_amode]
+ | otherwise = panic "getPrimAppResultAmodes: non-enumeration algebraic alternatives with default"
+ where
+ -- A temporary variable to hold the tag; this is unaffected by GC because
+ -- the heap-checks in the branches occur after the switch
+ tag_amode = CTemp uniq IntKind
+ (spec_tycon, _, _) = getUniDataSpecTyCon ty
+
+getPrimAppResultAmodes uniq (StgAlgAlts ty alts other_default)
+ -- Default is either StgNoDefault or StgBindDefault with unused binder
+ = case alts of
+ [_] -> arg_amodes -- No need for a tag
+ other -> tag_amode : arg_amodes
+ where
+ -- A temporary variable to hold the tag; this is unaffected by GC because
+ -- the heap-checks in the branches occur after the switch
+ tag_amode = CTemp uniq IntKind
+
+ -- Sort alternatives into canonical order; there must be a complete
+ -- set because there's no default case.
+ sorted_alts = sortLt lt alts
+ (con1,_,_,_) `lt` (con2,_,_,_) = getDataConTag con1 < getDataConTag con2
+
+ arg_amodes :: [CAddrMode]
+
+ -- Turn them into amodes
+ arg_amodes = concat (map mk_amodes sorted_alts)
+ mk_amodes (con, args, use_mask, rhs)
+ = [ CTemp (getTheUnique arg) (getIdKind arg) | arg <- args ]
+\end{code}
+
+The situation is simpler for primitive
+results, because there is only one!
+
+\begin{code}
+-- Primitive result: exactly one result, held in (CTemp uniq).
+getPrimAppResultAmodes uniq (StgPrimAlts ty _ _)
+ = [CTemp uniq kind]
+ where
+ kind = kindFromType ty
+\end{code}
+
+
+%************************************************************************
+%* *
+\subsection[CgCase-alts]{Alternatives}
+%* *
+%************************************************************************
+
+@cgEvalAlts@ returns an addressing mode for a continuation for the
+alternatives of a @case@, used in a context when there
+is some evaluation to be done.
+
+\begin{code}
+cgEvalAlts :: Maybe VirtualSpBOffset -- Offset of cost-centre to be restored, if any
+ -> Unique
+ -> PlainStgCaseAlternatives
+ -> FCode Sequel -- Any addr modes inside are guaranteed to be a label
+ -- so that we can duplicate it without risk of
+ -- duplicating code
+
+-- Algebraic alternatives: build a return vector (or unvectored switch)
+-- and, when vector-returnable, semi-tagging info as well.
+cgEvalAlts cc_slot uniq (StgAlgAlts ty alts deflt)
+ = -- Generate the instruction to restore cost centre, if any
+ restoreCurrentCostCentre cc_slot `thenFC` \ cc_restore ->
+
+ -- Generate sequel info for use downstream
+ -- At the moment, we only do it if the type is vector-returnable.
+ -- Reason: if not, then it costs extra to label the
+ -- alternatives, because we'd get return code like:
+ --
+ -- switch TagReg { 0 : JMP(alt_1); 1 : JMP(alt_2) ..etc }
+ --
+ -- which is worse than having the alt code in the switch statement
+
+ let
+ (spec_tycon, _, _) = getUniDataSpecTyCon ty
+
+ use_labelled_alts
+ = case ctrlReturnConvAlg spec_tycon of
+ VectoredReturn _ -> True
+ _ -> False
+
+ semi_tagged_stuff
+ = if not use_labelled_alts then
+ Nothing -- no semi-tagging info
+ else
+ cgSemiTaggedAlts uniq alts deflt -- Just <something>
+ in
+ cgAlgAlts GCMayHappen uniq cc_restore use_labelled_alts ty alts deflt
+ `thenFC` \ (tagged_alt_absCs, deflt_absC) ->
+
+ mkReturnVector uniq ty tagged_alt_absCs deflt_absC `thenFC` \ return_vec ->
+
+ returnFC (CaseAlts return_vec semi_tagged_stuff)
+
+-- Primitive alternatives: compile to a switch wrapped in a labelled,
+-- unvectored return block (restore-cost-centre first).
+cgEvalAlts cc_slot uniq (StgPrimAlts ty alts deflt)
+ = -- Generate the instruction to restore cost centre, if any
+ restoreCurrentCostCentre cc_slot `thenFC` \ cc_restore ->
+
+ -- Generate the switch
+ getAbsC (cgPrimAlts GCMayHappen uniq ty alts deflt) `thenFC` \ abs_c ->
+
+ -- Generate the labelled block, starting with restore-cost-centre
+ absC (CRetUnVector vtbl_label
+ (CLabelledCode return_label (cc_restore `mkAbsCStmts` abs_c)))
+ `thenC`
+ -- Return an amode for the block
+ returnFC (CaseAlts (CUnVecLbl return_label vtbl_label) Nothing{-no semi-tagging-})
+ where
+ vtbl_label = mkVecTblLabel uniq
+ return_label = mkReturnPtLabel uniq
+\end{code}
+
+
+\begin{code}
+-- Compile case alternatives "in line": the scrutinee's result is
+-- already in the amodes invented by getPrimAppResultAmodes, so no
+-- evaluation or return-convention machinery is needed.
+cgInlineAlts :: GCFlag -> Unique
+ -> PlainStgCaseAlternatives
+ -> Code
+\end{code}
+
+First case: algebraic case, exactly one alternative, no default.
+In this case the primitive op will not have set a temporary to the
+tag, so we shouldn't generate a switch statement. Instead we just
+do the right thing.
+
+\begin{code}
+-- Exactly one algebraic alternative, no default: no tag temporary was
+-- set, so no switch -- just compile the single right-hand side.
+cgInlineAlts gc_flag uniq (StgAlgAlts ty [alt@(con,args,use_mask,rhs)] StgNoDefault)
+ = cgAlgAltRhs gc_flag con args use_mask rhs
+\end{code}
+
+Second case: algebraic case, several alternatives.
+Tag is held in a temporary.
+
+\begin{code}
+-- Several algebraic alternatives: the tag is in (CTemp uniq); switch on it.
+cgInlineAlts gc_flag uniq (StgAlgAlts ty alts deflt)
+ = cgAlgAlts gc_flag uniq AbsCNop{-restore_cc-} False{-no semi-tagging-}
+ ty alts deflt `thenFC` \ (tagged_alts, deflt_c) ->
+
+ -- Do the switch
+ absC (mkAlgAltsCSwitch tag_amode tagged_alts deflt_c)
+ where
+ -- A temporary variable to hold the tag; this is unaffected by GC because
+ -- the heap-checks in the branches occur after the switch
+ tag_amode = CTemp uniq IntKind
+\end{code}
+
+=========== OLD: we *can* now handle this case ================
+
+Next, a case we can't deal with: an algebraic case with no evaluation
+required (so it is in-line), and a default case as well. In this case
+we require all the alternatives written out, so that we can invent
+suitable binders to pass to the PrimOp. A default case defeats this.
+Could be fixed, but probably isn't worth it.
+
+\begin{code}
+{- ============= OLD
+cgInlineAlts gc_flag uniq (StgAlgAlts ty alts other_default)
+ = panic "cgInlineAlts: alg alts with default"
+================= END OF OLD -}
+\end{code}
+
+Third (real) case: primitive result type.
+
+\begin{code}
+-- Primitive result type: hand off to cgPrimAlts.
+cgInlineAlts gc_flag uniq (StgPrimAlts ty alts deflt)
+ = cgPrimAlts gc_flag uniq ty alts deflt
+\end{code}
+
+
+%************************************************************************
+%* *
+\subsection[CgCase-alg-alts]{Algebraic alternatives}
+%* *
+%************************************************************************
+
+In @cgAlgAlts@, none of the binders in the alternatives are
+assumed to be yet bound.
+
+\begin{code}
+-- Compile algebraic alternatives; none of the binders are yet bound.
+-- Returns (tag, code) pairs for the branches plus the default's code.
+cgAlgAlts :: GCFlag
+ -> Unique
+ -> AbstractC -- Restore-cost-centre instruction
+ -> Bool -- True <=> branches must be labelled
+ -> UniType -- From the case statement
+ -> [(Id, [Id], [Bool], PlainStgExpr)] -- The alternatives
+ -> PlainStgCaseDefault -- The default
+ -> FCode ([(ConTag, AbstractC)], -- The branches
+ AbstractC -- The default case
+ )
+\end{code}
+
+The case with a default which has a binder is different. We need to
+pick all the constructors which aren't handled explicitly by an
+alternative, and which return their results in registers, allocate
+them explicitly in the heap, and jump to a join point for the default
+case.
+
+OLD: All of this only works if a heap-check is required anyway, because
+otherwise it isn't safe to allocate.
+
+NEW (July 94): now false! It should work regardless of gc_flag,
+because of the extra_branches argument now added to forkAlts.
+
+We put a heap-check at the join point, for the benefit of constructors
+which don't need to do allocation. This means that ones which do need
+to allocate may end up doing two heap-checks; but that's just too bad.
+(We'd need two join labels otherwise. ToDo.)
+
+It's all pretty turgid anyway.
+
+\begin{code}
+-- Special case: a default which *uses* its binder.  For every
+-- constructor not handled explicitly that returns in registers, emit an
+-- extra branch that allocates the constructor in the heap (so the
+-- binder has something to point at) and jumps to a shared join point
+-- for the default code.  See the commentary above.
+cgAlgAlts gc_flag uniq restore_cc semi_tagging
+ ty alts deflt@(StgBindDefault binder True{-used-} _)
+ = forkAlts (map (cgAlgAlt gc_flag uniq restore_cc semi_tagging) alts)
+ extra_branches
+ (cgAlgDefault gc_flag uniq restore_cc must_label_default deflt)
+ where
+ extra_branches :: [FCode (ConTag, AbstractC)]
+ extra_branches = catMaybes (map mk_extra_branch default_cons)
+
+ must_label_default = semi_tagging || not (null extra_branches)
+
+ default_join_lbl = mkDefaultLabel uniq
+ jump_instruction = CJump (CLbl default_join_lbl CodePtrKind)
+
+ (spec_tycon, _, spec_cons)
+ = -- trace ("cgCase:tycon:"++(ppShow 80 (ppAboves [
+ -- ppr PprDebug uniq,
+ -- ppr PprDebug ty,
+ -- ppr PprShowAll binder
+ -- ]))) (
+ getUniDataSpecTyCon ty
+ -- )
+
+ alt_cons = [ con | (con,_,_,_) <- alts ]
+
+ default_cons = [ spec_con | spec_con <- spec_cons, -- In this type
+ spec_con `not_elem` alt_cons ] -- Not handled explicitly
+ where
+ not_elem = isn'tIn "cgAlgAlts"
+
+ -- (mk_extra_branch con) returns a maybe for the extra branch for con.
+ -- The "maybe" is because con may return in heap, in which case there is
+ -- nothing to do. Otherwise, we have a special case for a nullary constructor,
+ -- but in the general case we do an allocation and heap-check.
+
+ mk_extra_branch :: DataCon -> (Maybe (FCode (ConTag, AbstractC)))
+
+ mk_extra_branch con
+ = ASSERT(isDataCon con)
+ case dataReturnConvAlg con of
+ ReturnInHeap -> Nothing
+ ReturnInRegs rs -> Just (getAbsC (alloc_code rs) `thenFC` \ abs_c ->
+ returnFC (tag, abs_c)
+ )
+ where
+ lf_info = mkConLFInfo con
+ tag = getDataConTag con
+ closure_lbl = mkClosureLabel con
+
+ -- alloc_code generates code to allocate constructor con, whose args are
+ -- in the arguments to alloc_code, assigning the result to Node.
+ alloc_code :: [MagicId] -> Code
+
+ alloc_code regs
+ = possibleHeapCheck gc_flag regs False (
+ buildDynCon binder useCurrentCostCentre con
+ (map CReg regs) (all zero_size regs)
+ `thenFC` \ idinfo ->
+ idInfoToAmode PtrKind idinfo `thenFC` \ amode ->
+
+ absC (CAssign (CReg node) amode) `thenC`
+ absC jump_instruction
+ )
+ where
+ zero_size reg = getKindSize (kindFromMagicId reg) == 0
+\end{code}
+
+Now comes the general case
+
+\begin{code}
+-- General case: no "extra branches" needed.
+cgAlgAlts gc_flag uniq restore_cc must_label_branches ty alts deflt
+ {- The deflt is either StgNoDefault or a BindDefault which doesn't use the binder -}
+ = forkAlts (map (cgAlgAlt gc_flag uniq restore_cc must_label_branches) alts)
+ [{- No "extra branches" -}]
+ (cgAlgDefault gc_flag uniq restore_cc must_label_branches deflt)
+\end{code}
+
+\begin{code}
+-- Compile the default branch of an algebraic case: no default, a
+-- default ignoring its binder, or a default using its binder (bound to
+-- Node).  The branch is labelled and CJump'd to when required.
+cgAlgDefault :: GCFlag
+ -> Unique -> AbstractC -> Bool -- turgid state...
+ -> PlainStgCaseDefault -- input
+ -> FCode AbstractC -- output
+
+cgAlgDefault gc_flag uniq restore_cc must_label_branch
+ StgNoDefault
+ = returnFC AbsCNop
+
+cgAlgDefault gc_flag uniq restore_cc must_label_branch
+ (StgBindDefault _ False{-binder not used-} rhs)
+
+ = getAbsC (absC restore_cc `thenC`
+ possibleHeapCheck gc_flag [] False (cgExpr rhs)) `thenFC` \ abs_c ->
+ let
+ final_abs_c | must_label_branch = CJump (CLabelledCode lbl abs_c)
+ | otherwise = abs_c
+ in
+ returnFC final_abs_c
+ where
+ lbl = mkDefaultLabel uniq
+
+
+cgAlgDefault gc_flag uniq restore_cc must_label_branch
+ (StgBindDefault binder True{-binder used-} rhs)
+
+ = -- We have arranged that Node points to the thing, even
+ -- if we return in registers
+ bindNewToReg binder node mkLFArgument `thenC`
+ getAbsC (absC restore_cc `thenC`
+ possibleHeapCheck gc_flag [node] False (cgExpr rhs)
+ -- Node is live, but doesn't need to point at the thing itself;
+ -- it's ok for Node to point to an indirection or FETCH_ME
+ -- Hence no need to re-enter Node.
+ ) `thenFC` \ abs_c ->
+
+ let
+ final_abs_c | must_label_branch = CJump (CLabelledCode lbl abs_c)
+ | otherwise = abs_c
+ in
+ returnFC final_abs_c
+ where
+ lbl = mkDefaultLabel uniq
+
+
+-- Compile one algebraic alternative, labelling the branch when required.
+cgAlgAlt :: GCFlag
+ -> Unique -> AbstractC -> Bool -- turgid state
+ -> (Id, [Id], [Bool], PlainStgExpr)
+ -> FCode (ConTag, AbstractC)
+
+cgAlgAlt gc_flag uniq restore_cc must_label_branch (con, args, use_mask, rhs)
+ = getAbsC (absC restore_cc `thenC`
+ cgAlgAltRhs gc_flag con args use_mask rhs) `thenFC` \ abs_c ->
+ let
+ final_abs_c | must_label_branch = CJump (CLabelledCode lbl abs_c)
+ | otherwise = abs_c
+ in
+ returnFC (tag, final_abs_c)
+ where
+ tag = getDataConTag con
+ lbl = mkAltLabel uniq tag
+
+-- Bind the constructor arguments (per the data-return convention) and
+-- compile the right-hand side, with a heap check when GC may happen.
+cgAlgAltRhs :: GCFlag -> Id -> [Id] -> [Bool] -> PlainStgExpr -> Code
+
+cgAlgAltRhs gc_flag con args use_mask rhs
+ = let
+ (live_regs, node_reqd)
+ = case (dataReturnConvAlg con) of
+ ReturnInHeap -> ([], True)
+ ReturnInRegs regs -> ([reg | (reg,True) <- regs `zipEqual` use_mask], False)
+ -- Pick the live registers using the use_mask
+ -- Doing so is IMPORTANT, because with semi-tagging
+ -- enabled only the live registers will have valid
+ -- pointers in them.
+ in
+ possibleHeapCheck gc_flag live_regs node_reqd (
+ (case gc_flag of
+ NoGC -> mapFCs bindNewToTemp args `thenFC` \ _ ->
+ nopC
+ GCMayHappen -> bindConArgs con args
+ ) `thenC`
+ cgExpr rhs
+ )
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgCase-semi-tagged-alts]{The code to deal with semi-tagging}
+%* *
+%************************************************************************
+
+Turgid-but-non-monadic code to conjure up the required info from
+algebraic case alternatives for semi-tagging.
+
+\begin{code}
+-- Build the semi-tagging info: for each alternative (and the default),
+-- the code to run when the scrutinee is found to be already evaluated.
+cgSemiTaggedAlts :: Unique
+ -> [(Id, [Id], [Bool], PlainStgExpr)]
+ -> StgCaseDefault Id Id
+ -> SemiTaggingStuff
+
+cgSemiTaggedAlts uniq alts deflt
+ = Just (map st_alt alts, st_deflt deflt)
+ where
+ st_deflt StgNoDefault = Nothing
+
+ st_deflt (StgBindDefault binder binder_used _)
+ = Just (if binder_used then Just binder else Nothing,
+ (CCallProfCtrMacro SLIT("RET_SEMI_BY_DEFAULT") [], -- ToDo: monadise?
+ mkDefaultLabel uniq)
+ )
+
+ st_alt (con, args, use_mask, _)
+ = case (dataReturnConvAlg con) of
+
+ ReturnInHeap ->
+ -- Ha! Nothing to do; Node already points to the thing
+ (con_tag,
+ (CCallProfCtrMacro SLIT("RET_SEMI_IN_HEAP") [], -- ToDo: monadise?
+ join_label)
+ )
+
+ ReturnInRegs regs ->
+ -- We have to load the live registers from the constructor
+ -- pointed to by Node.
+ let
+ (_, regs_w_offsets) = layOutDynCon con kindFromMagicId regs
+
+ used_regs = selectByMask use_mask regs
+
+ used_regs_w_offsets = [ ro | ro@(reg,offset) <- regs_w_offsets,
+ reg `is_elem` used_regs]
+
+ is_elem = isIn "cgSemiTaggedAlts"
+ in
+ (con_tag,
+ (mkAbstractCs [
+ CCallProfCtrMacro SLIT("RET_SEMI_IN_REGS") [], -- ToDo: macroise?
+ CSimultaneous (mkAbstractCs (map move_to_reg used_regs_w_offsets))],
+ join_label))
+ where
+ con_tag = getDataConTag con
+ join_label = mkAltLabel uniq con_tag
+
+ move_to_reg :: (MagicId, VirtualHeapOffset {-from Node-}) -> AbstractC
+ move_to_reg (reg, offset)
+ = CAssign (CReg reg) (CVal (NodeRel offset) (kindFromMagicId reg))
+
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgCase-prim-alts]{Primitive alternatives}
+%* *
+%************************************************************************
+
+@cgPrimAlts@ generates a suitable @CSwitch@ for dealing with the
+alternatives of a primitive @case@, given an addressing mode for the
+thing to scrutinise. It also keeps track of the maximum stack depth
+encountered down any branch.
+
+As usual, no binders in the alternatives are yet bound.
+
+\begin{code}
+-- Switch on a primitive scrutinee; where the scrutinee lives (a
+-- temporary vs. the standard return register) depends on gc_flag.
+cgPrimAlts :: GCFlag
+ -> Unique
+ -> UniType
+ -> [(BasicLit, PlainStgExpr)] -- Alternatives
+ -> PlainStgCaseDefault -- Default
+ -> Code
+
+cgPrimAlts gc_flag uniq ty alts deflt
+ = cgPrimAltsGivenScrutinee gc_flag scrutinee alts deflt
+ where
+ -- A temporary variable, or standard register, to hold the result
+ scrutinee = case gc_flag of
+ NoGC -> CTemp uniq kind
+ GCMayHappen -> CReg (dataReturnConvPrim kind)
+
+ kind = kindFromType ty
+
+
+-- Fork the alternatives and the default, then emit the switch.
+cgPrimAltsGivenScrutinee gc_flag scrutinee alts deflt
+ = forkAlts (map (cgPrimAlt gc_flag) alts)
+ [{- No "extra branches" -}]
+ (cgPrimDefault gc_flag scrutinee deflt) `thenFC` \ (alt_absCs, deflt_absC) ->
+ absC (CSwitch scrutinee alt_absCs deflt_absC)
+ -- CSwitch does sensible things with one or zero alternatives
+
+
+-- Compile one literal alternative of a primitive case.
+cgPrimAlt :: GCFlag
+ -> (BasicLit, PlainStgExpr) -- The alternative
+ -> FCode (BasicLit, AbstractC) -- Its compiled form
+
+cgPrimAlt gc_flag (lit, rhs)
+ = getAbsC rhs_code `thenFC` \ absC ->
+ returnFC (lit,absC)
+ where
+ rhs_code = possibleHeapCheck gc_flag [] False (cgExpr rhs )
+
+-- Compile the default branch of a primitive case, binding the binder
+-- to the scrutinee amode when it is used.
+cgPrimDefault :: GCFlag
+ -> CAddrMode -- Scrutinee
+ -> PlainStgCaseDefault
+ -> FCode AbstractC
+
+cgPrimDefault gc_flag scrutinee StgNoDefault
+ = panic "cgPrimDefault: No default in prim case"
+
+cgPrimDefault gc_flag scrutinee (StgBindDefault _ False{-binder not used-} rhs)
+ = getAbsC (possibleHeapCheck gc_flag [] False (cgExpr rhs ))
+
+cgPrimDefault gc_flag scrutinee (StgBindDefault binder True{-used-} rhs)
+ = getAbsC (possibleHeapCheck gc_flag regs False rhs_code)
+ where
+ regs = if isFollowableKind (getAmodeKind scrutinee) then
+ [node] else []
+
+ rhs_code = bindNewPrimToAmode binder scrutinee `thenC`
+ cgExpr rhs
+\end{code}
+
+
+%************************************************************************
+%* *
+\subsection[CgCase-tidy]{Code for tidying up prior to an eval}
+%* *
+%************************************************************************
+
+\begin{code}
+-- Make everything safe before an eval: save volatile vars, the current
+-- cost centre (if profiling), and the return address.
+saveVolatileVarsAndRegs
+ :: PlainStgLiveVars -- Vars which should be made safe
+ -> FCode (AbstractC, -- Assignments to do the saves
+ EndOfBlockInfo, -- New sequel, recording where the return
+ -- address now is
+ Maybe VirtualSpBOffset) -- Slot for current cost centre
+
+
+saveVolatileVarsAndRegs vars
+ = saveVolatileVars vars `thenFC` \ var_saves ->
+ saveCurrentCostCentre `thenFC` \ (maybe_cc_slot, cc_save) ->
+ saveReturnAddress `thenFC` \ (new_eob_info, ret_save) ->
+ returnFC (mkAbstractCs [var_saves, cc_save, ret_save],
+ new_eob_info,
+ maybe_cc_slot)
+
+
+saveVolatileVars :: PlainStgLiveVars -- Vars which should be made safe
+ -> FCode AbstractC -- Assignments to do the saves
+
+-- Walk the live vars; each one currently held only in a volatile
+-- location gets an A- or B-stack slot (chosen by kind) and is rebound
+-- to that slot.
+saveVolatileVars vars
+ = save_em (uniqSetToList vars)
+ where
+ save_em [] = returnFC AbsCNop
+
+ save_em (var:vars)
+ = getCAddrModeIfVolatile var `thenFC` \ v ->
+ case v of
+ Nothing -> save_em vars -- Non-volatile, so carry on
+
+
+ Just vol_amode -> -- Aha! It's volatile
+ save_var var vol_amode `thenFC` \ abs_c ->
+ save_em vars `thenFC` \ abs_cs ->
+ returnFC (abs_c `mkAbsCStmts` abs_cs)
+
+ save_var var vol_amode
+ | isFollowableKind kind
+ = allocAStack `thenFC` \ a_slot ->
+ rebindToAStack var a_slot `thenC`
+ getSpARelOffset a_slot `thenFC` \ spa_rel ->
+ returnFC (CAssign (CVal spa_rel kind) vol_amode)
+ | otherwise
+ = allocBStack (getKindSize kind) `thenFC` \ b_slot ->
+ rebindToBStack var b_slot `thenC`
+ getSpBRelOffset b_slot `thenFC` \ spb_rel ->
+ returnFC (CAssign (CVal spb_rel kind) vol_amode)
+ where
+ kind = getAmodeKind vol_amode
+
+-- Save the return address if it lives in RetReg (volatile).  The
+-- UpdateCode case allocates a dead B-stack slot purely to keep update
+-- frames apart -- see the B-stack squeezing note below.
+saveReturnAddress :: FCode (EndOfBlockInfo, AbstractC)
+saveReturnAddress
+ = getEndOfBlockInfo `thenFC` \ eob_info@(EndOfBlockInfo vA vB sequel) ->
+
+ -- See if it is volatile
+ case sequel of
+ InRetReg -> -- Yes, it's volatile
+ allocBStack retKindSize `thenFC` \ b_slot ->
+ getSpBRelOffset b_slot `thenFC` \ spb_rel ->
+
+ returnFC (EndOfBlockInfo vA vB (OnStack b_slot),
+ CAssign (CVal spb_rel RetKind) (CReg RetReg))
+
+ UpdateCode _ -> -- It's non-volatile all right, but we still need
+ -- to allocate a B-stack slot for it, *solely* to make
+ -- sure that update frames for different values do not
+ -- appear adjacent on the B stack. This makes sure
+ -- that B-stack squeezing works ok.
+ -- See note below
+ allocBStack retKindSize `thenFC` \ b_slot ->
+ returnFC (eob_info, AbsCNop)
+
+ other -> -- No, it's non-volatile, so do nothing
+ returnFC (eob_info, AbsCNop)
+\end{code}
+
+Note about B-stack squeezing. Consider the following:
+
+ y = [...] \u [] -> ...
+ x = [y] \u [] -> case y of (a,b) -> a
+
+The code for x will push an update frame, and then enter y. The code
+for y will push another update frame. If the B-stack-squeezer then
+wakes up, it will see two update frames right on top of each other,
+and will combine them. This is WRONG, of course, because x's value is
+not the same as y's.
+
+The fix implemented above makes sure that we allocate an (unused)
+B-stack slot before entering y. You can think of this as holding the
+saved value of RetAddr, which (after pushing x's update frame) will be
+some update code ptr). The compiler is clever enough to load the
+static update code ptr into RetAddr before entering y, but the slot
+is still there to separate the update frames.
+
+When we save the current cost centre (which is done for lexical
+scoping), we allocate a free B-stack location, and return (a)~the
+virtual offset of the location, to pass on to the alternatives, and
+(b)~the assignment to do the save (just as for @saveVolatileVars@).
+
+\begin{code}
+saveCurrentCostCentre ::
+ FCode (Maybe VirtualSpBOffset, -- Where we decide to store it
+ -- Nothing if not lexical CCs
+ AbstractC) -- Assignment to save it
+ -- AbsCNop if not lexical CCs
+
+-- Only does anything when cost-centre profiling is switched on.
+saveCurrentCostCentre
+ = isSwitchSetC SccProfilingOn `thenFC` \ doing_profiling ->
+ if not doing_profiling then
+ returnFC (Nothing, AbsCNop)
+ else
+ allocBStack (getKindSize CostCentreKind) `thenFC` \ b_slot ->
+ getSpBRelOffset b_slot `thenFC` \ spb_rel ->
+ returnFC (Just b_slot,
+ CAssign (CVal spb_rel CostCentreKind) (CReg CurCostCentre))
+
+restoreCurrentCostCentre :: Maybe VirtualSpBOffset -> FCode AbstractC
+
+-- Frees the saved-cost-centre slot as a side effect.
+restoreCurrentCostCentre Nothing
+ = returnFC AbsCNop
+restoreCurrentCostCentre (Just b_slot)
+ = getSpBRelOffset b_slot `thenFC` \ spb_rel ->
+ freeBStkSlot b_slot `thenC`
+ returnFC (CCallProfCCMacro SLIT("RESTORE_CCC") [CVal spb_rel CostCentreKind])
+ -- we use the RESTORE_CCC macro, rather than just
+ -- assigning into CurCostCentre, in case RESTORE_CCC
+ -- has some sanity-checking in it.
+\end{code}
+
+
+%************************************************************************
+%* *
+\subsection[CgCase-return-vec]{Building a return vector}
+%* *
+%************************************************************************
+
+Build a return vector, and return a suitable label addressing
+mode for it.
+
+\begin{code}
+mkReturnVector :: Unique
+ -> UniType
+ -> [(ConTag, AbstractC)] -- Branch codes
+ -> AbstractC -- Default case
+ -> FCode CAddrMode
+
+-- Unvectored return: one labelled code block that switches on TagReg.
+-- Vectored return: a table with one (Maybe) code entry per tag.
+mkReturnVector uniq ty tagged_alt_absCs deflt_absC
+ = let
+ (return_vec_amode, vtbl_body) = case (ctrlReturnConvAlg spec_tycon) of {
+
+ UnvectoredReturn _ ->
+ (CUnVecLbl ret_label vtbl_label,
+ absC (CRetUnVector vtbl_label
+ (CLabelledCode ret_label
+ (mkAlgAltsCSwitch (CReg TagReg)
+ tagged_alt_absCs
+ deflt_absC))));
+ VectoredReturn table_size ->
+ (CLbl vtbl_label DataPtrKind,
+ absC (CRetVector vtbl_label
+ -- must restore cc before each alt, if required
+ (map mk_vector_entry [fIRST_TAG .. (table_size+fIRST_TAG-1)])
+ deflt_absC))
+
+-- Leave nops and comments in for now; they are eliminated
+-- lazily as it's printed.
+-- (case (nonemptyAbsC deflt_absC) of
+-- Nothing -> AbsCNop
+-- Just def -> def)
+
+ } in
+ vtbl_body `thenC`
+ returnFC return_vec_amode
+ -- )
+ where
+
+ (spec_tycon,_,_) = case (getUniDataSpecTyCon_maybe ty) of -- *must* be a real "data" type constructor
+ Just xx -> xx
+ Nothing -> error ("ERROR: can't generate code for polymorphic case;\nprobably a mis-use of `seq' or `par';\nthe User's Guide has more details.\nOffending type: "++(ppShow 80 (ppr PprDebug ty)))
+
+ vtbl_label = mkVecTblLabel uniq
+ ret_label = mkReturnPtLabel uniq
+
+ mk_vector_entry :: ConTag -> Maybe CAddrMode
+ mk_vector_entry tag
+ = case [ absC | (t, absC) <- tagged_alt_absCs, t == tag ] of
+ [] -> Nothing
+ [absC] -> Just (CCode absC)
+ _ -> panic "mkReturnVector: too many"
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgCase-utils]{Utilities for handling case expressions}
+%* *
+%************************************************************************
+
+@possibleHeapCheck@ tests a flag passed in to decide whether to
+do a heap check or not.
+
+\begin{code}
+possibleHeapCheck :: GCFlag -> [MagicId] -> Bool -> Code -> Code
+
+-- Wrap the code in a heap check only when GC may happen.
+possibleHeapCheck GCMayHappen regs node_reqd code = heapCheck regs node_reqd code
+possibleHeapCheck NoGC _ _ code = code
+\end{code}
+
+Select a restricted set of registers based on a usage mask.
+
+\begin{code}
+-- Keep the elements of the second list whose mask bit is True.
+-- The two lists must have equal length (partial otherwise).
+selectByMask [] [] = []
+selectByMask (True:ms) (x:xs) = x : selectByMask ms xs
+selectByMask (False:ms) (x:xs) = selectByMask ms xs
+\end{code}
diff --git a/ghc/compiler/codeGen/CgClosure.hi b/ghc/compiler/codeGen/CgClosure.hi
new file mode 100644
index 0000000000..fcdb52d910
--- /dev/null
+++ b/ghc/compiler/codeGen/CgClosure.hi
@@ -0,0 +1,32 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgClosure where
+import AbsCSyn(AbstractC)
+import CgBindery(CgIdInfo, StableLoc, VolatileLoc)
+import CgMonad(CgInfoDownwards, CgState, CompilationInfo, EndOfBlockInfo, StubFlag)
+import ClosureInfo(LambdaFormInfo)
+import CmdLineOpts(GlobalSwitch)
+import CostCentre(CostCentre)
+import HeapOffs(HeapOffset)
+import Id(Id, IdDetails)
+import IdInfo(IdInfo)
+import Maybes(Labda)
+import PreludePS(_PackedString)
+import PrimOps(PrimOp)
+import StgSyn(StgAtom, StgBinderInfo, StgBinding, StgCaseAlternatives, StgExpr, UpdateFlag)
+import UniType(UniType)
+import UniqFM(UniqFM)
+import Unique(Unique)
+data CgIdInfo {-# GHC_PRAGMA MkCgIdInfo Id VolatileLoc StableLoc LambdaFormInfo #-}
+data CgInfoDownwards {-# GHC_PRAGMA MkCgInfoDown CompilationInfo (UniqFM CgIdInfo) EndOfBlockInfo #-}
+data CgState {-# GHC_PRAGMA MkCgState AbstractC (UniqFM CgIdInfo) ((Int, [(Int, StubFlag)], Int, Int), (Int, [Int], Int, Int), (HeapOffset, HeapOffset)) #-}
+data CompilationInfo {-# GHC_PRAGMA MkCompInfo (GlobalSwitch -> Bool) _PackedString #-}
+data HeapOffset
+data Id {-# GHC_PRAGMA Id Unique UniType IdInfo IdDetails #-}
+data Labda a {-# GHC_PRAGMA Hamna | Ni a #-}
+data StgExpr a b {-# GHC_PRAGMA StgApp (StgAtom b) [StgAtom b] (UniqFM b) | StgConApp Id [StgAtom b] (UniqFM b) | StgPrimApp PrimOp [StgAtom b] (UniqFM b) | StgCase (StgExpr a b) (UniqFM b) (UniqFM b) Unique (StgCaseAlternatives a b) | StgLet (StgBinding a b) (StgExpr a b) | StgLetNoEscape (UniqFM b) (UniqFM b) (StgBinding a b) (StgExpr a b) | StgSCC UniType CostCentre (StgExpr a b) #-}
+data UpdateFlag {-# GHC_PRAGMA ReEntrant | Updatable | SingleEntry #-}
+cgRhsClosure :: Id -> CostCentre -> StgBinderInfo -> [Id] -> [Id] -> StgExpr Id Id -> LambdaFormInfo -> CgInfoDownwards -> CgState -> ((Id, CgIdInfo), CgState)
+ {-# GHC_PRAGMA _A_ 7 _U_ 222222222 _N_ _S_ "LLLLLLS" _N_ _N_ #-}
+cgTopRhsClosure :: Id -> CostCentre -> StgBinderInfo -> [Id] -> StgExpr Id Id -> LambdaFormInfo -> CgInfoDownwards -> CgState -> ((Id, CgIdInfo), CgState)
+ {-# GHC_PRAGMA _A_ 6 _U_ 22222222 _N_ _N_ _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgClosure.lhs b/ghc/compiler/codeGen/CgClosure.lhs
new file mode 100644
index 0000000000..93aabe1b6f
--- /dev/null
+++ b/ghc/compiler/codeGen/CgClosure.lhs
@@ -0,0 +1,1014 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1995
+%
+\section[CgClosure]{Code generation for closures}
+
+This module provides the support code for @StgToAbstractC@ to deal
+with {\em closures} on the RHSs of let(rec)s. See also
+@CgCon@, which deals with constructors.
+
+\begin{code}
+#include "HsVersions.h"
+
+module CgClosure (
+ cgTopRhsClosure, cgRhsClosure,
+
+ -- and to make the interface self-sufficient...
+ StgExpr, Id, CgState, Maybe, HeapOffset,
+ CgInfoDownwards, CgIdInfo, CompilationInfo,
+ UpdateFlag
+ ) where
+
+IMPORT_Trace -- ToDo: rm (debugging)
+import Outputable
+import Pretty -- NB: see below
+
+import StgSyn
+import CgMonad
+import AbsCSyn
+
+import AbsPrel ( PrimOp(..), primOpNameInfo, Name
+ IF_ATTACK_PRAGMAS(COMMA tagOf_PrimOp)
+ IF_ATTACK_PRAGMAS(COMMA pprPrimOp)
+ )
+import AbsUniType ( isPrimType, isPrimTyCon,
+ getTauType, showTypeCategory, getTyConDataCons
+ IF_ATTACK_PRAGMAS(COMMA splitType)
+ IF_ATTACK_PRAGMAS(COMMA splitTyArgs)
+ )
+import CgBindery ( getCAddrMode, getAtomAmodes,
+ getCAddrModeAndInfo,
+ bindNewToNode, bindNewToAStack, bindNewToBStack,
+ bindNewToReg, bindArgsToRegs
+ )
+import CgCompInfo ( spARelToInt, spBRelToInt )
+import CgExpr ( cgExpr, cgSccExpr )
+import CgUpdate ( pushUpdateFrame )
+import CgHeapery ( allocDynClosure, heapCheck
+#ifdef GRAN
+ , heapCheckOnly, fetchAndReschedule -- HWL
+#endif {- GRAN -}
+ )
+import CgRetConv ( ctrlReturnConvAlg, dataReturnConvAlg, mkLiveRegsBitMask,
+ CtrlReturnConvention(..), DataReturnConvention(..)
+ )
+import CgStackery ( getFinalStackHW, mkVirtStkOffsets,
+ adjustRealSps
+ )
+import CgUsages ( getVirtSps, setRealAndVirtualSps,
+ getSpARelOffset, getSpBRelOffset,
+ getHpRelOffset
+ )
+import CLabelInfo
+import ClosureInfo -- lots and lots of stuff
+import CmdLineOpts ( GlobalSwitch(..) )
+import CostCentre
+import Id ( getIdUniType, getIdKind, isSysLocalId, myWrapperMaybe,
+ showId, getIdInfo, getIdStrictness,
+ getDataConTag
+ )
+import IdInfo
+import ListSetOps ( minusList )
+import Maybes ( Maybe(..), maybeToBool )
+import PrimKind ( isFollowableKind )
+import UniqSet
+import Unpretty
+import Util
+\end{code}
+
+%********************************************************
+%* *
+\subsection[closures-no-free-vars]{Top-level closures}
+%* *
+%********************************************************
+
+For closures bound at top level, allocate in static space.
+They should have no free variables.
+
+\begin{code}
+-- Generate code for a closure bound at top level. Top-level closures
+-- are allocated statically and have no free variables (see the prose
+-- above); the result pairs the binder with a stable CgIdInfo that
+-- names the closure's static label.
+cgTopRhsClosure :: Id
+		-> CostCentre	-- Optional cost centre annotation
+		-> StgBinderInfo
+		-> [Id]		-- Args
+		-> PlainStgExpr
+		-> LambdaFormInfo
+		-> FCode (Id, CgIdInfo)
+\end{code}
+
+\begin{code}
+{- NOT USED:
+cgTopRhsClosure name cc binder_info args body lf_info
+ | maybeToBool maybe_std_thunk -- AHA! A STANDARD-FORM THUNK
+ = (
+ -- LAY OUT THE OBJECT
+ getAtomAmodes std_thunk_payload `thenFC` \ amodes ->
+ let
+ (closure_info, amodes_w_offsets) = layOutStaticClosure name getAmodeKind amodes lf_info
+ in
+
+ -- BUILD THE OBJECT
+ chooseStaticCostCentre cc lf_info `thenFC` \ cost_centre ->
+ absC (CStaticClosure
+ closure_label -- Labelled with the name on lhs of defn
+ closure_info
+ cost_centre
+ (map fst amodes_w_offsets)) -- They are in the correct order
+ ) `thenC`
+
+ returnFC (name, stableAmodeIdInfo name (CLbl closure_label PtrKind) lf_info)
+ where
+ maybe_std_thunk = getStandardFormThunkInfo lf_info
+ Just std_thunk_payload = maybe_std_thunk
+
+ closure_label = mkClosureLabel name
+-}
+\end{code}
+
+The general case:
+\begin{code}
+-- General case: lay out a static no-free-variable closure, fork off
+-- its info table/entry code, build Vap info tables when the result
+-- type is boxed, and emit the static closure object itself only when
+-- staticClosureRequired says it is needed.
+cgTopRhsClosure name cc binder_info args body lf_info
+  =	-- LAY OUT THE OBJECT
+    let
+	closure_info = layOutStaticNoFVClosure name lf_info
+    in
+
+	-- GENERATE THE INFO TABLE (IF NECESSARY)
+    forkClosureBody (closureCodeBody binder_info closure_info
+					 cc args body)
+							`thenC`
+
+	-- BUILD VAP INFO TABLES IF NECESSARY
+	-- Don't build Vap info tables etc for
+	-- a function whose result is an unboxed type,
+	-- because we can never have thunks with such a type.
+    (if closureReturnsUnboxedType closure_info then
+	nopC
+    else
+	let
+	    bind_the_fun = addBindC name cg_id_info	-- It's global!
+	in
+	cgVapInfoTables True {- Top level -} bind_the_fun binder_info name args lf_info
+    ) `thenC`
+
+	-- BUILD THE OBJECT (IF NECESSARY)
+    (if staticClosureRequired name binder_info lf_info
+     then
+	let
+	    cost_centre = mkCCostCentre cc
+	in
+	absC (CStaticClosure
+		closure_label	-- Labelled with the name on lhs of defn
+		closure_info
+		cost_centre
+		[])		-- No fields
+     else
+	nopC
+    ) `thenC`
+
+    returnFC (name, cg_id_info)
+  where
+    closure_label = mkClosureLabel name
+    cg_id_info    = stableAmodeIdInfo name (CLbl closure_label PtrKind) lf_info
+\end{code}
+
+%********************************************************
+%* *
+\subsection[non-top-level-closures]{Non top-level closures}
+%* *
+%********************************************************
+
+For closures with free vars, allocate in heap.
+
+===================== OLD PROBABLY OUT OF DATE COMMENTS =============
+
+-- Closures which (a) have no fvs and (b) have some args (i.e.
+-- combinator functions), are allocated statically, just as if they
+-- were top-level closures. We can't get a space leak that way
+-- (because they are HNFs) and it saves allocation.
+
+-- Lexical Scoping: Problem
+-- These top level function closures will be inherited, possibly
+-- to a different cost centre scope set before entering.
+
+-- Evaluation Scoping: ok as already in HNF
+
+-- Should rely on floating mechanism to achieve this floating to top level.
+-- As let floating will avoid floating which breaks cost centre attribution
+-- everything will be OK.
+
+-- Disabled: because it breaks lexical-scoped cost centre semantics.
+-- cgRhsClosure binder cc bi [] upd_flag args@(_:_) body
+-- = cgTopRhsClosure binder cc bi upd_flag args body
+
+===================== END OF OLD PROBABLY OUT OF DATE COMMENTS =============
+
+\begin{code}
+-- Generate code for a closure with free variables: allocate it
+-- dynamically in the heap.  This first equation handles the
+-- standard-form-thunk special case, where no new entry code is needed
+-- and the payload comes straight from getStandardFormThunkInfo.
+cgRhsClosure :: Id
+		-> CostCentre	-- Optional cost centre annotation
+		-> StgBinderInfo
+		-> [Id]		-- Free vars
+		-> [Id]		-- Args
+		-> PlainStgExpr
+		-> LambdaFormInfo
+		-> FCode (Id, CgIdInfo)
+
+cgRhsClosure binder cc binder_info fvs args body lf_info
+  | maybeToBool maybe_std_thunk		-- AHA!  A STANDARD-FORM THUNK
+  -- ToDo: check non-primitiveness (ASSERT)
+  = (
+	-- LAY OUT THE OBJECT
+    getAtomAmodes std_thunk_payload		`thenFC` \ amodes ->
+    let
+	(closure_info, amodes_w_offsets)
+	  = layOutDynClosure binder getAmodeKind amodes lf_info
+
+	(use_cc, blame_cc) = chooseDynCostCentres cc args fvs body
+    in
+	-- BUILD THE OBJECT
+    allocDynClosure closure_info use_cc blame_cc amodes_w_offsets
+    )
+		`thenFC` \ heap_offset ->
+
+	-- RETURN
+    returnFC (binder, heapIdInfo binder heap_offset lf_info)
+
+  where
+    maybe_std_thunk	   = getStandardFormThunkInfo lf_info
+    Just std_thunk_payload = maybe_std_thunk
+\end{code}
+
+Here's the general case.
+\begin{code}
+-- General case: lay out the dynamic closure over its free variables,
+-- fork off the info table and entry code (binding fvs to their node
+-- offsets), build Vap info tables for boxed-result functions, then
+-- allocate the object and return a heap-offset CgIdInfo for the binder.
+cgRhsClosure binder cc binder_info fvs args body lf_info
+  = (
+	-- LAY OUT THE OBJECT
+	--
+	-- If the binder is itself a free variable, then don't store
+	-- it in the closure.  Instead, just bind it to Node on entry.
+	-- NB we can be sure that Node will point to it, because we
+	-- haven't told mkClosureLFInfo about this; so if the binder
+	-- *was* a free var of its RHS, mkClosureLFInfo thinks it *is*
+	-- stored in the closure itself, so it will make sure that
+	-- Node points to it...
+    let
+	is_elem	       = isIn "cgRhsClosure"
+
+	binder_is_a_fv = binder `is_elem` fvs
+	reduced_fvs    = if binder_is_a_fv
+			 then fvs `minusList` [binder]
+			 else fvs
+    in
+    mapFCs getCAddrModeAndInfo reduced_fvs	`thenFC` \ amodes_and_info ->
+    let
+	fvs_w_amodes_and_info	      = reduced_fvs `zip` amodes_and_info
+
+	closure_info :: ClosureInfo
+	bind_details :: [((Id, (CAddrMode, LambdaFormInfo)), VirtualHeapOffset)]
+
+	(closure_info, bind_details)
+	  = layOutDynClosure binder get_kind fvs_w_amodes_and_info lf_info
+
+	bind_fv ((id, (_, lf_info)), offset) = bindNewToNode id offset lf_info
+
+	amodes_w_offsets = [(amode,offset) | ((_, (amode,_)), offset) <- bind_details]
+
+	get_kind (id, amode_and_info) = getIdKind id
+    in
+	-- BUILD ITS INFO TABLE AND CODE
+    forkClosureBody (
+		-- Bind the fvs
+	    mapCs bind_fv bind_details `thenC`
+
+	  	-- Bind the binder itself, if it is a free var
+	    (if binder_is_a_fv then
+		bindNewToReg binder node lf_info
+	    else
+		nopC)					`thenC`
+
+		-- Compile the body
+	    closureCodeBody binder_info closure_info cc args body
+    )	`thenC`
+
+	-- BUILD VAP INFO TABLES IF NECESSARY
+	-- Don't build Vap info tables etc for
+	-- a function whose result is an unboxed type,
+	-- because we can never have thunks with such a type.
+    (if closureReturnsUnboxedType closure_info then
+	nopC
+    else
+	cgVapInfoTables False {- Not top level -} nopC binder_info binder args lf_info
+    ) `thenC`
+
+	-- BUILD THE OBJECT
+    let
+	(use_cc, blame_cc) = chooseDynCostCentres cc args fvs body
+    in
+    allocDynClosure closure_info use_cc blame_cc amodes_w_offsets
+    )		`thenFC` \ heap_offset ->
+
+	-- RETURN
+    returnFC (binder, heapIdInfo binder heap_offset lf_info)
+\end{code}
+
+@cgVapInfoTables@ generates both Vap info tables, if they are required
+at all. It calls @cgVapInfoTable@ to generate each Vap info table,
+along with its entry code.
+
+\begin{code}
+-- Don't generate Vap info tables for thunks; only for functions
+cgVapInfoTables top_level perhaps_bind_the_fun binder_info fun [{- no args; a thunk! -}] lf_info
+  = nopC
+
+-- Emit the updatable and/or single-entry Vap info tables, each only
+-- if the binder-info says that variant is actually required.
+cgVapInfoTables top_level perhaps_bind_the_fun binder_info fun args lf_info
+  = 	-- BUILD THE STANDARD VAP-ENTRY STUFF IF NECESSARY
+    (if stdVapRequired binder_info then
+	cgVapInfoTable perhaps_bind_the_fun Updatable fun args fun_in_payload lf_info
+     else
+	nopC
+    )		`thenC`
+
+	-- BUILD THE NO-UPDATE VAP-ENTRY STUFF IF NECESSARY
+    (if noUpdVapRequired binder_info then
+	cgVapInfoTable perhaps_bind_the_fun SingleEntry fun args fun_in_payload lf_info
+     else
+	nopC
+    )
+
+  where
+    fun_in_payload = not top_level
+
+-- Build one Vap info table (and its entry code) for "fun" applied to
+-- "args", treating it as the thunk  x = [a,b,c] \upd [] -> f a b c.
+cgVapInfoTable perhaps_bind_the_fun upd_flag fun args fun_in_payload fun_lf_info
+  = let
+	-- The vap_entry_rhs is a manufactured STG expression which
+	-- looks like the RHS of any binding which is going to use the vap-entry
+	-- point of the function.  Each of these bindings will look like:
+	--
+	--	x = [a,b,c] \upd [] -> f a b c
+	--
+	-- If f is not top-level, then f is one of the free variables too,
+	-- hence "payload_ids" isn't the same as "arg_ids".
+	--
+	vap_entry_rhs = StgApp (StgVarAtom fun) (map StgVarAtom args) emptyUniqSet
+							-- Empty live vars
+
+	arg_ids_w_info = [(name,mkLFArgument) | name <- args]
+	payload_ids_w_info | fun_in_payload = (fun,fun_lf_info) : arg_ids_w_info
+			   | otherwise	    = arg_ids_w_info
+
+	payload_ids | fun_in_payload = fun : args	-- Sigh; needed for mkClosureLFInfo
+		    | otherwise	     = args
+
+	vap_lf_info   = mkClosureLFInfo False {-not top level-} payload_ids
+					upd_flag [] vap_entry_rhs
+		-- It's not top level, even if we're currently compiling a top-level
+		-- function, because any VAP *use* of this function will be for a
+		-- local thunk, thus
+		--		let x = f p q	-- x isn't top level!
+		--		in ...
+
+	get_kind (id, info) = getIdKind id
+
+	payload_bind_details :: [((Id, LambdaFormInfo), VirtualHeapOffset)]
+	(closure_info, payload_bind_details) = layOutDynClosure
+						fun
+						get_kind payload_ids_w_info
+						vap_lf_info
+		-- The dodgy thing is that we use the "fun" as the
+		-- Id to give to layOutDynClosure.  This Id gets embedded in
+		-- the closure_info it returns.  But of course, the function doesn't
+		-- have the right type to match the Vap closure.  Never mind,
+		-- a hack in closureType spots the special case.  Otherwise that
+		-- Id is just used for label construction, which is OK.
+
+	bind_fv ((id,lf_info), offset) = bindNewToNode id offset lf_info
+    in
+
+	-- BUILD ITS INFO TABLE AND CODE
+    forkClosureBody (
+
+		-- Bind the fvs; if the fun is not in the payload, then bind_the_fun tells
+		-- how to bind it.  If it is in payload it'll be bound by payload_bind_details.
+	    perhaps_bind_the_fun		`thenC`
+	    mapCs bind_fv payload_bind_details	`thenC`
+
+		-- Generate the info table and code
+	    closureCodeBody NoStgBinderInfo
+			    closure_info
+			    useCurrentCostCentre
+			    []	-- No args; it's a thunk
+			    vap_entry_rhs
+    )
+\end{code}
+%************************************************************************
+%* *
+\subsection[code-for-closures]{The code for closures}
+%* *
+%************************************************************************
+
+\begin{code}
+-- Generate the info table and entry code for one closure body.
+-- Two equations follow: [] args = thunk (update frame, no arg check);
+-- non-empty args = function (slow/fast entry, arg-satisfaction check).
+closureCodeBody :: StgBinderInfo
+		-> ClosureInfo	-- Lots of information about this closure
+		-> CostCentre	-- Optional cost centre attached to closure
+		-> [Id]		-- Args; [] for a thunk
+		-> PlainStgExpr
+		-> Code
+\end{code}
+
+There are two main cases for the code for closures. If there are {\em
+no arguments}, then the closure is a thunk, and not in normal form.
+So it should set up an update frame (if it is shared). Also, it has
+no argument satisfaction check, so fast and slow entry-point labels
+are the same.
+
+\begin{code}
+-- Thunk case: one shared entry point (slow == fast); the body runs
+-- under thunkWrapper, which does the stack/heap checks, black-holing
+-- and update-frame pushing.
+closureCodeBody binder_info closure_info cc [] body
+  = -- thunks cannot have a primitive type!
+#ifdef DEBUG
+    let
+	(has_tycon, tycon)
+	  = case (closureType closure_info) of
+	      Nothing       -> (False, panic "debug")
+	      Just (tc,_,_) -> (True,  tc)
+    in
+    if has_tycon && isPrimTyCon tycon then
+	pprPanic "closureCodeBody:thunk:prim type!" (ppr PprDebug tycon)
+    else
+#endif
+    getAbsC body_code 	`thenFC` \ body_absC ->
+#ifndef DPH
+    moduleName		`thenFC` \ mod_name ->
+    absC (CClosureInfoAndCode closure_info body_absC Nothing stdUpd (cl_descr mod_name))
+#else
+    -- Applying a similar scheme to Simon's placing info tables before code...
+    -- ToDo:DPH: update
+    absC (CNativeInfoTableAndCode closure_info
+				  closure_description
+				  (CCodeBlock entry_label body_absC))
+#endif {- Data Parallel Haskell -}
+  where
+    cl_descr mod_name = closureDescription mod_name (closureId closure_info) [] body
+
+    -- NOTE(review): body_addr appears unused in this equation — confirm
+    -- before removing, since this is only a partial view of the module.
+    body_addr   = CLbl (entryLabelFromCI closure_info) CodePtrKind
+    body_code   = profCtrC SLIT("ENT_THK") []			`thenC`
+		  enterCostCentreCode closure_info cc IsThunk	`thenC`
+		  thunkWrapper closure_info (cgSccExpr body)
+
+    stdUpd      = CLbl mkErrorStdEntryLabel CodePtrKind
+\end{code}
+
+If there is {\em at least one argument}, then this closure is in
+normal form, so there is no need to set up an update frame. On the
+other hand, we do have to check that there are enough args, and
+perform an update if not!
+
+The macros for GrAnSim are produced at the beginning of the
+argument-satisfaction check (by calling fetchAndReschedule); at that
+point it is known whether Node points to the closure. -- HWL
+
+\begin{code}
+-- Function case (at least one argument).  Two entry points are built:
+--  * slow entry: all args on the stacks; binds them, runs the
+--    argument-satisfaction check, loads as many args as possible into
+--    the registers the fast entry expects, then falls through;
+--  * fast entry: args in registers per the entry convention, rest on
+--    the stacks; runs the body under funWrapper (stack/heap checks).
+-- Slow code and the info table are emitted only if required.
+closureCodeBody binder_info closure_info cc all_args body
+  = getEntryConvention id lf_info
+		       (map getIdKind all_args)		`thenFC` \ entry_conv ->
+
+    isSwitchSetC EmitArityChecks			`thenFC` \ do_arity_chks ->
+
+    isSwitchSetC ForConcurrent				`thenFC` \ is_concurrent ->
+
+    isStringSwitchSetC AsmTarget			`thenFC` \ native_code ->
+
+    let
+	stg_arity = length all_args
+
+	-- Arg mapping for standard (slow) entry point; all args on stack
+	(spA_all_args, spB_all_args, all_bxd_w_offsets, all_ubxd_w_offsets)
+	   = mkVirtStkOffsets
+		0 0 		-- Initial virtual SpA, SpB
+		getIdKind
+		all_args
+
+	-- Arg mapping for the fast entry point; as many args as poss in
+	-- registers; the rest on the stack
+	-- 	arg_regs are the registers used for arg passing
+	-- 	stk_args are the args which are passed on the stack
+	--
+	arg_regs = case entry_conv of
+		DirectEntry lbl arity regs -> regs
+		ViaNode | is_concurrent    -> []
+		other 			   -> panic "closureCodeBody:arg_regs"
+
+	stk_args = drop (length arg_regs) all_args
+	(spA_stk_args, spB_stk_args, stk_bxd_w_offsets, stk_ubxd_w_offsets)
+	   = mkVirtStkOffsets
+		0 0		-- Initial virtual SpA, SpB
+		getIdKind
+		stk_args
+
+	-- HWL; Note: empty list of live regs in slow entry code
+	-- Old version (reschedule combined with heap check);
+	-- see argSatisfactionCheck for new version
+	--slow_entry_code = forceHeapCheck [node] True slow_entry_code'
+	--		    where node = VanillaReg PtrKind 1
+	--slow_entry_code = forceHeapCheck [] True slow_entry_code'
+
+	slow_entry_code
+	  = profCtrC SLIT("ENT_FUN_STD") []		`thenC`
+
+		-- Bind args, and record expected position of stk ptrs
+	    mapCs bindNewToAStack all_bxd_w_offsets	`thenC`
+	    mapCs bindNewToBStack all_ubxd_w_offsets	`thenC`
+	    setRealAndVirtualSps spA_all_args spB_all_args	`thenC`
+
+	    argSatisfactionCheck closure_info all_args	`thenC`
+
+	    -- OK, so there are enough args.  Now we need to stuff as
+	    -- many of them in registers as the fast-entry code expects
+	    -- Note that the zipWith will give up when it hits the end of arg_regs
+	    mapFCs getCAddrMode all_args		`thenFC` \ stk_amodes ->
+	    absC (mkAbstractCs (zipWith assign_to_reg arg_regs stk_amodes)) 	`thenC`
+
+	    -- Now adjust real stack pointers
+	    adjustRealSps spA_stk_args spB_stk_args	`thenC`
+
+	    -- set the arity checker, if asked
+	    absC (
+		if do_arity_chks
+		then CMacroStmt SET_ARITY [mkIntCLit stg_arity]
+		else AbsCNop
+	    )						`thenC`
+
+#ifndef DPH
+	    absC (CFallThrough (CLbl fast_label CodePtrKind))
+#else
+	    -- Fall through to the fast entry point
+	    absC (AbsCNop)
+#endif {- Data Parallel Haskell -}
+
+	assign_to_reg reg_id amode = CAssign (CReg reg_id) amode
+
+	-- HWL
+	-- Old version (reschedule combined with heap check);
+	-- see argSatisfactionCheck for new version
+	-- fast_entry_code = forceHeapCheck [] True fast_entry_code'
+
+	fast_entry_code
+	  = profCtrC SLIT("ENT_FUN_DIRECT") [
+		    CLbl (mkRednCountsLabel id) PtrKind,
+		    CString (_PK_ (showId PprDebug id)),
+		    mkIntCLit stg_arity,	-- total # of args
+		    mkIntCLit spA_stk_args,	-- # passed on A stk
+		    mkIntCLit spB_stk_args,	-- B stk (rest in regs)
+		    CString (_PK_ (map (showTypeCategory . getIdUniType) all_args)),
+		    CString (_PK_ (show_wrapper_name wrapper_maybe)),
+		    CString (_PK_ (show_wrapper_arg_kinds wrapper_maybe))
+		]			`thenC`
+	    absC (
+		if do_arity_chks
+		then CMacroStmt CHK_ARITY [mkIntCLit stg_arity]
+		else AbsCNop
+	    )				 	`thenC`
+
+		-- Bind args to regs/stack as appropriate, and
+		-- record expected position of sps
+	    bindArgsToRegs all_args arg_regs		    `thenC`
+	    mapCs bindNewToAStack stk_bxd_w_offsets	    `thenC`
+	    mapCs bindNewToBStack stk_ubxd_w_offsets	    `thenC`
+	    setRealAndVirtualSps spA_stk_args spB_stk_args  `thenC`
+
+		-- Enter the closures cc, if required
+	    enterCostCentreCode closure_info cc IsFunction  `thenC`
+
+		-- Do the business
+	    funWrapper closure_info arg_regs (cgExpr body)
+    in
+#ifndef DPH
+	-- Make a labelled code-block for the slow and fast entry code
+    forkAbsC (if slow_code_needed then slow_entry_code else absC AbsCNop)
+				`thenFC` \ slow_abs_c ->
+    forkAbsC fast_entry_code	`thenFC` \ fast_abs_c ->
+    moduleName			`thenFC` \ mod_name ->
+	-- Now either construct the info table, or put the fast code in alone
+	-- (We never have slow code without an info table)
+    absC (
+      if info_table_needed
+      then
+	CClosureInfoAndCode closure_info slow_abs_c
+		(Just fast_abs_c) stdUpd (cl_descr mod_name)
+      else
+	CCodeBlock fast_label fast_abs_c
+    )
+
+  where
+#else
+	-- The info table goes before the slow entry point.
+    forkAbsC slow_entry_code	`thenFC` \ slow_abs_c ->
+    forkAbsC fast_entry_code	`thenFC` \ fast_abs_c ->
+    moduleName			`thenFC` \ mod_name ->
+    absC (CNativeInfoTableAndCode
+		closure_info
+		(closureDescription mod_name id all_args body)
+		(CCodeBlock slow_label
+			(AbsCStmts slow_abs_c
+				(CCodeBlock fast_label
+					fast_abs_c))))
+  where
+    slow_label = if slow_code_needed then
+			mkStdEntryLabel id
+		 else
+			mkErrorStdEntryLabel
+			-- We may need a pointer to stuff in the info table,
+			-- but if the slow entry code isn't needed, this code
+			-- will never be entered, so we can use a standard
+			-- panic routine.
+
+#endif {- Data Parallel Haskell -}
+
+    lf_info = closureLFInfo closure_info
+
+    cl_descr mod_name = closureDescription mod_name id all_args body
+
+	-- Figure out what is needed and what isn't
+    slow_code_needed   = slowFunEntryCodeRequired id binder_info
+    info_table_needed  = funInfoTableRequired id binder_info lf_info
+
+	-- Manufacture labels
+    id 	       = closureId closure_info
+
+    fast_label = fastLabelFromCI closure_info
+
+    stdUpd = CLbl mkErrorStdEntryLabel CodePtrKind
+
+    -- Walk up any wrapper chain to find the "ultimate" wrapper of id,
+    -- for profiling-counter strings only.
+    wrapper_maybe = get_ultimate_wrapper Nothing id
+      where
+	get_ultimate_wrapper deflt x -- walk all the way up a "wrapper chain"
+	  = case (myWrapperMaybe x) of
+	      Nothing -> deflt
+	      Just xx -> get_ultimate_wrapper (Just xx) xx
+
+    show_wrapper_name Nothing   = ""
+    show_wrapper_name (Just xx) = showId PprDebug xx
+
+    show_wrapper_arg_kinds Nothing   = ""
+    show_wrapper_arg_kinds (Just xx)
+      = case (getWrapperArgTypeCategories (getIdUniType xx) (getIdStrictness xx)) of
+	  Nothing  -> ""
+	  Just str -> str
+\end{code}
+
+For lexically scoped profiling we have to load the cost centre from
+the closure entered, if the costs are not supposed to be inherited.
+This is done immediately on entering the fast entry point.
+
+Load current cost centre from closure, if not inherited.
+Node is guaranteed to point to it, if profiling and not inherited.
+
+\begin{code}
+data IsThunk = IsThunk | IsFunction -- Bool-like, local
+
+-- Emit the profiling code that loads the current cost centre on entry
+-- to a closure: a no-op when profiling is off or costs are subsumed;
+-- otherwise one of the ENTER_CC_* macros, chosen by whether the CC is
+-- "current" (fetched from the closure via Node), a CAF CC, or a real
+-- CC named directly.
+enterCostCentreCode :: ClosureInfo -> CostCentre -> IsThunk -> Code
+
+enterCostCentreCode closure_info cc is_thunk
+  = costCentresFlag	`thenFC` \ profiling_on ->
+    if not profiling_on then
+	nopC
+    else -- down to business
+	ASSERT(not (noCostCentreAttached cc))
+
+	if costsAreSubsumed cc then
+	    nopC
+
+	else if is_current_CC cc then -- fish the CC out of the closure,
+				      -- where we put it when we alloc'd;
+				      -- NB: chk defn of "is_current_CC"
+				      -- if you go to change this! (WDP 94/12)
+	    costCentresC
+		(case is_thunk of
+		   IsThunk    -> SLIT("ENTER_CC_TCL")
+		   IsFunction -> SLIT("ENTER_CC_FCL"))
+		[CReg node]
+
+	else if isCafCC cc then
+	    costCentresC
+		SLIT("ENTER_CC_CAF")
+		[mkCCostCentre cc]
+
+	else -- we've got a "real" cost centre right here in our hands...
+	    costCentresC
+		(case is_thunk of
+		   IsThunk    -> SLIT("ENTER_CC_T")
+		   IsFunction -> SLIT("ENTER_CC_F"))
+		[mkCCostCentre cc]
+  where
+    is_current_CC cc
+      = currentOrSubsumedCosts cc
+	-- but we've already ruled out "subsumed", so it must be "current"!
+\end{code}
+
+%************************************************************************
+%* *
+\subsubsection[pre-closure-code-stuff]{Pre-closure-code code}
+%* *
+%************************************************************************
+
+The argument-satisfaction check code is placed after binding
+the arguments to their stack locations. Hence, the virtual stack
+pointer is pointing after all the args, and virtual offset 1 means
+the base of frame and hence most distant arg. Hence
+virtual offset 0 is just beyond the most distant argument; the
+relative offset of this word tells how many words of arguments
+are expected.
+
+\begin{code}
+-- Emit the argument-satisfaction check: an ARGS_CHK_{A,B} macro that
+-- tests whether enough words of arguments are on the relevant stack
+-- (A if the last arg is followable/boxed, B otherwise).  The _LOAD_NODE
+-- variants are used when Node does not already point to the closure.
+argSatisfactionCheck :: ClosureInfo -> [Id] -> Code
+
+argSatisfactionCheck closure_info [] = nopC
+
+argSatisfactionCheck closure_info args
+  = -- safest way to determine which stack last arg will be on:
+    -- look up CAddrMode that last arg is bound to;
+    -- getAmodeKind;
+    -- check isFollowableKind.
+
+    nodeMustPointToIt (closureLFInfo closure_info)	`thenFC` \ node_points ->
+
+#ifdef GRAN
+    -- HWL:
+    -- absC (CMacroStmt GRAN_FETCH [])			`thenC`
+    -- forceHeapCheck [] node_points (absC AbsCNop)	`thenC`
+    (if node_points
+	then fetchAndReschedule  [] node_points
+	else absC AbsCNop)				`thenC`
+#endif {- GRAN -}
+
+    getCAddrMode (last args)				`thenFC` \ last_amode ->
+
+    if (isFollowableKind (getAmodeKind last_amode)) then
+	getSpARelOffset 0	`thenFC` \ a_rel_offset ->
+	if node_points then
+	    absC (CMacroStmt ARGS_CHK_A [mkIntCLit (spARelToInt a_rel_offset)])
+	else
+	    absC (CMacroStmt ARGS_CHK_A_LOAD_NODE
+		    [mkIntCLit (spARelToInt a_rel_offset), set_Node_to_this])
+    else
+	getSpBRelOffset 0	`thenFC` \ b_rel_offset ->
+	if node_points then
+	    absC (CMacroStmt ARGS_CHK_B [mkIntCLit (spBRelToInt b_rel_offset)])
+	else
+	    absC (CMacroStmt ARGS_CHK_B_LOAD_NODE
+		    [mkIntCLit (spBRelToInt b_rel_offset), set_Node_to_this])
+  where
+    -- We must tell the arg-satis macro whether Node is pointing to
+    -- the closure or not.  If it isn't so pointing, then we give to
+    -- the macro the (static) address of the closure.
+
+    set_Node_to_this = CLbl (closureLabelFromCI closure_info) PtrKind
+\end{code}
+
+%************************************************************************
+%* *
+\subsubsection[closure-code-wrappers]{Wrappers around closure code}
+%* *
+%************************************************************************
+
+\begin{code}
+-- Wrap a thunk's body with, in order: stack check, heap check,
+-- black-holing of the thunk, and update-frame setup.  Each layer
+-- *encloses* the next so the checks happen first on entry.
+thunkWrapper:: ClosureInfo -> Code -> Code
+thunkWrapper closure_info thunk_code
+  = 	-- Stack and heap overflow checks
+    nodeMustPointToIt (closureLFInfo closure_info)	`thenFC` \ node_points ->
+
+#ifdef GRAN
+    -- HWL insert macros for GrAnSim if node is live here
+    (if node_points
+	then fetchAndReschedule  [] node_points
+	else absC AbsCNop)				`thenC`
+#endif {- GRAN -}
+
+    stackCheck closure_info [] node_points (	-- stackCheck *encloses* the rest
+
+	-- Must be after stackCheck: if stchk fails new stack
+	-- space has to be allocated from the heap
+
+	heapCheck [] node_points (
+			-- heapCheck *encloses* the rest
+			-- The "[]" says there are no live argument registers
+
+	-- Overwrite with black hole if necessary
+	blackHoleIt closure_info			`thenC`
+
+	-- Push update frame if necessary
+	setupUpdate closure_info (	-- setupUpdate *encloses* the rest
+
+	-- Evaluation scoping -- load current cost centre from closure
+	-- Must be done after the update frame is pushed
+	-- Node is guaranteed to point to it, if profiling
+-- OLD:
+--	(if isStaticClosure closure_info
+--	 then evalCostCentreC "SET_CAFCC_CL" [CReg node]
+--	 else evalCostCentreC "ENTER_CC_TCL" [CReg node])	`thenC`
+
+	-- Finally, do the business
+	thunk_code
+    )))
+
+-- Wrap a function's body with the stack and heap overflow checks.
+-- arg_regs lists the argument-passing registers live across the checks.
+funWrapper :: ClosureInfo 	-- Closure whose code body this is
+	   -> [MagicId] 	-- List of argument registers (if any)
+	   -> Code		-- Body of function being compiled
+	   -> Code
+funWrapper closure_info arg_regs fun_body
+  = 	-- Stack overflow check
+    nodeMustPointToIt (closureLFInfo closure_info)	`thenFC` \ node_points ->
+    stackCheck closure_info arg_regs node_points (	-- stackCheck *encloses* the rest
+
+	-- Heap overflow check
+	heapCheck arg_regs node_points (
+			-- heapCheck *encloses* the rest
+
+	-- Finally, do the business
+	fun_body
+    ))
+\end{code}
+
+%************************************************************************
+%* *
+\subsubsubsection[overflow-checks]{Stack and heap overflow wrappers}
+%* *
+%************************************************************************
+
+Assumption: virtual and real stack pointers are currently exactly aligned.
+
+\begin{code}
+-- Wrap code with a STK_CHK macro when the final stack high-water marks
+-- exceed the current virtual SpA/SpB; emits nothing when no headroom
+-- is needed.  Assumes virtual and real stack pointers are aligned.
+stackCheck :: ClosureInfo
+	   -> [MagicId]			-- Live registers
+	   -> Bool			-- Node required to point after check?
+	   -> Code
+	   -> Code
+
+stackCheck closure_info regs node_reqd code
+  = getFinalStackHW	(\ aHw -> \ bHw ->	-- Both virtual stack offsets
+
+	getVirtSps		`thenFC` \ (vSpA, vSpB) ->
+
+	let a_headroom_reqd = aHw - vSpA	-- Virtual offsets are positive integers
+	    b_headroom_reqd = bHw - vSpB
+	in
+
+	absC (if (a_headroom_reqd == 0 && b_headroom_reqd == 0) then
+		AbsCNop
+	      else
+		CMacroStmt STK_CHK [mkIntCLit liveness_mask,
+				    mkIntCLit a_headroom_reqd,
+				    mkIntCLit b_headroom_reqd,
+				    mkIntCLit vSpA,
+				    mkIntCLit vSpB,
+				    mkIntCLit (if returns_prim_type then 1 else 0),
+				    mkIntCLit (if node_reqd then 1 else 0)]
+	     )
+	-- The test is *inside* the absC, to avoid black holes!
+
+	`thenC` code
+    )
+  where
+    all_regs = if node_reqd then node:regs else regs
+    liveness_mask = mkLiveRegsBitMask all_regs
+
+    returns_prim_type = closureReturnsUnboxedType closure_info
+\end{code}
+
+%************************************************************************
+%* *
+\subsubsubsection[update-and-BHs]{Update and black-hole wrappers}
+%* *
+%************************************************************************
+
+
+\begin{code}
+-- Overwrite the thunk Node points at with a black hole, when
+-- black-holing is enabled and blackHoleOnEntry says this closure
+-- needs it; picks the single-entry or updatable UPD_BH_* macro.
+blackHoleIt :: ClosureInfo -> Code	-- Only called for thunks
+blackHoleIt closure_info
+  = noBlackHolingFlag 	`thenFC` \ no_black_holing ->
+
+    if (blackHoleOnEntry no_black_holing closure_info)
+    then
+	absC (if closureSingleEntry(closure_info) then
+		CMacroStmt UPD_BH_SINGLE_ENTRY [CReg node]
+	      else
+		CMacroStmt UPD_BH_UPDATABLE [CReg node])
+	-- Node always points to it; see stg-details
+    else
+	nopC
+\end{code}
+
+\begin{code}
+-- For an updatable thunk, push an update frame around "code"; a CAF
+-- additionally gets a black hole allocated and linked onto the CAF
+-- list first, and that black hole becomes the updatee.  For a
+-- non-updatable thunk just bump the UPDF_OMITTED profiling counter.
+setupUpdate :: ClosureInfo -> Code -> Code	-- Only called for thunks
+	-- Nota Bene: this function does not change Node (even if it's a CAF),
+	-- so that the cost centre in the original closure can still be
+	-- extracted by a subsequent ENTER_CC_TCL
+
+setupUpdate closure_info code
+ = if (closureUpdReqd closure_info) then
+	link_caf_if_needed	`thenFC` \ update_closure ->
+	pushUpdateFrame update_closure vector code
+   else
+	-- Non-updatable thunks still need a resume-cost-centre "update"
+	-- frame to be pushed if we are doing evaluation profiling.
+
+--OLD:	evalPushRCCFrame False {-never primitive-} (
+	profCtrC SLIT("UPDF_OMITTED") []
+	`thenC`
+	code
+--	)
+ where
+   link_caf_if_needed :: FCode CAddrMode	-- Returns amode for closure to be updated
+   link_caf_if_needed
+     = if not (isStaticClosure closure_info) then
+	  returnFC (CReg node)
+       else
+
+	  -- First we must allocate a black hole, and link the
+	  -- CAF onto the CAF list
+
+		-- Alloc black hole specifying CC_HDR(Node) as the cost centre
+		--   Hack Warning: Using a CLitLit to get CAddrMode !
+	  let
+	      use_cc   = CLitLit SLIT("CC_HDR(R1.p)") PtrKind
+	      blame_cc = use_cc
+	  in
+	  allocDynClosure (blackHoleClosureInfo closure_info) use_cc blame_cc []
+							`thenFC` \ heap_offset ->
+	  getHpRelOffset heap_offset			`thenFC` \ hp_rel ->
+	  let  amode = CAddr hp_rel
+	  in
+	  absC (CMacroStmt UPD_CAF [CReg node, amode])
+							`thenC`
+	  returnFC amode
+
+   closure_label = mkClosureLabel (closureId closure_info)
+
+   -- Choose the update-return vector: direct/vectored labels when the
+   -- closure's type is statically known, else the standard register.
+   vector
+     = case (closureType closure_info) of
+	Nothing -> CReg StdUpdRetVecReg
+	Just (spec_tycon, _, spec_datacons) ->
+	    case ctrlReturnConvAlg spec_tycon of
+	      UnvectoredReturn 1 ->
+		let
+		    spec_data_con = head spec_datacons
+		    only_tag = getDataConTag spec_data_con
+		    direct = case dataReturnConvAlg spec_data_con of
+			ReturnInRegs _ -> mkConUpdCodePtrVecLabel spec_tycon only_tag
+			ReturnInHeap   -> mkStdUpdCodePtrVecLabel spec_tycon only_tag
+		    vectored = mkStdUpdVecTblLabel spec_tycon
+		in
+		    CUnVecLbl direct vectored
+
+	      UnvectoredReturn _ -> CReg StdUpdRetVecReg
+	      VectoredReturn _   -> CLbl (mkStdUpdVecTblLabel spec_tycon) DataPtrKind
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgClosure-Description]{Profiling Closure Description.}
+%* *
+%************************************************************************
+
+For "global" data constructors the description is simply the occurrence
+name of the data constructor itself (see \ref{CgConTbls-info-tables}).
+
+Otherwise it is determined by @closureDescription@ from the let
+binding information.
+
+\begin{code}
+-- Build the profiling description string "<Module.name>" for a
+-- let-bound closure.  The args and body parameters are accepted but
+-- not consulted here; only the module and binder name appear.
+closureDescription :: FAST_STRING	-- Module
+		   -> Id		-- Id of closure binding
+		   -> [Id]		-- Args
+		   -> PlainStgExpr	-- Body
+		   -> String
+
+	-- Not called for StgRhsCon which have global info tables built in
+	-- CgConTbls.lhs with a description generated from the data constructor
+
+closureDescription mod_name name args body =
+    uppShow 0 (prettyToUn (
+	ppBesides [ppChar '<',
+		   ppPStr mod_name,
+		   ppChar '.',
+		   ppr PprDebug name,
+		   ppChar '>']))
+\end{code}
+
+\begin{code}
+-- Pick the pair (use_cc, blame_cc) for a dynamic closure: use_cc is
+-- the cost centre stored in the object (the current-CC register when
+-- costs are current/subsumed); blame_cc is charged for the allocation,
+-- which becomes OVERHEAD for the trivial  x = f  wrappers introduced
+-- by boxHigherOrderArgs.
+chooseDynCostCentres cc args fvs body
+  = let
+	use_cc -- cost-centre we record in the object
+	  = if currentOrSubsumedCosts cc
+	    then CReg CurCostCentre
+	    else mkCCostCentre cc
+
+	blame_cc -- cost-centre on whom we blame the allocation
+	  = case (args, fvs, body) of
+	      ([], [just1], StgApp (StgVarAtom fun) [{-no args-}] _)
+		| just1 == fun
+		-> mkCCostCentre overheadCostCentre
+	      _ -> use_cc
+	    -- if it's an utterly trivial RHS, then it must be
+	    -- one introduced by boxHigherOrderArgs for profiling,
+	    -- so we charge it to "OVERHEAD".
+    in
+    (use_cc, blame_cc)
+\end{code}
diff --git a/ghc/compiler/codeGen/CgCompInfo.hi b/ghc/compiler/codeGen/CgCompInfo.hi
new file mode 100644
index 0000000000..abf7a52c89
--- /dev/null
+++ b/ghc/compiler/codeGen/CgCompInfo.hi
@@ -0,0 +1,94 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgCompInfo where
+import AbsCSyn(RegRelative)
+import HeapOffs(HeapOffset)
+data RegRelative {-# GHC_PRAGMA HpRel HeapOffset HeapOffset | SpARel Int Int | SpBRel Int Int | NodeRel HeapOffset #-}
+cON_UF_SIZE :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [2#] _N_ #-}
+iND_TAG :: Integer
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _N_ _N_ #-}
+lIVENESS_R1 :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [1#] _N_ #-}
+lIVENESS_R2 :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [2#] _N_ #-}
+lIVENESS_R3 :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [4#] _N_ #-}
+lIVENESS_R4 :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [8#] _N_ #-}
+lIVENESS_R5 :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [16#] _N_ #-}
+lIVENESS_R6 :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [32#] _N_ #-}
+lIVENESS_R7 :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [64#] _N_ #-}
+lIVENESS_R8 :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [128#] _N_ #-}
+mAX_Double_REG :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [2#] _N_ #-}
+mAX_FAMILY_SIZE_FOR_VEC_RETURNS :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [8#] _N_ #-}
+mAX_Float_REG :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [4#] _N_ #-}
+mAX_INTLIKE :: Integer
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 1 _#_ int2Integer# [] [16#] _N_ #-}
+mAX_SPEC_ALL_NONPTRS :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [5#] _N_ #-}
+mAX_SPEC_ALL_PTRS :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [12#] _N_ #-}
+mAX_SPEC_MIXED_FIELDS :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [3#] _N_ #-}
+mAX_SPEC_SELECTEE_SIZE :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [12#] _N_ #-}
+mAX_Vanilla_REG :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [8#] _N_ #-}
+mIN_BIG_TUPLE_SIZE :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [16#] _N_ #-}
+mIN_INTLIKE :: Integer
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _N_ _N_ #-}
+mIN_MP_INT_SIZE :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [16#] _N_ #-}
+mIN_SIZE_NonUpdHeapObject :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [1#] _N_ #-}
+mIN_SIZE_NonUpdStaticHeapObject :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [0#] _N_ #-}
+mIN_UPD_SIZE :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [2#] _N_ #-}
+mP_STRUCT_SIZE :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [3#] _N_ #-}
+oTHER_TAG :: Integer
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _N_ _N_ #-}
+sCC_CON_UF_SIZE :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [3#] _N_ #-}
+sCC_STD_UF_SIZE :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [5#] _N_ #-}
+sTD_UF_SIZE :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [4#] _N_ #-}
+spARelToInt :: RegRelative -> Int
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _N_ _N_ #-}
+spBRelToInt :: RegRelative -> Int
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _N_ _N_ #-}
+uF_COST_CENTRE :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [4#] _N_ #-}
+uF_RET :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [0#] _N_ #-}
+uF_SUA :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [2#] _N_ #-}
+uF_SUB :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [1#] _N_ #-}
+uF_UPDATEE :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [3#] _N_ #-}
+uNFOLDING_CHEAP_OP_COST :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [1#] _N_ #-}
+uNFOLDING_CON_DISCOUNT_WEIGHT :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [1#] _N_ #-}
+uNFOLDING_CREATION_THRESHOLD :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [30#] _N_ #-}
+uNFOLDING_DEAR_OP_COST :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [4#] _N_ #-}
+uNFOLDING_NOREP_LIT_COST :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [4#] _N_ #-}
+uNFOLDING_OVERRIDE_THRESHOLD :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [8#] _N_ #-}
+uNFOLDING_USE_THRESHOLD :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [3#] _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgCompInfo.lhs b/ghc/compiler/codeGen/CgCompInfo.lhs
new file mode 100644
index 0000000000..1ea5e045da
--- /dev/null
+++ b/ghc/compiler/codeGen/CgCompInfo.lhs
@@ -0,0 +1,189 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1994
+%
+\section[CgCompInfo]{Info about this compilation}
+
+!!!!! THIS CODE MUST AGREE WITH SMinterface.h !!!!!!
+
+*** This SHOULD BE the only module that is CPP'd with "stgdefs.h" stuff.
+
+\begin{code}
+#include "HsVersions.h"
+
+module CgCompInfo (
+ uNFOLDING_USE_THRESHOLD,
+ uNFOLDING_CREATION_THRESHOLD,
+ uNFOLDING_OVERRIDE_THRESHOLD,
+ uNFOLDING_CHEAP_OP_COST,
+ uNFOLDING_DEAR_OP_COST,
+ uNFOLDING_NOREP_LIT_COST,
+ uNFOLDING_CON_DISCOUNT_WEIGHT,
+
+ mAX_SPEC_ALL_PTRS,
+ mAX_SPEC_ALL_NONPTRS,
+ mAX_SPEC_MIXED_FIELDS,
+ mAX_SPEC_SELECTEE_SIZE,
+
+ mIN_UPD_SIZE,
+ mIN_SIZE_NonUpdHeapObject,
+ mIN_SIZE_NonUpdStaticHeapObject,
+
+ mAX_FAMILY_SIZE_FOR_VEC_RETURNS,
+
+ sTD_UF_SIZE, cON_UF_SIZE,
+ sCC_STD_UF_SIZE, sCC_CON_UF_SIZE,
+ uF_RET,
+ uF_SUB,
+ uF_SUA,
+ uF_UPDATEE,
+ uF_COST_CENTRE,
+
+ mAX_Vanilla_REG,
+#ifndef DPH
+ mAX_Float_REG,
+ mAX_Double_REG,
+#else
+ mAX_Data_REG,
+#endif {- Data Parallel Haskell -}
+
+ mIN_BIG_TUPLE_SIZE,
+
+ mIN_MP_INT_SIZE,
+ mP_STRUCT_SIZE,
+
+ oTHER_TAG, iND_TAG, -- semi-tagging stuff
+
+ lIVENESS_R1,
+ lIVENESS_R2,
+ lIVENESS_R3,
+ lIVENESS_R4,
+ lIVENESS_R5,
+ lIVENESS_R6,
+ lIVENESS_R7,
+ lIVENESS_R8,
+
+ mAX_INTLIKE, mIN_INTLIKE,
+
+
+ spARelToInt,
+ spBRelToInt,
+
+ -- and to make the interface self-sufficient...
+ RegRelative
+ ) where
+
+-- This magical #include brings in all the everybody-knows-these magic
+-- constants unfortunately, we need to be *explicit* about which one
+-- we want; if we just hope a -I... will get the right one, we could
+-- be in trouble.
+
+#ifndef DPH
+#include "../../includes/GhcConstants.h"
+#else
+#include "../dphsystem/imports/DphConstants.h"
+#endif {- Data Parallel Haskell -}
+
+import AbsCSyn
+import Util
+\end{code}
+
+All pretty arbitrary:
+\begin{code}
+uNFOLDING_USE_THRESHOLD = ( 3 :: Int)
+uNFOLDING_CREATION_THRESHOLD = (30 :: Int)
+uNFOLDING_OVERRIDE_THRESHOLD = ( 8 :: Int)
+uNFOLDING_CHEAP_OP_COST = ( 1 :: Int)
+uNFOLDING_DEAR_OP_COST = ( 4 :: Int)
+uNFOLDING_NOREP_LIT_COST = ( 4 :: Int)
+uNFOLDING_CON_DISCOUNT_WEIGHT = ( 1 :: Int)
+\end{code}
+
+\begin{code}
+mAX_SPEC_ALL_PTRS = (MAX_SPEC_ALL_PTRS :: Int)
+mAX_SPEC_ALL_NONPTRS = (MAX_SPEC_ALL_NONPTRS :: Int)
+mAX_SPEC_MIXED_FIELDS = (MAX_SPEC_OTHER_SIZE :: Int)
+mAX_SPEC_SELECTEE_SIZE = (MAX_SPEC_SELECTEE_SIZE :: Int)
+
+-- closure sizes: these do NOT include the header
+mIN_UPD_SIZE = (MIN_UPD_SIZE::Int)
+mIN_SIZE_NonUpdHeapObject = (MIN_NONUPD_SIZE::Int)
+mIN_SIZE_NonUpdStaticHeapObject = (0::Int)
+\end{code}
+
+A completely random number:
+\begin{code}
+mIN_BIG_TUPLE_SIZE = (16::Int)
+\end{code}
+
+Sizes of gmp objects:
+\begin{code}
+mIN_MP_INT_SIZE = (MIN_MP_INT_SIZE :: Int)
+mP_STRUCT_SIZE = (MP_STRUCT_SIZE :: Int)
+\end{code}
+
+Constants for semi-tagging; the tags associated with the data
+constructors will start at 0 and go up.
+\begin{code}
+oTHER_TAG = (INFO_OTHER_TAG :: Integer) -- (-1) unevaluated, probably
+iND_TAG = (INFO_IND_TAG :: Integer) -- (-1) NOT USED, REALLY
+\end{code}
+
+Stuff for liveness masks:
+\begin{code}
+lIVENESS_R1 = (LIVENESS_R1 :: Int)
+lIVENESS_R2 = (LIVENESS_R2 :: Int)
+lIVENESS_R3 = (LIVENESS_R3 :: Int)
+lIVENESS_R4 = (LIVENESS_R4 :: Int)
+lIVENESS_R5 = (LIVENESS_R5 :: Int)
+lIVENESS_R6 = (LIVENESS_R6 :: Int)
+lIVENESS_R7 = (LIVENESS_R7 :: Int)
+lIVENESS_R8 = (LIVENESS_R8 :: Int)
+\end{code}
+
+\begin{code}
+mIN_INTLIKE, mAX_INTLIKE :: Integer -- Only used to compare with (MachInt Integer)
+mIN_INTLIKE = MIN_INTLIKE
+mAX_INTLIKE = MAX_INTLIKE
+\end{code}
+
+\begin{code}
+-- THESE ARE DIRECTION SENSITIVE!
+spARelToInt (SpARel spA off) = spA - off -- equiv to: AREL(spA - off)
+spBRelToInt (SpBRel spB off) = off - spB -- equiv to: BREL(spB - off)
+\end{code}
+
+A section of code-generator-related MAGIC CONSTANTS.
+\begin{code}
+mAX_FAMILY_SIZE_FOR_VEC_RETURNS = (MAX_VECTORED_RTN::Int) -- pretty arbitrary
+-- If you change this, you may need to change runtimes/standard/Update.lhc
+
+-- The update frame sizes
+sTD_UF_SIZE = (NOSCC_STD_UF_SIZE::Int)
+cON_UF_SIZE = (NOSCC_CON_UF_SIZE::Int)
+
+-- Same again, with profiling
+sCC_STD_UF_SIZE = (SCC_STD_UF_SIZE::Int)
+sCC_CON_UF_SIZE = (SCC_CON_UF_SIZE::Int)
+
+-- Offsets in an update frame. They don't change with profiling!
+uF_RET = (UF_RET::Int)
+uF_SUB = (UF_SUB::Int)
+uF_SUA = (UF_SUA::Int)
+uF_UPDATEE = (UF_UPDATEE::Int)
+uF_COST_CENTRE = (UF_COST_CENTRE::Int)
+\end{code}
+
+\begin{code}
+#ifndef DPH
+mAX_Vanilla_REG = (MAX_VANILLA_REG :: Int)
+mAX_Float_REG = (MAX_FLOAT_REG :: Int)
+mAX_Double_REG = (MAX_DOUBLE_REG :: Int)
+#else
+-- The DAP has only got 14 registers :-( After various heap and stack
+-- pointers we dont have that many left over..
+mAX_Vanilla_REG = (4 :: Int) -- Ptr, Int, Char, Float
+mAX_Data_REG = (4 :: Int) -- Int, Char, Float, Double
+mAX_Float_REG = error "mAX_Float_REG : not used in DPH"
+mAX_Double_REG = error "mAX_Double_REG: not used in DPH"
+#endif {- Data Parallel Haskell -}
+\end{code}
diff --git a/ghc/compiler/codeGen/CgCon.hi b/ghc/compiler/codeGen/CgCon.hi
new file mode 100644
index 0000000000..f90731dd3c
--- /dev/null
+++ b/ghc/compiler/codeGen/CgCon.hi
@@ -0,0 +1,35 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgCon where
+import AbsCSyn(AbstractC, CAddrMode, CExprMacro, MagicId, RegRelative)
+import BasicLit(BasicLit)
+import CLabelInfo(CLabel)
+import CgBindery(CgIdInfo)
+import CgMonad(CgInfoDownwards, CgState, StubFlag)
+import CostCentre(CostCentre)
+import HeapOffs(HeapOffset)
+import Id(Id, IdDetails)
+import IdInfo(IdInfo)
+import PreludePS(_PackedString)
+import PrimKind(PrimKind)
+import PrimOps(PrimOp)
+import StgSyn(StgAtom)
+import UniType(UniType)
+import UniqFM(UniqFM)
+import Unique(Unique)
+data CAddrMode {-# GHC_PRAGMA CVal RegRelative PrimKind | CAddr RegRelative | CReg MagicId | CTableEntry CAddrMode CAddrMode PrimKind | CTemp Unique PrimKind | CLbl CLabel PrimKind | CUnVecLbl CLabel CLabel | CCharLike CAddrMode | CIntLike CAddrMode | CString _PackedString | CLit BasicLit | CLitLit _PackedString PrimKind | COffset HeapOffset | CCode AbstractC | CLabelledCode CLabel AbstractC | CJoinPoint Int Int | CMacroExpr PrimKind CExprMacro [CAddrMode] | CCostCentre CostCentre Bool #-}
+data MagicId {-# GHC_PRAGMA BaseReg | StkOReg | VanillaReg PrimKind Int# | FloatReg Int# | DoubleReg Int# | TagReg | RetReg | SpA | SuA | SpB | SuB | Hp | HpLim | LivenessReg | ActivityReg | StdUpdRetVecReg | StkStubReg | CurCostCentre | VoidReg #-}
+data CgState {-# GHC_PRAGMA MkCgState AbstractC (UniqFM CgIdInfo) ((Int, [(Int, StubFlag)], Int, Int), (Int, [Int], Int, Int), (HeapOffset, HeapOffset)) #-}
+data Id {-# GHC_PRAGMA Id Unique UniType IdInfo IdDetails #-}
+data PrimKind {-# GHC_PRAGMA PtrKind | CodePtrKind | DataPtrKind | RetKind | InfoPtrKind | CostCentreKind | CharKind | IntKind | WordKind | AddrKind | FloatKind | DoubleKind | MallocPtrKind | StablePtrKind | ArrayKind | ByteArrayKind | VoidKind #-}
+data PrimOp
+ {-# GHC_PRAGMA CharGtOp | CharGeOp | CharEqOp | CharNeOp | CharLtOp | CharLeOp | IntGtOp | IntGeOp | IntEqOp | IntNeOp | IntLtOp | IntLeOp | WordGtOp | WordGeOp | WordEqOp | WordNeOp | WordLtOp | WordLeOp | AddrGtOp | AddrGeOp | AddrEqOp | AddrNeOp | AddrLtOp | AddrLeOp | FloatGtOp | FloatGeOp | FloatEqOp | FloatNeOp | FloatLtOp | FloatLeOp | DoubleGtOp | DoubleGeOp | DoubleEqOp | DoubleNeOp | DoubleLtOp | DoubleLeOp | OrdOp | ChrOp | IntAddOp | IntSubOp | IntMulOp | IntQuotOp | IntDivOp | IntRemOp | IntNegOp | IntAbsOp | AndOp | OrOp | NotOp | SllOp | SraOp | SrlOp | ISllOp | ISraOp | ISrlOp | Int2WordOp | Word2IntOp | Int2AddrOp | Addr2IntOp | FloatAddOp | FloatSubOp | FloatMulOp | FloatDivOp | FloatNegOp | Float2IntOp | Int2FloatOp | FloatExpOp | FloatLogOp | FloatSqrtOp | FloatSinOp | FloatCosOp | FloatTanOp | FloatAsinOp | FloatAcosOp | FloatAtanOp | FloatSinhOp | FloatCoshOp | FloatTanhOp | FloatPowerOp | DoubleAddOp | DoubleSubOp | DoubleMulOp | DoubleDivOp | DoubleNegOp | Double2IntOp | Int2DoubleOp | Double2FloatOp | Float2DoubleOp | DoubleExpOp | DoubleLogOp | DoubleSqrtOp | DoubleSinOp | DoubleCosOp | DoubleTanOp | DoubleAsinOp | DoubleAcosOp | DoubleAtanOp | DoubleSinhOp | DoubleCoshOp | DoubleTanhOp | DoublePowerOp | IntegerAddOp | IntegerSubOp | IntegerMulOp | IntegerQuotRemOp | IntegerDivModOp | IntegerNegOp | IntegerCmpOp | Integer2IntOp | Int2IntegerOp | Word2IntegerOp | Addr2IntegerOp | FloatEncodeOp | FloatDecodeOp | DoubleEncodeOp | DoubleDecodeOp | NewArrayOp | NewByteArrayOp PrimKind | SameMutableArrayOp | SameMutableByteArrayOp | ReadArrayOp | WriteArrayOp | IndexArrayOp | ReadByteArrayOp PrimKind | WriteByteArrayOp PrimKind | IndexByteArrayOp PrimKind | IndexOffAddrOp PrimKind | UnsafeFreezeArrayOp | UnsafeFreezeByteArrayOp | NewSynchVarOp | TakeMVarOp | PutMVarOp | ReadIVarOp | WriteIVarOp | MakeStablePtrOp | DeRefStablePtrOp | CCallOp _PackedString Bool Bool [UniType] UniType | ErrorIOPrimOp | ReallyUnsafePtrEqualityOp | SeqOp | ParOp | 
ForkOp | DelayOp | WaitOp #-}
+data StgAtom a {-# GHC_PRAGMA StgVarAtom a | StgLitAtom BasicLit #-}
+bindConArgs :: Id -> [Id] -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 2 _U_ 1222 _N_ _S_ "U(LLLS)L" {_A_ 5 _U_ 2222222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+buildDynCon :: Id -> CostCentre -> Id -> [CAddrMode] -> Bool -> CgInfoDownwards -> CgState -> (CgIdInfo, CgState)
+ {-# GHC_PRAGMA _A_ 5 _U_ 2222122 _N_ _S_ "LLLLE" _N_ _N_ #-}
+cgReturnDataCon :: Id -> [CAddrMode] -> Bool -> UniqFM Id -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 6 _U_ 222222 _N_ _S_ "LLLLU(LLU(LLS))L" _N_ _N_ #-}
+cgTopRhsCon :: Id -> Id -> [StgAtom Id] -> Bool -> CgInfoDownwards -> CgState -> ((Id, CgIdInfo), CgState)
+ {-# GHC_PRAGMA _A_ 4 _U_ 222022 _N_ _S_ "LLSA" {_A_ 3 _U_ 22222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgCon.lhs b/ghc/compiler/codeGen/CgCon.lhs
new file mode 100644
index 0000000000..05ef0e81ec
--- /dev/null
+++ b/ghc/compiler/codeGen/CgCon.lhs
@@ -0,0 +1,515 @@
+%
+% (c) The GRASP Project, Glasgow University, 1992-1995
+%
+\section[CgCon]{Code generation for constructors}
+
+This module provides the support code for @StgToAbstractC@ to deal
+with {\em constructors} on the RHSs of let(rec)s. See also
+@CgClosure@, which deals with closures.
+
+\begin{code}
+#include "HsVersions.h"
+
+module CgCon (
+ -- it's all exported, actually...
+ cgTopRhsCon, buildDynCon,
+ bindConArgs,
+ cgReturnDataCon,
+
+ -- and to make the interface self-sufficient...
+ Id, StgAtom, CgState, CAddrMode,
+ PrimKind, PrimOp, MagicId
+ ) where
+
+IMPORT_Trace -- ToDo: rm (debugging)
+import Outputable
+import Pretty
+
+import StgSyn
+import CgMonad
+import AbsCSyn
+
+import AbsUniType ( maybeCharLikeTyCon, maybeIntLikeTyCon, TyVar,
+ TyCon, Class, UniType
+ )
+import CgBindery ( getAtomAmode, getAtomAmodes, bindNewToNode,
+ bindArgsToRegs, newTempAmodeAndIdInfo, idInfoToAmode
+ )
+import CgClosure ( cgTopRhsClosure )
+import CgHeapery ( allocDynClosure, heapCheck
+#ifdef GRAN
+ , fetchAndReschedule -- HWL
+#endif {- GRAN -}
+ )
+import CgCompInfo ( mAX_INTLIKE, mIN_INTLIKE )
+
+import CgRetConv ( dataReturnConvAlg, mkLiveRegsBitMask,
+ CtrlReturnConvention(..), DataReturnConvention(..)
+ )
+import CgTailCall ( performReturn, mkStaticAlgReturnCode )
+import CgUsages ( getHpRelOffset )
+import CLabelInfo ( CLabel, mkClosureLabel, mkInfoTableLabel,
+ mkPhantomInfoTableLabel,
+ mkConEntryLabel, mkStdEntryLabel
+ )
+import ClosureInfo -- hiding ( auxInfoTableLabelFromCI ) -- I hate pragmas
+ {-( mkConLFInfo, mkLFArgument, closureLFInfo,
+ layOutDynCon, layOutDynClosure,
+ layOutStaticClosure, UpdateFlag(..),
+ mkClosureLFInfo, layOutStaticNoFVClosure
+ )-}
+import Id ( getIdKind, getDataConTag, getDataConTyCon,
+ isDataCon, fIRST_TAG, DataCon(..), ConTag(..)
+ )
+import CmdLineOpts ( GlobalSwitch(..) )
+import Maybes ( maybeToBool, Maybe(..) )
+import PrimKind ( PrimKind(..), isFloatingKind, getKindSize )
+import CostCentre
+import UniqSet -- ( emptyUniqSet, UniqSet(..) )
+import Util
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[toplevel-constructors]{Top-level constructors}
+%* *
+%************************************************************************
+
+\begin{code}
+cgTopRhsCon :: Id -- Name of thing bound to this RHS
+ -> DataCon -- Id
+ -> [PlainStgAtom] -- Args
+ -> Bool -- All zero-size args (see buildDynCon)
+ -> FCode (Id, CgIdInfo)
+\end{code}
+
+Special Case:
+Constructors some of whose arguments are of \tr{Float#} or
+\tr{Double#} type, {\em or} which are ``lit lits'' (which are given
+\tr{Addr#} type).
+
+These ones have to be compiled as re-entrant thunks rather than closures,
+because we can't figure out a way to persuade C to allow us to initialise a
+static closure with Floats and Doubles!
+Thus, for \tr{x = 2.0} (defaults to Double), we get:
+
+\begin{verbatim}
+-- The STG syntax:
+ Main.x = MkDouble [2.0##]
+
+-- C Code:
+
+-- closure:
+ SET_STATIC_HDR(Main_x_closure,Main_x_static,CC_DATA,,EXTDATA_RO)
+ };
+-- its *own* info table:
+ STATIC_INFO_TABLE(Main_x,Main_x_entry,,,,EXTFUN,???,":MkDouble","Double");
+-- with its *own* entry code:
+ STGFUN(Main_x_entry) {
+ P_ u1701;
+ RetDouble1=2.0;
+ u1701=(P_)*SpB;
+ SpB=SpB-1;
+ JMP_(u1701[0]);
+ }
+\end{verbatim}
+
+The above has the down side that each floating-point constant will end
+up with its own info table (rather than sharing the MkFloat/MkDouble
+ones). On the plus side, however, it does return a value (\tr{2.0})
+{\em straight away}.
+
+Here, then is the implementation: just pretend it's a non-updatable
+thunk. That is, instead of
+
+ x = F# 3.455#
+
+pretend we've seen
+
+ x = [] \n [] -> F# 3.455#
+
+\begin{code}
+top_cc = dontCareCostCentre -- out here to avoid a cgTopRhsCon CAF (sigh)
+top_ccc = mkCCostCentre dontCareCostCentre -- because it's static data
+
+cgTopRhsCon name con args all_zero_size_args
+ | any (isFloatingKind . getAtomKind) args
+ || any isLitLitStgAtom args
+ = cgTopRhsClosure name top_cc NoStgBinderInfo [] body lf_info
+ where
+ body = StgConApp con args emptyUniqSet{-emptyLiveVarSet-}
+ lf_info = mkClosureLFInfo True {- Top level -} [] ReEntrant [] body
+\end{code}
+
+OK, so now we have the general case.
+
+\begin{code}
+cgTopRhsCon name con args all_zero_size_args
+ = (
+ ASSERT(isDataCon con)
+
+ -- LAY IT OUT
+ getAtomAmodes args `thenFC` \ amodes ->
+
+ let
+ (closure_info, amodes_w_offsets)
+ = layOutStaticClosure name getAmodeKind amodes lf_info
+ in
+ -- HWL: In 0.22 there was a heap check in here that had to be changed.
+ -- CHECK if having no heap check is ok for GrAnSim here!!!
+
+ -- BUILD THE OBJECT
+ absC (CStaticClosure
+ closure_label -- Labelled with the name on lhs of defn
+ closure_info -- Closure is static
+ top_ccc
+ (map fst amodes_w_offsets)) -- Sorted into ptrs first, then nonptrs
+
+ ) `thenC`
+
+ -- RETURN
+ returnFC (name, stableAmodeIdInfo name (CLbl closure_label PtrKind) lf_info)
+ where
+ con_tycon = getDataConTyCon con
+ lf_info = mkConLFInfo con
+
+ closure_label = mkClosureLabel name
+ info_label = mkInfoTableLabel con
+ con_entry_label = mkConEntryLabel con
+ entry_label = mkStdEntryLabel name
+\end{code}
+
+The general case is:
+\begin{verbatim}
+-- code:
+ data Foo = MkFoo
+ x = MkFoo
+
+-- STG code:
+STG syntax:
+ Main.x = Main.MkFoo []
+
+-- interesting parts of the C Code:
+
+-- closure for "x":
+ SET_STATIC_HDR(Main_x_closure,Main_MkFoo_static,CC_DATA,,EXTDATA_RO)
+ };
+-- entry code for "x":
+ STGFUN(Main_x_entry) {
+ Node=(W_)(Main_x_closure);
+ STGJUMP(Main_MkFoo_entry);
+ }
+\end{verbatim}
+
+Observe: (1)~We create a static closure for \tr{x}, {\em reusing} the
+regular \tr{MkFoo} info-table and entry code. (2)~However: the
+\tr{MkFoo} code expects Node to be set, but the caller of \tr{x_entry}
+will not have set it. Therefore, the whole point of \tr{x_entry} is
+to set node (and then call the shared \tr{MkFoo} entry code).
+
+
+
+Special Case:
+For top-level Int/Char constants. We get entry-code fragments of the form:
+
+\begin{verbatim}
+-- code:
+ y = 1
+
+-- entry code for "y":
+ STGFUN(Main_y_entry) {
+ Node=(W_)(Main_y_closure);
+ STGJUMP(I#_entry);
+ }
+\end{verbatim}
+
+This is pretty tiresome: we {\em know} what the constant is---we'd
+rather just return it. We end up with something that's a hybrid
+between the Float/Double and general cases: (a)~like Floats/Doubles,
+the entry-code returns the value immediately; (b)~like the general
+case, we share the data-constructor's std info table. So, what we
+want is:
+\begin{verbatim}
+-- code:
+ z = 1
+
+-- STG code:
+STG syntax:
+ Main.z = I# [1#]
+
+-- interesting parts of the C Code:
+
+-- closure for "z" (shares I# info table):
+ SET_STATIC_HDR(Main_z_closure,I#_static,CC_DATA,,EXTDATA_RO)
+ };
+-- entry code for "z" (do the business directly):
+ STGFUN(Main_z_entry) {
+ P_ u1702;
+ Ret1=1;
+ u1702=(P_)*SpB;
+ SpB=SpB-1;
+ JMP_(u1702[0]);
+ }
+\end{verbatim}
+
+This blob used to be in cgTopRhsCon, but I don't see how we can
+jump direct to the named code for a constructor; any external entries
+will be via Node. Generating all this extra code is a real waste
+for big static data structures. So I've nuked it. SLPJ Sept 94
+
+
+Further discourse on these entry-code fragments (NB this isn't done
+yet [ToDo]): They're really pretty pointless, except for {\em
+exported} top-level constants (the rare case). Consider:
+\begin{verbatim}
+y = p : ps -- y is not exported
+f a b = y
+g c = (y, c)
+\end{verbatim}
+Why have a \tr{y_entry} fragment at all? The code generator should
+``know enough'' about \tr{y} not to need it. For the first case
+above, with \tr{y} in ``head position,'' it should generate code just
+as for an \tr{StgRhsCon} (possibly because the STG simplification
+actually did the unfolding to make it so). At the least, it should
+load up \tr{Node} and call \tr{Cons}'s entry code---not some special
+\tr{y_entry} code.
+
+\begin{pseudocode}
+ -- WE NEED AN ENTRY PT, IN CASE SOMEONE JUMPS DIRECT TO name
+ -- FROM OUTSIDE. NB: this CCodeBlock precedes the
+ -- CStaticClosure for the same reason (fewer forward refs) as
+ -- we did in CgClosure.
+
+ -- we either have ``in-line'' returning code (special case)
+ -- or we set Node and jump to the constructor's entry code
+
+ (if maybeToBool (maybeCharLikeTyCon con_tycon)
+ || maybeToBool (maybeIntLikeTyCon con_tycon)
+ then -- special case
+ getAbsC (-- OLD: No, we don't fiddle cost-centres on
+ -- entry to data values any more (WDP 94/06)
+ -- lexCostCentreC "ENTER_CC_D" [top_ccc]
+ -- `thenC`
+ cgReturnDataCon con amodes all_zero_size_args emptyUniqSet{-no live vars-})
+ else -- boring case
+ returnFC (
+ mkAbstractCs [
+ -- Node := this_closure
+ CAssign (CReg node) (CLbl closure_label PtrKind),
+ -- InfoPtr := info table for this_closure
+ CAssign (CReg infoptr) (CLbl info_label DataPtrKind),
+ -- Jump to std code for this constructor
+ CJump (CLbl con_entry_label CodePtrKind)
+ ])
+ ) `thenFC` \ ret_absC ->
+
+ absC (CCodeBlock entry_label ret_absC) `thenC`
+\end{pseudocode}
+
+=========================== END OF OLD STUFF ==============================
+
+
+%************************************************************************
+%* *
+%* non-top-level constructors *
+%* *
+%************************************************************************
+\subsection[code-for-constructors]{The code for constructors}
+
+\begin{code}
+buildDynCon :: Id -- Name of the thing to which this constr will
+ -- be bound
+ -> CostCentre -- Where to grab cost centre from;
+ -- current CC if currentOrSubsumedCosts
+ -> DataCon -- The data constructor
+ -> [CAddrMode] -- Its args
+ -> Bool -- True <=> all args (if any) are
+ -- of "zero size" (i.e., VoidKind);
+ -- The reason we don't just look at the
+ -- args is that we may be in a "knot", and
+ -- premature looking at the args will cause
+ -- the compiler to black-hole!
+ -> FCode CgIdInfo -- Return details about how to find it
+\end{code}
+
+First we deal with the case of zero-arity constructors. Now, they
+will probably be unfolded, so we don't expect to see this case
+much, if at all, but it does no harm, and sets the scene for characters.
+
+In the case of zero-arity constructors, or, more accurately,
+those which have exclusively size-zero (VoidKind) args,
+we generate no code at all.
+
+\begin{code}
+buildDynCon binder cc con args all_zero_size_args@True
+ = ASSERT(isDataCon con)
+ returnFC (stableAmodeIdInfo binder
+ (CLbl (mkClosureLabel con) PtrKind)
+ (mkConLFInfo con))
+\end{code}
+
+Now for @Char@-like closures. We generate an assignment of the
+address of the closure to a temporary. It would be possible simply to
+generate no code, and record the addressing mode in the environment, but
+we'd have to be careful if the argument wasn't a constant --- so for simplicity
+we just always asssign to a temporary.
+
+Last special case: @Int@-like closures. We only special-case the situation
+in which the argument is a literal in the range @mIN_INTLIKE@..@mAX_INTLILKE@.
+NB: for @Char@-like closures we can work with any old argument, but
+for @Int@-like ones the argument has to be a literal. Reason: @Char@ like
+closures have an argument type which is guaranteed in range.
+
+Because of this, we use can safely return an addressing mode.
+
+\begin{code}
+buildDynCon binder cc con [arg_amode] all_zero_size_args@False
+
+ | maybeToBool (maybeCharLikeTyCon tycon)
+ = ASSERT(isDataCon con)
+ absC (CAssign temp_amode (CCharLike arg_amode)) `thenC`
+ returnFC temp_id_info
+
+ | maybeToBool (maybeIntLikeTyCon tycon) && in_range_int_lit arg_amode
+ = ASSERT(isDataCon con)
+ returnFC (stableAmodeIdInfo binder (CIntLike arg_amode) (mkConLFInfo con))
+ where
+ tycon = getDataConTyCon con
+ (temp_amode, temp_id_info) = newTempAmodeAndIdInfo binder (mkConLFInfo con)
+
+ in_range_int_lit (CLit (MachInt val _)) = (val <= mAX_INTLIKE) && (val >= mIN_INTLIKE)
+ in_range_int_lit other_amode = False
+\end{code}
+
+Now the general case.
+
+\begin{code}
+buildDynCon binder cc con args all_zero_size_args@False
+ = ASSERT(isDataCon con)
+ allocDynClosure closure_info use_cc blame_cc amodes_w_offsets `thenFC` \ hp_off ->
+ returnFC (heapIdInfo binder hp_off (mkConLFInfo con))
+ where
+ (closure_info, amodes_w_offsets)
+ = layOutDynClosure binder getAmodeKind args (mkConLFInfo con)
+
+ use_cc -- cost-centre to stick in the object
+ = if currentOrSubsumedCosts cc
+ then CReg CurCostCentre
+ else mkCCostCentre cc
+
+ blame_cc = use_cc -- cost-centre on which to blame the alloc (same)
+\end{code}
+
+
+%************************************************************************
+%* *
+%* constructor-related utility function: *
+%* bindConArgs is called from cgAlt of a case *
+%* *
+%************************************************************************
+\subsection[constructor-utilities]{@bindConArgs@: constructor-related utility}
+
+@bindConArgs@ $con args$ augments the environment with bindings for the
+binders $args$, assuming that we have just returned from a @case@ which
+found a $con$.
+
+\begin{code}
+bindConArgs :: DataCon -> [Id] -> Code
+bindConArgs con args
+ = ASSERT(isDataCon con)
+ case (dataReturnConvAlg con) of
+ ReturnInRegs rs -> bindArgsToRegs args rs
+ ReturnInHeap ->
+ let
+ (_, args_w_offsets) = layOutDynCon con getIdKind args
+ in
+ mapCs bind_arg args_w_offsets
+ where
+ bind_arg (arg, offset) = bindNewToNode arg offset mkLFArgument
+\end{code}
+
+
+%************************************************************************
+%* *
+\subsubsection[CgRetConv-cgReturnDataCon]{Actually generate code for a constructor return}
+%* *
+%************************************************************************
+
+
+Note: it's the responsibility of the @cgReturnDataCon@ caller to be
+sure the @amodes@ passed don't conflict with each other.
+\begin{code}
+cgReturnDataCon :: DataCon -> [CAddrMode] -> Bool -> PlainStgLiveVars -> Code
+
+cgReturnDataCon con amodes all_zero_size_args live_vars
+ = ASSERT(isDataCon con)
+ getEndOfBlockInfo `thenFC` \ (EndOfBlockInfo args_spa args_spb sequel) ->
+
+ case sequel of
+
+ CaseAlts _ (Just (alts, Just (maybe_deflt_binder, (_,deflt_lbl))))
+ | not (getDataConTag con `is_elem` map fst alts)
+ ->
+ -- Special case! We're returning a constructor to the default case
+ -- of an enclosing case. For example:
+ --
+ -- case (case e of (a,b) -> C a b) of
+ -- D x -> ...
+ -- y -> ...<returning here!>...
+ --
+ -- In this case,
+ -- if the default is a non-bind-default (ie does not use y),
+ -- then we should simply jump to the default join point;
+ --
+ -- if the default is a bind-default (ie does use y), we
+ -- should return the constructor IN THE HEAP, pointed to by Node,
+ -- **regardless** of the return convention of the constructor C.
+
+ case maybe_deflt_binder of
+ Just binder ->
+ buildDynCon binder useCurrentCostCentre con amodes all_zero_size_args
+ `thenFC` \ idinfo ->
+ idInfoToAmode PtrKind idinfo `thenFC` \ amode ->
+ performReturn (move_to_reg amode node) jump_to_join_point live_vars
+
+ Nothing ->
+ performReturn AbsCNop {- No reg assts -} jump_to_join_point live_vars
+ where
+ is_elem = isIn "cgReturnDataCon"
+ jump_to_join_point sequel = absC (CJump (CLbl deflt_lbl CodePtrKind))
+ -- Ignore the sequel: we've already looked at it above
+
+ other_sequel -> -- The usual case
+ case dataReturnConvAlg con of
+
+ ReturnInHeap ->
+ -- BUILD THE OBJECT IN THE HEAP
+ -- The first "con" says that the name bound to this
+ -- closure is "con", which is a bit of a fudge, but it only
+ -- affects profiling (ToDo?)
+ buildDynCon con useCurrentCostCentre con amodes all_zero_size_args
+ `thenFC` \ idinfo ->
+ idInfoToAmode PtrKind idinfo `thenFC` \ amode ->
+
+ -- MAKE NODE POINT TO IT
+ let reg_assts = move_to_reg amode node
+ info_lbl = mkInfoTableLabel con
+ in
+
+ -- RETURN
+ profCtrC SLIT("RET_NEW_IN_HEAP") [] `thenC`
+
+ performReturn reg_assts (mkStaticAlgReturnCode con (Just info_lbl)) live_vars
+
+ ReturnInRegs regs ->
+ let reg_assts = mkAbstractCs (zipWith move_to_reg amodes regs)
+ info_lbl = mkPhantomInfoTableLabel con
+ in
+--OLD:WDP:94/06 evalCostCentreC "SET_RetCC" [CReg CurCostCentre] `thenC`
+ profCtrC SLIT("RET_NEW_IN_REGS") [] `thenC`
+
+ performReturn reg_assts (mkStaticAlgReturnCode con (Just info_lbl)) live_vars
+ where
+ move_to_reg :: CAddrMode -> MagicId -> AbstractC
+ move_to_reg src_amode dest_reg = CAssign (CReg dest_reg) src_amode
+\end{code}
diff --git a/ghc/compiler/codeGen/CgConTbls.hi b/ghc/compiler/codeGen/CgConTbls.hi
new file mode 100644
index 0000000000..9779b1dc91
--- /dev/null
+++ b/ghc/compiler/codeGen/CgConTbls.hi
@@ -0,0 +1,24 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgConTbls where
+import AbsCSyn(AbstractC, CAddrMode, CStmtMacro, MagicId, RegRelative, ReturnInfo)
+import BasicLit(BasicLit)
+import CLabelInfo(CLabel)
+import CgMonad(CompilationInfo)
+import ClosureInfo(ClosureInfo)
+import CmdLineOpts(GlobalSwitch)
+import CostCentre(CostCentre)
+import FiniteMap(FiniteMap)
+import Maybes(Labda)
+import PreludePS(_PackedString)
+import PrimOps(PrimOp)
+import TCE(TCE(..))
+import TyCon(TyCon)
+import UniType(UniType)
+import UniqFM(UniqFM)
+data AbstractC {-# GHC_PRAGMA AbsCNop | AbsCStmts AbstractC AbstractC | CAssign CAddrMode CAddrMode | CJump CAddrMode | CFallThrough CAddrMode | CReturn CAddrMode ReturnInfo | CSwitch CAddrMode [(BasicLit, AbstractC)] AbstractC | CCodeBlock CLabel AbstractC | CInitHdr ClosureInfo RegRelative CAddrMode Bool | COpStmt [CAddrMode] PrimOp [CAddrMode] Int [MagicId] | CSimultaneous AbstractC | CMacroStmt CStmtMacro [CAddrMode] | CCallProfCtrMacro _PackedString [CAddrMode] | CCallProfCCMacro _PackedString [CAddrMode] | CStaticClosure CLabel ClosureInfo CAddrMode [CAddrMode] | CClosureInfoAndCode ClosureInfo AbstractC (Labda AbstractC) CAddrMode [Char] | CRetVector CLabel [Labda CAddrMode] AbstractC | CRetUnVector CLabel CAddrMode | CFlatRetVector CLabel [CAddrMode] | CCostCentreDecl Bool CostCentre | CClosureUpdInfo AbstractC | CSplitMarker #-}
+data CompilationInfo {-# GHC_PRAGMA MkCompInfo (GlobalSwitch -> Bool) _PackedString #-}
+type TCE = UniqFM TyCon
+data UniqFM a {-# GHC_PRAGMA EmptyUFM | LeafUFM Int# a | NodeUFM Int# Int# (UniqFM a) (UniqFM a) #-}
+genStaticConBits :: CompilationInfo -> [TyCon] -> FiniteMap TyCon [[Labda UniType]] -> AbstractC
+ {-# GHC_PRAGMA _A_ 3 _U_ 211 _N_ _N_ _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgConTbls.lhs b/ghc/compiler/codeGen/CgConTbls.lhs
new file mode 100644
index 0000000000..b37689f197
--- /dev/null
+++ b/ghc/compiler/codeGen/CgConTbls.lhs
@@ -0,0 +1,430 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1995
+%
+\section[CgConTbls]{Info tables and update bits for constructors}
+
+\begin{code}
+#include "HsVersions.h"
+
+module CgConTbls (
+ genStaticConBits,
+
+ -- and to complete the interface...
+ TCE(..), UniqFM, CompilationInfo, AbstractC
+ ) where
+
+import Pretty -- ToDo: rm (debugging)
+import Outputable
+
+import AbsCSyn
+import CgMonad
+
+import AbsUniType ( getTyConDataCons, kindFromType,
+ maybeIntLikeTyCon,
+ mkSpecTyCon, isLocalSpecTyCon,
+ TyVarTemplate, TyCon, Class,
+ TauType(..), UniType, ThetaType(..)
+ IF_ATTACK_PRAGMAS(COMMA cmpTyCon COMMA cmpClass)
+ IF_ATTACK_PRAGMAS(COMMA cmpUniType)
+ )
+import CgHeapery ( heapCheck, allocDynClosure )
+import CgRetConv ( dataReturnConvAlg, ctrlReturnConvAlg,
+ mkLiveRegsBitMask,
+ CtrlReturnConvention(..),
+ DataReturnConvention(..)
+ )
+import CgTailCall ( performReturn, mkStaticAlgReturnCode )
+import CgUsages ( getHpRelOffset )
+import CLabelInfo ( mkConEntryLabel, mkStaticConEntryLabel,
+ mkInfoTableLabel,
+ mkClosureLabel, --UNUSED: mkConUpdCodePtrUnvecLabel,
+ mkConUpdCodePtrVecLabel, mkStdUpdCodePtrVecLabel,
+ mkStdUpdVecTblLabel, CLabel
+ )
+import ClosureInfo ( layOutStaticClosure, layOutDynCon,
+ closureSizeWithoutFixedHdr, closurePtrsSize,
+ fitsMinUpdSize, mkConLFInfo, layOutPhantomClosure,
+ infoTableLabelFromCI
+ )
+import CmdLineOpts ( GlobalSwitch(..) )
+import FiniteMap
+import Id ( getDataConTag, getDataConSig, getDataConTyCon,
+ mkSameSpecCon,
+ getDataConArity, fIRST_TAG, ConTag(..),
+ DataCon(..)
+ )
+import CgCompInfo ( uF_UPDATEE )
+import Maybes ( maybeToBool, Maybe(..) )
+import PrimKind ( getKindSize, retKindSize )
+import CostCentre
+import UniqSet -- ( emptyUniqSet, UniqSet(..) )
+import TCE ( rngTCE, TCE(..), UniqFM )
+import Util
+\end{code}
+
+For every constructor we generate the following info tables:
+ A static info table, for static instances of the constructor,
+
+ For constructors which return in registers (and only them),
+ an "inregs" info table. This info table is rather emaciated;
+ it only contains update code and tag.
+
+ Plus:
+
+\begin{tabular}{lll}
+Info tbls & Macro & Kind of constructor \\
+\hline
+info & @CONST_INFO_TABLE@& Zero arity (no info -- compiler uses static closure)\\
+info & @CHARLIKE_INFO_TABLE@& Charlike (no info -- compiler indexes fixed array)\\
+info & @INTLIKE_INFO_TABLE@& Intlike; the one macro generates both info tbls\\
+info & @SPEC_INFO_TABLE@& SPECish, and bigger than or equal to @MIN_UPD_SIZE@\\
+info & @GEN_INFO_TABLE@& GENish (hence bigger than or equal to @MIN_UPD_SIZE@)\\
+\end{tabular}
+
+Possible info tables for constructor con:
+
+\begin{description}
+\item[@con_info@:]
+Used for dynamically let(rec)-bound occurrences of
+the constructor, and for updates. For constructors
+which are int-like, char-like or nullary, when GC occurs,
+the closure tries to get rid of itself.
+
+\item[@con_inregs_info@:]
+Used when returning a new constructor in registers.
+Only for return-in-regs constructors.
+Macro: @INREGS_INFO_TABLE@.
+
+\item[@con_static_info@:]
+Static occurrences of the constructor
+Macro: @STATIC_INFO_TABLE@.
+\end{description}
+
+For zero-arity constructors, \tr{con}, we also generate a static closure:
+
+\begin{description}
+\item[@con_closure@:]
+A single static copy of the (zero-arity) constructor itself.
+\end{description}
+
+For charlike and intlike closures there is a fixed array of static
+closures predeclared.
+
+\begin{code}
+-- Entry point of this module: for every tycon listed (and every local
+-- specialisation of it recorded in the FiniteMap), emit the per-constructor
+-- info-table bits plus, where the control-return convention needs one, a
+-- per-tycon standard-update vector table.
+genStaticConBits :: CompilationInfo -- global info about the compilation
+ -> [TyCon] -- tycons to generate
+ -> FiniteMap TyCon [[Maybe UniType]]
+ -- tycon specialisation info
+ -> AbstractC -- output
+
+genStaticConBits comp_info gen_tycons tycon_specs
+ = -- for each type constructor:
+ -- grab all its data constructors;
+ -- for each one, generate an info table
+ -- for each specialised type constructor
+ -- for each specialisation of the type constructor
+ -- grab data constructors, and generate info tables
+
+ -- ToDo: for tycons and specialisations which are not
+ -- declared in this module we must ensure that the
+ -- C labels are local to this module i.e. static
+
+ mkAbstractCs [ gen_for_tycon tc | tc <- gen_tycons ]
+ `mkAbsCStmts`
+ mkAbstractCs [ mkAbstractCs [ gen_for_spec_tycon tc spec
+ | spec <- specs ]
+ | (tc, specs) <- fmToList tycon_specs,
+ isLocalSpecTyCon (sw_chkr CompilingPrelude) tc
+ ]
+ where
+ -- One unspecialised tycon's worth: an info table per data constructor,
+ -- followed by the tycon's update-vector table (if any).
+ gen_for_tycon :: TyCon -> AbstractC
+ gen_for_tycon tycon
+ = mkAbstractCs (map (genConInfo comp_info tycon) data_cons)
+ `mkAbsCStmts` maybe_tycon_vtbl
+
+ where
+ data_cons = getTyConDataCons tycon
+ tycon_upd_label = mkStdUpdVecTblLabel tycon
+
+ -- A 1-constructor unvectored return gets a degenerate "unvector"
+ -- entry; a vectored return gets a flat vector of update labels;
+ -- otherwise no table is emitted.
+ maybe_tycon_vtbl =
+ case ctrlReturnConvAlg tycon of
+ UnvectoredReturn 1 -> CRetUnVector tycon_upd_label
+ (mk_upd_label tycon (head data_cons))
+ UnvectoredReturn _ -> AbsCNop
+ VectoredReturn _ -> CFlatRetVector tycon_upd_label
+ (map (mk_upd_label tycon) data_cons)
+ ------------------
+ -- Same again, but for one specialisation (given by ty_maybes) of a
+ -- tycon: the specialised tycon/constructors are built first.
+ gen_for_spec_tycon :: TyCon -> [Maybe UniType] -> AbstractC
+
+ gen_for_spec_tycon tycon ty_maybes
+ = mkAbstractCs (map (genConInfo comp_info tycon) spec_data_cons)
+ `mkAbsCStmts`
+ maybe_spec_tycon_vtbl
+ where
+ data_cons = getTyConDataCons tycon
+
+ spec_tycon = mkSpecTyCon tycon ty_maybes
+ spec_data_cons = map (mkSameSpecCon ty_maybes) data_cons
+
+ spec_tycon_upd_label = mkStdUpdVecTblLabel spec_tycon
+
+ maybe_spec_tycon_vtbl =
+ case ctrlReturnConvAlg spec_tycon of
+ UnvectoredReturn 1 -> CRetUnVector spec_tycon_upd_label
+ (mk_upd_label spec_tycon (head spec_data_cons))
+ UnvectoredReturn _ -> AbsCNop
+ VectoredReturn _ -> CFlatRetVector spec_tycon_upd_label
+ (map (mk_upd_label spec_tycon) spec_data_cons)
+ ------------------
+ -- Choose the update-code label by the constructor's data-return
+ -- convention: in-regs constructors use the "con" update code,
+ -- in-heap ones the standard update code.
+ mk_upd_label tycon con
+ = case dataReturnConvAlg con of
+ ReturnInRegs _ -> CLbl (mkConUpdCodePtrVecLabel tycon tag) CodePtrKind
+ ReturnInHeap -> CLbl (mkStdUpdCodePtrVecLabel tycon tag) CodePtrKind
+ where
+ tag = getDataConTag con
+
+ ------------------
+ -- Unpack the command-line-switch checker from the compilation info.
+ (MkCompInfo sw_chkr _) = comp_info
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgConTbls-info-tables]{Generating info tables for constructors}
+%* *
+%************************************************************************
+
+Generate the entry code, info tables, and (for niladic constructors) the
+static closure, for a constructor.
+
+\begin{code}
+-- Generate, for one data constructor: its entry code, its dynamic and
+-- static info tables, the phantom ("inregs") update info when the return
+-- convention requires it, and -- for constructors all of whose args are
+-- zero-size -- a single shared static closure.
+genConInfo :: CompilationInfo -> TyCon -> Id -> AbstractC
+
+genConInfo comp_info tycon data_con
+ = mkAbstractCs [
+#ifndef DPH
+ CSplitMarker,
+ inregs_upd_maybe,
+ closure_code,
+ static_code,
+#else
+ info_table,
+ CSplitMarker,
+ static_info_table,
+#endif {- Data Parallel Haskell -}
+ closure_maybe]
+ -- Order of things is to reduce forward references
+ where
+ (closure_info, body_code) = mkConCodeAndInfo data_con
+
+ -- To allow the debuggers, interpreters, etc to cope with static
+ -- data structures (ie those built at compile time), we take care that
+ -- info-table contains the information we need.
+ (static_ci,_) = layOutStaticClosure data_con kindFromType arg_tys (mkConLFInfo data_con)
+
+ -- Entry code: bump the ENT_CON profiling counter, then the
+ -- convention-specific return code from mkConCodeAndInfo.
+ body = (initC comp_info (
+ profCtrC SLIT("ENT_CON") [CReg node] `thenC`
+ body_code))
+
+ entry_addr = CLbl entry_label CodePtrKind
+ con_descr = _UNPK_ (getOccurrenceName data_con)
+
+#ifndef DPH
+ closure_code = CClosureInfoAndCode closure_info body Nothing stdUpd con_descr
+ static_code = CClosureInfoAndCode static_ci body Nothing stdUpd con_descr
+
+ inregs_upd_maybe = genPhantomUpdInfo comp_info tycon data_con
+
+ stdUpd = CLbl (mkStdUpdCodePtrVecLabel tycon tag) CodePtrKind
+
+ tag = getDataConTag data_con
+
+#else
+ info_table
+ = CNativeInfoTableAndCode closure_info con_descr entry_code
+ static_info_table
+ = CNativeInfoTableAndCode static_ci con_descr (CJump entry_addr)
+#endif {- Data Parallel Haskell -}
+
+ cost_centre = mkCCostCentre dontCareCostCentre -- not worried about static data costs
+
+ -- For zero-arity data constructors, or, more accurately,
+ -- those which only have VoidKind args (or none):
+ -- We make the closure too (not just info tbl), so that we can share
+ -- one copy throughout.
+ closure_maybe = -- OLD: if con_arity /= 0 then
+ if not (all zero_size arg_tys) then
+ AbsCNop
+ else
+ CStaticClosure closure_label -- Label for closure
+ static_ci -- Info table
+ cost_centre
+ [{-No args! A slight lie for constrs with VoidKind args-}]
+
+ -- An argument is "zero size" if its kind occupies no words.
+ zero_size arg_ty = getKindSize (kindFromType arg_ty) == 0
+
+ (_,_,arg_tys,_) = getDataConSig data_con
+ con_arity = getDataConArity data_con
+ entry_label = mkConEntryLabel data_con
+ closure_label = mkClosureLabel data_con
+\end{code}
+
+\begin{code}
+-- Build the closure-info and entry-code body for one data constructor,
+-- dispatching on its data-return convention: either reload the fields
+-- into registers and return, or return with Node already pointing at
+-- the heap object.
+mkConCodeAndInfo :: Id -- Data constructor
+ -> (ClosureInfo, Code) -- The info table
+
+mkConCodeAndInfo con
+ = case (dataReturnConvAlg con) of
+
+ ReturnInRegs regs ->
+ let
+ (closure_info, regs_w_offsets)
+ = layOutDynCon con kindFromMagicId regs
+
+ body_code
+ = -- OLD: We don't set CC when entering data any more (WDP 94/06)
+ -- lexCostCentreC "ENTER_CC_DCL" [CReg node] `thenC`
+ -- evalCostCentreC "SET_RetCC_CL" [CReg node] `thenC`
+ profCtrC SLIT("RET_OLD_IN_REGS") [] `thenC`
+
+ performReturn (mkAbstractCs (map move_to_reg regs_w_offsets))
+ (mkStaticAlgReturnCode con Nothing {- Info-ptr already loaded-})
+ emptyUniqSet{-no live vars-}
+ in
+ (closure_info, body_code)
+
+ ReturnInHeap ->
+ let
+ (_, _, arg_tys, _) = getDataConSig con
+
+ (closure_info, _)
+ = layOutDynCon con kindFromType arg_tys
+
+ body_code
+ = -- OLD: We don't set CC when entering data any more (WDP 94/06)
+ -- lexCostCentreC "ENTER_CC_DCL" [CReg node] `thenC`
+ profCtrC SLIT("RET_OLD_IN_HEAP") [] `thenC`
+
+ performReturn AbsCNop -- Ptr to thing already in Node
+ (mkStaticAlgReturnCode con Nothing {- Info-ptr already loaded-})
+ emptyUniqSet{-no live vars-}
+ in
+ (closure_info, body_code)
+
+ where
+ -- Load one field from its Node-relative slot into its return register.
+ move_to_reg :: (MagicId, VirtualHeapOffset {-from Node-}) -> AbstractC
+ move_to_reg (reg, offset)
+ = CAssign (CReg reg) (CVal (NodeRel offset) (kindFromMagicId reg))
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgConTbls-updates]{Generating update bits for constructors}
+%* *
+%************************************************************************
+
+Generate the "phantom" info table and update code, iff the constructor returns in regs
+
+\begin{code}
+
+-- Generate the "phantom" info table and its update code for a constructor,
+-- but only if the constructor returns in registers (ReturnInHeap needs no
+-- phantom update). The update code either overwrites the updatee in place
+-- (when the constructor fits the minimum update size) or allocates a fresh
+-- copy in the heap and indirects the updatee to it.
+genPhantomUpdInfo :: CompilationInfo -> TyCon -> Id{-data con-} -> AbstractC
+genPhantomUpdInfo comp_info tycon data_con
+ = case dataReturnConvAlg data_con of
+
+ ReturnInHeap -> AbsCNop -- No need for a phantom update
+
+ ReturnInRegs regs ->
+
+ let
+ phantom_itbl = CClosureInfoAndCode phantom_ci AbsCNop Nothing upd_code con_descr
+ phantom_ci = layOutPhantomClosure data_con (mkConLFInfo data_con)
+
+ con_descr = _UNPK_ (getOccurrenceName data_con)
+
+ con_arity = getDataConArity data_con
+
+ -- Update code = build the closure (in place or in heap),
+ -- then pop the update frame and return.
+ upd_code = CLabelledCode upd_label (mkAbsCStmts build_closure perform_return)
+ upd_label = mkConUpdCodePtrVecLabel tycon tag
+ tag = getDataConTag data_con
+
+ -- The thing being updated, found in the standard update frame
+ -- on the B stack.
+ updatee = CVal (SpBRel 0 (-uF_UPDATEE)) PtrKind
+
+ perform_return = mkAbstractCs
+ [
+ CMacroStmt POP_STD_UPD_FRAME [],
+ CReturn (CReg RetReg) return_info
+ ]
+
+ return_info =
+ -- OLD: pprTrace "ctrlReturn6:" (ppr PprDebug tycon) (
+ case (ctrlReturnConvAlg tycon) of
+ UnvectoredReturn _ -> DirectReturn
+ VectoredReturn _ -> StaticVectoredReturn (tag - fIRST_TAG)
+ -- )
+
+ -- Determine cost centre for the updated closures CC (and allocation)
+ -- CCC for lexical (now your only choice)
+ use_cc = CReg CurCostCentre -- what to put in the closure
+ blame_cc = use_cc -- who to blame for allocation
+
+ -- Store one return register back into its Node-relative slot.
+ do_move (reg, virt_offset) =
+ CAssign (CVal (NodeRel virt_offset) (kindFromMagicId reg)) (CReg reg)
+
+
+ -- Code for building a new constructor in place over the updatee
+ overwrite_code = profCtrC SLIT("UPD_CON_IN_PLACE") [] `thenC`
+ absC (mkAbstractCs
+ [
+ CAssign (CReg node) updatee,
+
+ -- Tell the storage mgr that we intend to update in place
+ -- This may (in complicated mgrs eg generational) cause gc,
+ -- and it may modify Node to point to another place to
+ -- actually update into.
+ CMacroStmt upd_inplace_macro [liveness_mask],
+
+ -- Initialise the closure pointed to by node
+ CInitHdr closure_info (NodeRel zeroOff) use_cc True,
+ mkAbstractCs (map do_move regs_w_offsets),
+ if con_arity /= 0 then
+ CAssign (CReg infoptr) (CLbl info_label DataPtrKind)
+ else
+ AbsCNop
+ ])
+
+ upd_inplace_macro = if closurePtrsSize closure_info == 0
+ then UPD_INPLACE_NOPTRS
+ else UPD_INPLACE_PTRS
+
+ -- Code for allocating a new constructor in the heap
+ alloc_code =
+ let amodes_w_offsets = [ (CReg r, o) | (r,o) <- regs_w_offsets ]
+ in
+ -- Allocate and build closure specifying upd_new_w_regs
+ allocDynClosure closure_info use_cc blame_cc amodes_w_offsets
+ `thenFC` \ hp_offset ->
+ getHpRelOffset hp_offset `thenFC` \ hp_rel ->
+ let
+ amode = CAddr hp_rel
+ in
+ profCtrC SLIT("UPD_CON_IN_NEW") [] `thenC`
+ absC (mkAbstractCs
+ [
+ CMacroStmt UPD_IND [updatee, amode],
+ CAssign (CReg node) amode,
+ CAssign (CReg infoptr) (CLbl info_label DataPtrKind)
+ ])
+
+ (closure_info, regs_w_offsets) = layOutDynCon data_con kindFromMagicId regs
+ info_label = infoTableLabelFromCI closure_info
+ liveness_mask = mkIntCLit (mkLiveRegsBitMask (node:regs))
+
+ -- In-place overwrite only when the closure fits the minimum
+ -- updatable size; otherwise heap-check and allocate afresh.
+ build_closure =
+ if fitsMinUpdSize closure_info then
+ initC comp_info overwrite_code
+ else
+ initC comp_info (heapCheck regs False alloc_code)
+
+ in CClosureUpdInfo phantom_itbl
+
+\end{code}
+
diff --git a/ghc/compiler/codeGen/CgExpr.hi b/ghc/compiler/codeGen/CgExpr.hi
new file mode 100644
index 0000000000..6d21c17ed7
--- /dev/null
+++ b/ghc/compiler/codeGen/CgExpr.hi
@@ -0,0 +1,24 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgExpr where
+import AbsCSyn(AbstractC, CAddrMode)
+import CgBindery(CgIdInfo)
+import CgMonad(CgInfoDownwards, CgState, StubFlag)
+import CostCentre(CostCentre)
+import HeapOffs(HeapOffset)
+import Id(Id, IdDetails)
+import IdInfo(IdInfo)
+import PrimOps(PrimOp)
+import StgSyn(StgAtom, StgBinding, StgCaseAlternatives, StgExpr)
+import UniType(UniType)
+import UniqFM(UniqFM)
+import Unique(Unique)
+data CgState {-# GHC_PRAGMA MkCgState AbstractC (UniqFM CgIdInfo) ((Int, [(Int, StubFlag)], Int, Int), (Int, [Int], Int, Int), (HeapOffset, HeapOffset)) #-}
+data Id {-# GHC_PRAGMA Id Unique UniType IdInfo IdDetails #-}
+data StgExpr a b {-# GHC_PRAGMA StgApp (StgAtom b) [StgAtom b] (UniqFM b) | StgConApp Id [StgAtom b] (UniqFM b) | StgPrimApp PrimOp [StgAtom b] (UniqFM b) | StgCase (StgExpr a b) (UniqFM b) (UniqFM b) Unique (StgCaseAlternatives a b) | StgLet (StgBinding a b) (StgExpr a b) | StgLetNoEscape (UniqFM b) (UniqFM b) (StgBinding a b) (StgExpr a b) | StgSCC UniType CostCentre (StgExpr a b) #-}
+cgExpr :: StgExpr Id Id -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 1 _U_ 222 _N_ _S_ "S" _N_ _N_ #-}
+cgSccExpr :: StgExpr Id Id -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 1 _U_ 222 _N_ _S_ "S" _N_ _N_ #-}
+getPrimOpArgAmodes :: PrimOp -> [StgAtom Id] -> CgInfoDownwards -> CgState -> ([CAddrMode], CgState)
+ {-# GHC_PRAGMA _A_ 2 _U_ 1222 _N_ _S_ "SL" _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgExpr.lhs b/ghc/compiler/codeGen/CgExpr.lhs
new file mode 100644
index 0000000000..5974df641d
--- /dev/null
+++ b/ghc/compiler/codeGen/CgExpr.lhs
@@ -0,0 +1,414 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1995
+%
+%********************************************************
+%* *
+\section[CgExpr]{Converting @StgExpr@s}
+%* *
+%********************************************************
+
+\begin{code}
+#include "HsVersions.h"
+
+module CgExpr (
+ cgExpr, cgSccExpr, getPrimOpArgAmodes,
+
+ -- and to make the interface self-sufficient...
+ StgExpr, Id, CgState
+ ) where
+
+IMPORT_Trace -- NB: not just for debugging
+import Outputable -- ToDo: rm (just for debugging)
+import Pretty -- ToDo: rm (just for debugging)
+
+import StgSyn
+import CgMonad
+import AbsCSyn
+
+import AbsPrel ( PrimOp(..), PrimOpResultInfo(..), HeapRequirement(..),
+ primOpHeapReq, getPrimOpResultInfo, PrimKind,
+ primOpCanTriggerGC
+ IF_ATTACK_PRAGMAS(COMMA tagOf_PrimOp)
+ IF_ATTACK_PRAGMAS(COMMA pprPrimOp)
+ )
+import AbsUniType ( isPrimType, getTyConDataCons )
+import CLabelInfo ( CLabel, mkPhantomInfoTableLabel, mkInfoTableVecTblLabel )
+import ClosureInfo ( LambdaFormInfo, mkClosureLFInfo )
+import CgBindery ( getAtomAmodes )
+import CgCase ( cgCase, saveVolatileVarsAndRegs )
+import CgClosure ( cgRhsClosure )
+import CgCon ( buildDynCon, cgReturnDataCon )
+import CgHeapery ( allocHeap )
+import CgLetNoEscape ( cgLetNoEscapeClosure )
+import CgRetConv -- various things...
+import CgTailCall ( cgTailCall, performReturn, mkDynamicAlgReturnCode,
+ mkPrimReturnCode
+ )
+import CostCentre ( setToAbleCostCentre, isDupdCC, CostCentre )
+import Maybes ( Maybe(..) )
+import PrimKind ( getKindSize )
+import UniqSet
+import Util
+\end{code}
+
+This module provides the support code for @StgToAbstractC@ to deal
+with STG {\em expressions}. See also @CgClosure@, which deals
+with closures, and @CgCon@, which deals with constructors.
+
+\begin{code}
+-- Code-generate one STG expression; the equations follow, one per
+-- STG constructor, in the sections below.
+cgExpr :: PlainStgExpr -- input
+ -> Code -- output
+\end{code}
+
+%********************************************************
+%* *
+%* Tail calls *
+%* *
+%********************************************************
+
+``Applications'' mean {\em tail calls}, a service provided by module
+@CgTailCall@. This includes literals, which show up as
+@(STGApp (StgLitAtom 42) [])@.
+
+\begin{code}
+-- Applications (including bare atoms) are tail calls; CgTailCall does the work.
+cgExpr (StgApp fun args live_vars) = cgTailCall fun args live_vars
+\end{code}
+
+%********************************************************
+%* *
+%* STG ConApps (for inline versions) *
+%* *
+%********************************************************
+
+\begin{code}
+-- Saturated constructor application in tail position: get the argument
+-- amodes and return the constructor (CgCon decides heap vs registers).
+cgExpr (StgConApp con args live_vars)
+ = getAtomAmodes args `thenFC` \ amodes ->
+ cgReturnDataCon con amodes (all zero_size args) live_vars
+ where
+ -- True iff the argument's kind occupies no words.
+ zero_size atom = getKindSize (getAtomKind atom) == 0
+\end{code}
+
+%********************************************************
+%* *
+%* STG PrimApps (unboxed primitive ops) *
+%* *
+%********************************************************
+
+Here is where we insert real live machine instructions.
+
+\begin{code}
+-- Primitive-op application in tail position. Two regimes: ops that may
+-- trigger GC get their args forced into registers (with a liveness mask)
+-- before the op; ops that cannot GC use the args' current amodes. The
+-- return path then depends on whether the op returns a primitive value
+-- or an algebraic type (tag in the first result register).
+cgExpr x@(StgPrimApp op args live_vars)
+ = -- trace ("cgExpr:PrimApp:"++(ppShow 80 (ppr PprDebug x))) (
+ getPrimOpArgAmodes op args `thenFC` \ arg_amodes ->
+ let
+ result_regs = assignPrimOpResultRegs op
+ result_amodes = map CReg result_regs
+ may_gc = primOpCanTriggerGC op
+ dyn_tag = head result_amodes
+ -- The tag from a primitive op returning an algebraic data type
+ -- is returned in the first result_reg_amode
+ in
+ (if may_gc then
+ -- Use registers for args, and assign args to the regs
+ -- (Can-trigger-gc primops guarantee to have their args in regs)
+ let
+ (arg_robust_amodes, liveness_mask, arg_assts)
+ = makePrimOpArgsRobust op arg_amodes
+
+ liveness_arg = mkIntCLit liveness_mask
+ in
+ returnFC (
+ arg_assts,
+ mkAbstractCs [
+ spat_prim_macro,
+ COpStmt result_amodes op
+ (pin_liveness op liveness_arg arg_robust_amodes)
+ liveness_mask
+ [{-no vol_regs-}],
+ spat_prim_stop_macro ]
+ )
+ else
+ -- Use args from their current amodes.
+ let
+ liveness_mask = panic "cgExpr: liveness of non-GC-ing primop touched\n"
+ in
+ returnFC (
+-- DO NOT want CCallProfMacros in CSimultaneous stuff. Yurgh. (WDP 95/01)
+-- Arises in compiling PreludeGlaST (and elsewhere??)
+-- mkAbstractCs [
+-- spat_prim_macro,
+ COpStmt result_amodes op arg_amodes liveness_mask [{-no vol_regs-}],
+-- spat_prim_stop_macro ],
+ AbsCNop
+ )
+ ) `thenFC` \ (do_before_stack_cleanup,
+ do_just_before_jump) ->
+
+ case (getPrimOpResultInfo op) of
+
+ ReturnsPrim kind ->
+ performReturn do_before_stack_cleanup
+ (\ sequel -> robustifySequel may_gc sequel
+ `thenFC` \ (ret_asst, sequel') ->
+ absC (ret_asst `mkAbsCStmts` do_just_before_jump)
+ `thenC`
+ mkPrimReturnCode sequel')
+ live_vars
+
+ ReturnsAlg tycon ->
+--OLD: evalCostCentreC "SET_RetCC" [CReg CurCostCentre] `thenC`
+ profCtrC SLIT("RET_NEW_IN_REGS") [] `thenC`
+
+ performReturn do_before_stack_cleanup
+ (\ sequel -> robustifySequel may_gc sequel
+ `thenFC` \ (ret_asst, sequel') ->
+ absC (mkAbstractCs [ret_asst,
+ do_just_before_jump,
+ info_ptr_assign])
+ -- Must load info ptr here, not in do_just_before_stack_cleanup,
+ -- because the info-ptr reg clashes with argument registers
+ -- for the primop
+ `thenC`
+ mkDynamicAlgReturnCode tycon dyn_tag sequel')
+ live_vars
+ where
+
+ -- Here, the destination _can_ be an update frame, so we need to make sure that
+ -- infoptr (R2) is loaded with the constructor's info ptr.
+
+ info_ptr_assign = CAssign (CReg infoptr) info_lbl
+
+ info_lbl
+ = -- OLD: pprTrace "ctrlReturn7:" (ppr PprDebug tycon) (
+ case (ctrlReturnConvAlg tycon) of
+ VectoredReturn _ -> vec_lbl
+ UnvectoredReturn _ -> dir_lbl
+ -- )
+
+ vec_lbl = CTableEntry (CLbl (mkInfoTableVecTblLabel tycon) DataPtrKind)
+ dyn_tag DataPtrKind
+
+ data_con = head (getTyConDataCons tycon)
+ dir_lbl = case dataReturnConvAlg data_con of
+ ReturnInRegs _ -> CLbl (mkPhantomInfoTableLabel data_con)
+ DataPtrKind
+ ReturnInHeap -> panic "CgExpr: can't return prim in heap"
+ -- Never used, and no point in generating
+ -- the code for it!
+ where
+ -- for all PrimOps except ccalls, we pin the liveness info
+ -- on as the first "argument"
+ -- ToDo: un-duplicate?
+
+ pin_liveness (CCallOp _ _ _ _ _) _ args = args
+ pin_liveness other_op liveness_arg args
+ = liveness_arg :args
+
+ -- We only need to worry about the sequel when we may GC and the
+ -- sequel is OnStack. If that's the case, arrange to pull the
+ -- sequel out into RetReg before performing the primOp.
+
+ robustifySequel True sequel@(OnStack _) =
+ sequelToAmode sequel `thenFC` \ amode ->
+ returnFC (CAssign (CReg RetReg) amode, InRetReg)
+ robustifySequel _ sequel = returnFC (AbsCNop, sequel)
+
+ -- Profiling bracket: mark entry/exit of primitive-op activity.
+ spat_prim_macro = CCallProfCtrMacro SLIT("SET_ACTIVITY") [CLitLit SLIT("ACT_PRIM") IntKind]
+ spat_prim_stop_macro = CCallProfCtrMacro SLIT("SET_ACTIVITY") [CLitLit SLIT("ACT_PRIM_STOP") IntKind]
+
+\end{code}
+
+%********************************************************
+%* *
+%* Case expressions *
+%* *
+%********************************************************
+Case-expression conversion is complicated enough to have its own
+module, @CgCase@.
+\begin{code}
+
+-- Case expressions are handled wholesale by CgCase.
+cgExpr (StgCase expr live_vars save_vars uniq alts)
+ = cgCase expr live_vars save_vars uniq alts
+\end{code}
+
+
+%********************************************************
+%* *
+%* Let and letrec *
+%* *
+%********************************************************
+\subsection[let-and-letrec-codegen]{Converting @StgLet@ and @StgLetrec@}
+
+\begin{code}
+-- Non-recursive let: generate the rhs, bind its name, then do the body.
+cgExpr (StgLet (StgNonRec name rhs) expr)
+ = cgRhs name rhs `thenFC` \ (name, info) ->
+ addBindC name info `thenC`
+ cgExpr expr
+
+-- Recursive let: tie the knot with fixC so each rhs is generated with
+-- all the new bindings already in scope.
+cgExpr (StgLet (StgRec pairs) expr)
+ = fixC (\ new_bindings -> addBindsC new_bindings `thenC`
+ listFCs [ cgRhs b e | (b,e) <- pairs ]
+ ) `thenFC` \ new_bindings ->
+
+ addBindsC new_bindings `thenC`
+ cgExpr expr
+\end{code}
+
+\begin{code}
+-- let-no-escape: save volatile vars/regs up front, code-generate the
+-- rhss as join points (CgLetNoEscape), then the body under the same
+-- end-of-block info so the join points agree on the stack state.
+cgExpr (StgLetNoEscape live_in_whole_let live_in_rhss bindings body)
+ = -- Figure out what volatile variables to save
+ nukeDeadBindings live_in_whole_let `thenC`
+ saveVolatileVarsAndRegs live_in_rhss
+ `thenFC` \ (save_assts, rhs_eob_info, maybe_cc_slot) ->
+
+ -- ToDo: cost centre???
+
+ -- Save those variables right now!
+ absC save_assts `thenC`
+
+ -- Produce code for the rhss
+ -- and add suitable bindings to the environment
+ cgLetNoEscapeBindings live_in_rhss rhs_eob_info maybe_cc_slot bindings `thenC`
+
+ -- Do the body
+ setEndOfBlockInfo rhs_eob_info (cgExpr body)
+\end{code}
+
+
+%********************************************************
+%* *
+%* SCC Expressions *
+%* *
+%********************************************************
+\subsection[scc-codegen]{Converting StgSCC}
+
+SCC expressions are treated specially. They set the current cost
+centre.
+
+For evaluation scoping we also need to save the cost centre in an
+``restore CC frame''. We only need to do this once before setting all
+nested SCCs.
+
+\begin{code}
+-- SCC annotation: delegate to cgSccExpr (the old push-restore-CC-frame
+-- wrapper is retained below as a comment).
+cgExpr scc_expr@(StgSCC ty cc expr)
+--OLD:WDP:94/06 = evalPushRCCFrame (isPrimType ty) (cgSccExpr scc_expr)
+ = cgSccExpr scc_expr
+\end{code}
+
+@cgSccExpr@ (also used in \tr{CgClosure}):
+We *don't* set the cost centre for CAF/Dict cost centres
+[Likewise Subsumed and NoCostCentre, but they probably
+don't exist in an StgSCC expression.]
+\begin{code}
+-- Set the current cost centre (only for "set-to-able" CCs), then carry
+-- on; any other expression falls straight back into cgExpr.
+cgSccExpr (StgSCC ty cc expr)
+ = (if setToAbleCostCentre cc then
+ costCentresC SLIT("SET_CCC")
+ [mkCCostCentre cc, mkIntCLit (if isDupdCC cc then 1 else 0)]
+ else
+ nopC) `thenC`
+ cgSccExpr expr
+cgSccExpr other
+ = cgExpr other
+\end{code}
+
+%********************************************************
+%* *
+%* Non-top-level bindings *
+%* *
+%********************************************************
+\subsection[non-top-level-bindings]{Converting non-top-level bindings}
+
+@cgBinding@ is only used for let/letrec, not for unboxed bindings.
+So the kind should always be @PtrKind@.
+
+We rely on the support code in @CgCon@ (to do constructors) and
+in @CgClosure@ (to do closures).
+
+\begin{code}
+-- Code-generate one let-bound rhs: a constructor rhs is built with
+-- buildDynCon (CgCon); a closure rhs goes to cgRhsClosure (CgClosure).
+cgRhs :: Id -> PlainStgRhs -> FCode (Id, CgIdInfo)
+ -- the Id is passed along so a binding can be set up
+
+cgRhs name (StgRhsCon maybe_cc con args)
+ = getAtomAmodes args `thenFC` \ amodes ->
+ buildDynCon name maybe_cc con amodes (all zero_size args)
+ `thenFC` \ idinfo ->
+ returnFC (name, idinfo)
+ where
+ -- True iff the argument's kind occupies no words.
+ zero_size atom = getKindSize (getAtomKind atom) == 0
+
+cgRhs name (StgRhsClosure cc bi fvs upd_flag args body)
+ = cgRhsClosure name cc bi fvs args body lf_info
+ where
+ lf_info = mkClosureLFInfo False{-not top level-} fvs upd_flag args body
+\end{code}
+
+\begin{code}
+-- Code-generate let-no-escape bindings and add them to the environment;
+-- the recursive case ties the knot with fixC, as for ordinary letrec.
+cgLetNoEscapeBindings live_in_rhss rhs_eob_info maybe_cc_slot (StgNonRec binder rhs)
+ = cgLetNoEscapeRhs live_in_rhss rhs_eob_info maybe_cc_slot binder rhs
+ `thenFC` \ (binder, info) ->
+ addBindC binder info
+
+cgLetNoEscapeBindings live_in_rhss rhs_eob_info maybe_cc_slot (StgRec pairs)
+ = fixC (\ new_bindings ->
+ addBindsC new_bindings `thenC`
+ listFCs [ cgLetNoEscapeRhs full_live_in_rhss rhs_eob_info
+ maybe_cc_slot b e | (b,e) <- pairs ]
+ ) `thenFC` \ new_bindings ->
+
+ addBindsC new_bindings
+ where
+ -- We add the binders to the live-in-rhss set so that we don't
+ -- delete the bindings for the binder from the environment!
+ full_live_in_rhss = live_in_rhss `unionUniqSets` (mkUniqSet [b | (b,r) <- pairs])
+
+-- One let-no-escape rhs: both closure and constructor rhss become a
+-- join-point "closure" via cgLetNoEscapeClosure.
+cgLetNoEscapeRhs
+ :: PlainStgLiveVars -- Live in rhss
+ -> EndOfBlockInfo
+ -> Maybe VirtualSpBOffset
+ -> Id
+ -> PlainStgRhs
+ -> FCode (Id, CgIdInfo)
+
+cgLetNoEscapeRhs full_live_in_rhss rhs_eob_info maybe_cc_slot binder
+ (StgRhsClosure cc bi _ upd_flag args body)
+ = -- We could check the update flag, but currently we don't switch it off
+ -- for let-no-escaped things, so we omit the check too!
+ -- case upd_flag of
+ -- Updatable -> panic "cgLetNoEscapeRhs" -- Nothing to update!
+ -- other -> cgLetNoEscapeClosure binder cc bi live_in_whole_let live_in_rhss args body
+ cgLetNoEscapeClosure binder cc bi full_live_in_rhss rhs_eob_info maybe_cc_slot args body
+
+-- For a constructor RHS we want to generate a single chunk of code which
+-- can be jumped to from many places, which will return the constructor.
+-- It's easy; just behave as if it was an StgRhsClosure with a ConApp inside!
+cgLetNoEscapeRhs full_live_in_rhss rhs_eob_info maybe_cc_slot binder
+ (StgRhsCon cc con args)
+ = cgLetNoEscapeClosure binder cc stgArgOcc{-safe-} full_live_in_rhss rhs_eob_info maybe_cc_slot
+ [] --No args; the binder is data structure, not a function
+ (StgConApp con args full_live_in_rhss)
+\end{code}
+
+Some PrimOps require a {\em fixed} amount of heap allocation. Rather
+than tidy away ready for GC and do a full heap check, we simply
+allocate a completely uninitialised block in-line, just like any other
+thunk/constructor allocation, and pass it to the PrimOp as its first
+argument. Remember! The PrimOp is entirely responsible for
+initialising the object. In particular, the PrimOp had better not
+trigger GC before it has filled it in, and even then it had better
+make sure that the GC can find the object somehow.
+
+Main current use: allocating SynchVars.
+
+\begin{code}
+-- Get the amodes of a primop's arguments; for ops that declare a fixed
+-- heap requirement, allocate that (uninitialised) block here and prepend
+-- its amode as an extra first argument (see commentary above).
+getPrimOpArgAmodes op args
+ = getAtomAmodes args `thenFC` \ arg_amodes ->
+
+ case primOpHeapReq op of
+
+ FixedHeapRequired size -> allocHeap size `thenFC` \ amode ->
+ returnFC (amode : arg_amodes)
+
+ _ -> returnFC arg_amodes
+\end{code}
+
+
diff --git a/ghc/compiler/codeGen/CgHeapery.hi b/ghc/compiler/codeGen/CgHeapery.hi
new file mode 100644
index 0000000000..43aa7cb90b
--- /dev/null
+++ b/ghc/compiler/codeGen/CgHeapery.hi
@@ -0,0 +1,33 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgHeapery where
+import AbsCSyn(AbstractC, CAddrMode, CExprMacro, CStmtMacro, MagicId, RegRelative, ReturnInfo)
+import BasicLit(BasicLit)
+import CLabelInfo(CLabel)
+import CgBindery(CgIdInfo)
+import CgMonad(CgInfoDownwards, CgState, StubFlag)
+import ClosureInfo(ClosureInfo, LambdaFormInfo)
+import CostCentre(CostCentre)
+import HeapOffs(HeapOffset)
+import Id(Id, IdDetails)
+import IdInfo(IdInfo)
+import Maybes(Labda)
+import PreludePS(_PackedString)
+import PrimKind(PrimKind)
+import PrimOps(PrimOp)
+import SMRep(SMRep)
+import UniType(UniType)
+import UniqFM(UniqFM)
+import Unique(Unique)
+data AbstractC {-# GHC_PRAGMA AbsCNop | AbsCStmts AbstractC AbstractC | CAssign CAddrMode CAddrMode | CJump CAddrMode | CFallThrough CAddrMode | CReturn CAddrMode ReturnInfo | CSwitch CAddrMode [(BasicLit, AbstractC)] AbstractC | CCodeBlock CLabel AbstractC | CInitHdr ClosureInfo RegRelative CAddrMode Bool | COpStmt [CAddrMode] PrimOp [CAddrMode] Int [MagicId] | CSimultaneous AbstractC | CMacroStmt CStmtMacro [CAddrMode] | CCallProfCtrMacro _PackedString [CAddrMode] | CCallProfCCMacro _PackedString [CAddrMode] | CStaticClosure CLabel ClosureInfo CAddrMode [CAddrMode] | CClosureInfoAndCode ClosureInfo AbstractC (Labda AbstractC) CAddrMode [Char] | CRetVector CLabel [Labda CAddrMode] AbstractC | CRetUnVector CLabel CAddrMode | CFlatRetVector CLabel [CAddrMode] | CCostCentreDecl Bool CostCentre | CClosureUpdInfo AbstractC | CSplitMarker #-}
+data CAddrMode {-# GHC_PRAGMA CVal RegRelative PrimKind | CAddr RegRelative | CReg MagicId | CTableEntry CAddrMode CAddrMode PrimKind | CTemp Unique PrimKind | CLbl CLabel PrimKind | CUnVecLbl CLabel CLabel | CCharLike CAddrMode | CIntLike CAddrMode | CString _PackedString | CLit BasicLit | CLitLit _PackedString PrimKind | COffset HeapOffset | CCode AbstractC | CLabelledCode CLabel AbstractC | CJoinPoint Int Int | CMacroExpr PrimKind CExprMacro [CAddrMode] | CCostCentre CostCentre Bool #-}
+data CgState {-# GHC_PRAGMA MkCgState AbstractC (UniqFM CgIdInfo) ((Int, [(Int, StubFlag)], Int, Int), (Int, [Int], Int, Int), (HeapOffset, HeapOffset)) #-}
+data ClosureInfo {-# GHC_PRAGMA MkClosureInfo Id LambdaFormInfo SMRep #-}
+data HeapOffset
+data Id {-# GHC_PRAGMA Id Unique UniType IdInfo IdDetails #-}
+allocDynClosure :: ClosureInfo -> CAddrMode -> CAddrMode -> [(CAddrMode, HeapOffset)] -> CgInfoDownwards -> CgState -> (HeapOffset, CgState)
+ {-# GHC_PRAGMA _A_ 4 _U_ 222111 _N_ _N_ _N_ _N_ #-}
+allocHeap :: HeapOffset -> CgInfoDownwards -> CgState -> (CAddrMode, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 211 _N_ _S_ "LLU(LLU(LLU(LL)))" {_A_ 5 _U_ 21222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+heapCheck :: [MagicId] -> Bool -> (CgInfoDownwards -> CgState -> CgState) -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 5 _U_ 22221 _N_ _S_ "LLLLU(LLU(LLL))" _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgHeapery.lhs b/ghc/compiler/codeGen/CgHeapery.lhs
new file mode 100644
index 0000000000..226ff6b72a
--- /dev/null
+++ b/ghc/compiler/codeGen/CgHeapery.lhs
@@ -0,0 +1,278 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1994
+%
+\section[CgHeapery]{Heap management functions}
+
+\begin{code}
+#include "HsVersions.h"
+
+module CgHeapery (
+ heapCheck,
+ allocHeap, allocDynClosure,
+
+#ifdef GRAN
+ -- new for GrAnSim HWL
+ heapCheckOnly, fetchAndReschedule,
+#endif {- GRAN -}
+
+ -- and to make the interface self-sufficient...
+ AbstractC, CAddrMode, HeapOffset,
+ CgState, ClosureInfo, Id
+ ) where
+
+import AbsCSyn
+import CgMonad
+
+import CgRetConv ( mkLiveRegsBitMask )
+import CgUsages ( getVirtAndRealHp, setVirtHp, setRealHp,
+ initHeapUsage
+ )
+import ClosureInfo ( closureSize, closureHdrSize, closureGoodStuffSize, slopSize,
+ layOutDynClosure,
+ allocProfilingMsg, closureKind
+ )
+import Util
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgHeapery-heap-overflow]{Heap overflow checking}
+%* *
+%************************************************************************
+
+This is std code we replaced by the bits below for GrAnSim. -- HWL
+
+\begin{code}
+#ifndef GRAN
+
+-- heapCheck wraps a code block with a heap-overflow check.
+-- initHeapUsage runs the enclosed code first to discover how many
+-- heap words it allocates (hHw); do_heap_chk then plants a HEAP_CHK
+-- macro call -- carrying a liveness bitmask of the live registers --
+-- in front of that code.  If no heap is needed, no check is emitted.
+heapCheck :: [MagicId] -- Live registers
+ -> Bool -- Node reqd after GC?
+ -> Code
+ -> Code
+
+heapCheck regs node_reqd code
+ = initHeapUsage (\ hHw -> do_heap_chk hHw `thenC` code)
+ where
+
+ do_heap_chk :: HeapOffset -> Code
+ do_heap_chk words_required
+ = absC (if isZeroOff(words_required) then AbsCNop else checking_code) `thenC`
+ -- The test is *inside* the absC, to avoid black holes!
+
+ -- Now we have set up the real heap pointer and checked there is
+ -- enough space. It remains only to reflect this in the environment
+
+ setRealHp words_required
+
+ -- The "words_required" here is a fudge.
+ -- *** IT DEPENDS ON THE DIRECTION ***, and on
+ -- whether the Hp is moved the whole way all
+ -- at once or not.
+ where
+ -- Node is counted among the live registers iff it is
+ -- required to survive the GC
+ all_regs = if node_reqd then node:regs else regs
+ liveness_mask = mkLiveRegsBitMask all_regs
+
+ -- HEAP_CHK arguments: liveness mask, words needed,
+ -- and a node-required flag (1/0)
+ checking_code = CMacroStmt HEAP_CHK [
+ mkIntCLit liveness_mask,
+ COffset words_required,
+ mkIntCLit (if node_reqd then 1 else 0)]
+#endif {- GRAN -}
+\end{code}
+
+The GrAnSim code for heapChecks. The code for doing a heap check and
+doing a context switch has been separated. Especially, the HEAP_CHK
+macro only performs a heap check. THREAD_CONTEXT_SWITCH should be used
+for doing a context switch. GRAN_FETCH_AND_RESCHEDULE must be put at
+the beginning of every slow entry code in order to simulate the
+fetching of closures. If fetching is necessary (i.e. current closure
+is not local) then an automatic context switch is done.
+
+\begin{code}
+#ifdef GRAN
+
+-- GrAnSim version of heapCheck: delegates to heapCheck'.
+-- NOTE(review): this passes False ("no context switch here"), which
+-- makes it byte-identical to heapCheckOnly below; given heapCheck''s
+-- first argument is "context switch here?", one of the two presumably
+-- ought to pass True -- confirm against the GrAnSim intent.
+heapCheck :: [MagicId] -- Live registers
+ -> Bool -- Node reqd after GC?
+ -> Code
+ -> Code
+
+heapCheck = heapCheck' False
+
+-- Heap check with no context switch (GrAnSim only): delegates to
+-- heapCheck' with do_context_switch = False.
+heapCheckOnly :: [MagicId] -- Live registers
+ -> Bool -- Node reqd after GC?
+ -> Code
+ -> Code
+
+heapCheckOnly = heapCheck' False
+
+-- Maybe emit a context switch, and emit the heap-check macro
+
+-- Worker for the GrAnSim heap checks.  Optionally emits a
+-- THREAD_CONTEXT_SWITCH macro (when do_context_switch is set and some
+-- heap is needed) before the usual HEAP_CHK macro; both carry the
+-- register liveness mask.  As for the sequential heapCheck,
+-- initHeapUsage supplies the words allocated by the enclosed code.
+heapCheck' :: Bool -- context switch here?
+ -> [MagicId] -- Live registers
+ -> Bool -- Node reqd after GC?
+ -> Code
+ -> Code
+
+heapCheck' do_context_switch regs node_reqd code
+ = initHeapUsage (\ hHw -> do_heap_chk hHw `thenC` code)
+ where
+
+ do_heap_chk :: HeapOffset -> Code
+ do_heap_chk words_required
+ =
+ -- HWL:: absC (CComment "Forced heap check --- HWL") `thenC`
+ --absC (if do_context_switch
+ -- then context_switch_code
+ -- else AbsCNop) `thenC`
+
+ absC (if do_context_switch && not (isZeroOff words_required)
+ then context_switch_code
+ else AbsCNop) `thenC`
+ absC (if isZeroOff(words_required)
+ then AbsCNop
+ else checking_code) `thenC`
+
+ -- HWL was here:
+ -- For GrAnSim we want heap checks even if no heap is allocated in
+ -- the basic block to make context switches possible.
+ -- So, the if construct has been replaced by its else branch.
+
+ -- The test is *inside* the absC, to avoid black holes!
+
+ -- Now we have set up the real heap pointer and checked there is
+ -- enough space. It remains only to reflect this in the environment
+
+ setRealHp words_required
+
+ -- The "words_required" here is a fudge.
+ -- *** IT DEPENDS ON THE DIRECTION ***, and on
+ -- whether the Hp is moved the whole way all
+ -- at once or not.
+ where
+ all_regs = if node_reqd then node:regs else regs
+ liveness_mask = mkLiveRegsBitMask all_regs
+
+ -- NOTE(review): maybe_context_switch is never referenced --
+ -- do_heap_chk uses context_switch_code directly -- so this
+ -- binding is dead code and could be removed.
+ maybe_context_switch = if do_context_switch
+ then context_switch_code
+ else AbsCNop
+
+ context_switch_code = CMacroStmt THREAD_CONTEXT_SWITCH [
+ mkIntCLit liveness_mask,
+ mkIntCLit (if node_reqd then 1 else 0)]
+
+ -- Good old heap check (excluding context switch)
+ checking_code = CMacroStmt HEAP_CHK [
+ mkIntCLit liveness_mask,
+ COffset words_required,
+ mkIntCLit (if node_reqd then 1 else 0)]
+
+-- Emit macro for simulating a fetch and then reschedule
+
+-- Emit the GrAnSim fetch-simulation macros.  If Node is live (either
+-- listed in regs or required), plant GRAN_FETCH followed by
+-- GRAN_RESCHEDULE (the latter carrying the liveness mask and the
+-- node-required flag); otherwise emit nothing.
+fetchAndReschedule :: [MagicId] -- Live registers
+ -> Bool -- Node reqd
+ -> Code
+
+fetchAndReschedule regs node_reqd =
+ if (node `elem` regs || node_reqd)
+ then fetch_code `thenC` reschedule_code
+ else absC AbsCNop
+ where
+ all_regs = if node_reqd then node:regs else regs
+ liveness_mask = mkLiveRegsBitMask all_regs
+
+ reschedule_code = absC (CMacroStmt GRAN_RESCHEDULE [
+ mkIntCLit liveness_mask,
+ mkIntCLit (if node_reqd then 1 else 0)])
+
+ --HWL: generate GRAN_FETCH macro for GrAnSim
+ -- currently GRAN_FETCH and GRAN_FETCH_AND_RESCHEDULE are miai
+ fetch_code = absC (CMacroStmt GRAN_FETCH [])
+
+#endif {- GRAN -}
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[initClosure]{Initialise a dynamic closure}
+%* *
+%************************************************************************
+
+@allocDynClosure@ puts the thing in the heap, and modifies the virtual Hp
+to account for this.
+
+\begin{code}
+-- Allocate a dynamic closure in the heap: emit the header
+-- initialisation and field assignments, advance the virtual heap
+-- pointer by the closure's size, and return the virtual offset of
+-- the new object's info-pointer word.
+allocDynClosure
+ :: ClosureInfo
+ -> CAddrMode -- Cost Centre to stick in the object
+ -> CAddrMode -- Cost Centre to blame for this alloc
+ -- (usually the same; sometimes "OVERHEAD")
+
+ -> [(CAddrMode, VirtualHeapOffset)] -- Offsets from start of the object
+ -- ie Info ptr has offset zero.
+ -> FCode VirtualHeapOffset -- Returns virt offset of object
+
+allocDynClosure closure_info use_cc blame_cc amodes_with_offsets
+ = getVirtAndRealHp `thenFC` \ (virtHp, realHp) ->
+
+ -- FIND THE OFFSET OF THE INFO-PTR WORD
+ -- virtHp points to last allocated word, ie 1 *before* the
+ -- info-ptr word of new object.
+ let info_offset = addOff virtHp (intOff 1)
+
+ -- do_move IS THE ASSIGNMENT FUNCTION
+ -- builds one CAssign storing `amode` at its offset within the
+ -- new object, addressed relative to the real heap pointer
+ do_move (amode, offset_from_start)
+ = CAssign (CVal (HpRel realHp
+ (info_offset `addOff` offset_from_start))
+ (getAmodeKind amode))
+ amode
+ in
+ -- SAY WHAT WE ARE ABOUT TO DO
+ -- (profiling-counter instrumentation; header size, useful words,
+ -- slop words, total size)
+ profCtrC (allocProfilingMsg closure_info)
+ [COffset (closureHdrSize closure_info),
+ mkIntCLit (closureGoodStuffSize closure_info),
+ mkIntCLit slop_size,
+ COffset closure_size] `thenC`
+
+ -- GENERATE THE CODE
+ -- header init first, then one assignment per field
+ absC ( mkAbstractCs (
+ [ CInitHdr closure_info (HpRel realHp info_offset) use_cc False ]
+ ++ (map do_move amodes_with_offsets))) `thenC`
+
+ -- GENERATE CC PROFILING MESSAGES
+ costCentresC SLIT("CC_ALLOC") [blame_cc,
+ COffset closure_size,
+ CLitLit (_PK_ (closureKind closure_info)) IntKind]
+ `thenC`
+
+ -- BUMP THE VIRTUAL HEAP POINTER
+ setVirtHp (virtHp `addOff` closure_size) `thenC`
+
+ -- RETURN PTR TO START OF OBJECT
+ returnFC info_offset
+ where
+ closure_size = closureSize closure_info
+ slop_size = slopSize closure_info
+\end{code}
+
+%************************************************************************
+%* *
+\subsection{Allocate uninitialized heap space}
+%* *
+%************************************************************************
+
+\begin{code}
+-- Allocate `space` words of uninitialised heap: bump the virtual heap
+-- pointer and return an address mode for the first word of the new
+-- block (relative to the real heap pointer).  Emits an ALLOC_PRIM2
+-- profiling counter for the allocation.
+allocHeap :: HeapOffset -- Size of the space required
+ -> FCode CAddrMode -- Addr mode for first word of object
+
+allocHeap space
+ = getVirtAndRealHp `thenFC` \ (virtHp, realHp) ->
+ -- virtHp points to the last allocated word, so the new block
+ -- starts one word beyond it
+ let block_start = addOff virtHp (intOff 1)
+ in
+ -- We charge the allocation to "PRIM" (which is probably right)
+ profCtrC SLIT("ALLOC_PRIM2") [COffset space] `thenC`
+
+ -- BUMP THE VIRTUAL HEAP POINTER
+ setVirtHp (virtHp `addOff` space) `thenC`
+
+ -- RETURN PTR TO START OF OBJECT
+ returnFC (CAddr (HpRel realHp block_start))
+\end{code}
diff --git a/ghc/compiler/codeGen/CgLetNoEscape.hi b/ghc/compiler/codeGen/CgLetNoEscape.hi
new file mode 100644
index 0000000000..8f5b0c4b23
--- /dev/null
+++ b/ghc/compiler/codeGen/CgLetNoEscape.hi
@@ -0,0 +1,12 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgLetNoEscape where
+import CgBindery(CgIdInfo)
+import CgMonad(CgInfoDownwards, CgState, EndOfBlockInfo)
+import CostCentre(CostCentre)
+import Id(Id)
+import Maybes(Labda)
+import StgSyn(StgBinderInfo, StgExpr)
+import UniqFM(UniqFM)
+cgLetNoEscapeClosure :: Id -> CostCentre -> StgBinderInfo -> UniqFM Id -> EndOfBlockInfo -> Labda Int -> [Id] -> StgExpr Id Id -> CgInfoDownwards -> CgState -> ((Id, CgIdInfo), CgState)
+ {-# GHC_PRAGMA _A_ 8 _U_ 2002202212 _N_ _S_ "LAALLALL" {_A_ 5 _U_ 2222212 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgLetNoEscape.lhs b/ghc/compiler/codeGen/CgLetNoEscape.lhs
new file mode 100644
index 0000000000..abc1e115c9
--- /dev/null
+++ b/ghc/compiler/codeGen/CgLetNoEscape.lhs
@@ -0,0 +1,202 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1993-1994
+%
+%********************************************************
+%* *
+\section[CgLetNoEscape]{Handling ``let-no-escapes''}
+%* *
+%********************************************************
+
+\begin{code}
+#include "HsVersions.h"
+
+module CgLetNoEscape ( cgLetNoEscapeClosure ) where
+
+import StgSyn
+import CgMonad
+import AbsCSyn
+
+import CgBindery -- various things
+import CgExpr ( cgExpr )
+import CgHeapery ( heapCheck )
+import CgRetConv ( assignRegs )
+import CgStackery ( mkVirtStkOffsets )
+import CgUsages ( setRealAndVirtualSps, getVirtSps )
+import CLabelInfo ( mkFastEntryLabel )
+import ClosureInfo ( mkLFLetNoEscape )
+import Id ( getIdKind )
+import Util
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[what-is-non-escaping]{What {\em is} a ``non-escaping let''?}
+%* *
+%************************************************************************
+
+[The {\em code} that detects these things is elsewhere.]
+
+Consider:
+\begin{verbatim}
+ let x = fvs \ args -> e
+ in
+ if ... then x else
+ if ... then x else ...
+\end{verbatim}
+@x@ is used twice (so we probably can't unfold it), but when it is
+entered, the stack is deeper than it was when the definition of @x@
+happened. Specifically, if instead of allocating a closure for @x@,
+we saved all @x@'s fvs on the stack, and remembered the stack depth at
+that moment, then whenever we enter @x@ we can simply set the stack
+pointer(s) to these remembered (compile-time-fixed) values, and jump
+to the code for @x@.
+
+All of this is provided x is:
+\begin{enumerate}
+\item
+non-updatable;
+\item
+guaranteed to be entered before the stack retreats -- ie x is not
+buried in a heap-allocated closure, or passed as an argument to something;
+\item
+all the enters have exactly the right number of arguments,
+no more no less;
+\item
+all the enters are tail calls; that is, they return to the
+caller enclosing the definition of @x@.
+\end{enumerate}
+
+Under these circumstances we say that @x@ is {\em non-escaping}.
+
+An example of when (4) does {\em not} hold:
+\begin{verbatim}
+ let x = ...
+ in case x of ...alts...
+\end{verbatim}
+
+Here, @x@ is certainly entered only when the stack is deeper than when
+@x@ is defined, but here it must return to \tr{...alts...} So we can't
+just adjust the stack down to @x@'s recalled points, because that
+would lose @alts@' context.
+
+Things can get a little more complicated. Consider:
+\begin{verbatim}
+ let y = ...
+ in let x = fvs \ args -> ...y...
+ in ...x...
+\end{verbatim}
+
+Now, if @x@ is used in a non-escaping way in \tr{...x...}, {\em and}
+@y@ is used in a non-escaping way in \tr{...y...}, {\em then} @y@ is
+non-escaping.
+
+@x@ can even be recursive! Eg:
+\begin{verbatim}
+ letrec x = [y] \ [v] -> if v then x True else ...
+ in
+ ...(x b)...
+\end{verbatim}
+
+
+%************************************************************************
+%* *
+\subsection[codeGen-for-non-escaping]{Generating code for a ``non-escaping let''}
+%* *
+%************************************************************************
+
+
+Generating code for this is fun. It is all very very similar to what
+we do for a case expression. The duality is between
+\begin{verbatim}
+ let-no-escape x = b
+ in e
+\end{verbatim}
+and
+\begin{verbatim}
+ case e of ... -> b
+\end{verbatim}
+
+That is, the RHS of @x@ (ie @b@) will execute {\em later}, just like
+the alternative of the case; it needs to be compiled in an environment
+in which all volatile bindings are forgotten, and the free vars are
+bound only to stable things like stack locations. The @e@ part will
+execute {\em next}, just like the scrutinee of a case.
+
+First, we need to save all @x@'s free vars
+on the stack, if they aren't there already.
+
+\begin{code}
+-- Compile a let-no-escape binding: compile the body in a forked
+-- environment (with bindings dead in the RHS nuked), wrap the
+-- resulting abstract C in a labelled code block using the binder's
+-- fast-entry label, and return the binder paired with a
+-- let-no-escape CgIdInfo recording the stack pointers (vA, vB) at
+-- which the body expects to be entered.
+cgLetNoEscapeClosure
+ :: Id -- binder
+ -> CostCentre -- NB: *** NOT USED *** ToDo (WDP 94/06)
+ -> StgBinderInfo -- NB: ditto
+ -> PlainStgLiveVars -- variables live in RHS, including the binders
+ -- themselves in the case of a recursive group
+ -> EndOfBlockInfo -- where are we going to?
+ -> Maybe VirtualSpBOffset -- Slot for current cost centre
+ -> [Id] -- args (as in \ args -> body)
+ -> PlainStgExpr -- body (as in above)
+ -> FCode (Id, CgIdInfo)
+
+-- ToDo: deal with the cost-centre issues
+
+cgLetNoEscapeClosure binder cc bi full_live_in_rhss rhs_eob_info maybe_cc_slot args body
+ = let
+ arity = length args
+ lf_info = mkLFLetNoEscape arity full_live_in_rhss{-used???-}
+ in
+ -- forkEvalHelp compiles the RHS under rhs_eob_info, first
+ -- discarding bindings not live in the RHS; it yields the virtual
+ -- stack offsets (vA, vB) and the forked abstract C for the body
+ forkEvalHelp
+ rhs_eob_info
+ (nukeDeadBindings full_live_in_rhss)
+ (forkAbsC (cgLetNoEscapeBody args body))
+ `thenFC` \ (vA, vB, code) ->
+ let
+ label = mkFastEntryLabel binder arity
+ in
+ absC (CCodeBlock label code) `thenC`
+ returnFC (binder, letNoEscapeIdInfo binder vA vB lf_info)
+\end{code}
+
+\begin{code}
+-- Compile the body of a let-no-escape closure.  Assigns as many args
+-- as possible to registers, lays the rest out on the A/B stacks,
+-- binds them all in the environment, and compiles the body inside a
+-- heap check (Node is not live, so False is passed to heapCheck).
+cgLetNoEscapeBody :: [Id] -- Args
+ -> PlainStgExpr -- Body
+ -> Code
+
+cgLetNoEscapeBody all_args rhs
+ = getVirtSps `thenFC` \ (vA, vB) ->
+ let
+ arg_kinds = map getIdKind all_args
+ -- no regs are live when the args are assigned
+ (arg_regs, _) = assignRegs [{-nothing live-}] arg_kinds
+ stk_args = drop (length arg_regs) all_args
+
+ -- stk_args is the args which are passed on the stack at the fast-entry point
+ -- Using them, we define the stack layout
+ (spA_stk_args, spB_stk_args, stk_bxd_w_offsets, stk_ubxd_w_offsets)
+ = mkVirtStkOffsets
+ vA vB -- Initial virtual SpA, SpB
+ getIdKind
+ stk_args
+ in
+
+ -- Bind args to appropriate regs/stk locns
+ bindArgsToRegs all_args arg_regs `thenC`
+ mapCs bindNewToAStack stk_bxd_w_offsets `thenC`
+ mapCs bindNewToBStack stk_ubxd_w_offsets `thenC`
+ setRealAndVirtualSps spA_stk_args spB_stk_args `thenC`
+
+{- ToDo: NOT SURE ABOUT COST CENTRES!
+ -- Enter the closures cc, if required
+ lexEnterCCcode closure_info maybe_cc `thenC`
+-}
+
+ -- [No need for stack check; forkEvalHelp dealt with that]
+
+ -- Do heap check [ToDo: omit for non-recursive case by recording in
+ -- envt and absorbing at call site]
+ heapCheck arg_regs False {- Node doesn't point to it -} (
+ -- heapCheck *encloses* the rest
+
+ -- Compile the body
+ cgExpr rhs
+ )
diff --git a/ghc/compiler/codeGen/CgMonad.hi b/ghc/compiler/codeGen/CgMonad.hi
new file mode 100644
index 0000000000..73a974ecbf
--- /dev/null
+++ b/ghc/compiler/codeGen/CgMonad.hi
@@ -0,0 +1,209 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgMonad where
+import AbsCSyn(AbstractC, CAddrMode, CExprMacro, CStmtMacro, MagicId, RegRelative, ReturnInfo)
+import BasicLit(BasicLit)
+import CLabelInfo(CLabel)
+import CgBindery(CgBindings(..), CgIdInfo, StableLoc, VolatileLoc, heapIdInfo, stableAmodeIdInfo)
+import ClosureInfo(ClosureInfo, LambdaFormInfo, StandardFormInfo)
+import CmdLineOpts(GlobalSwitch)
+import CostCentre(CcKind, CostCentre, IsCafCC, IsDupdCC)
+import HeapOffs(HeapOffset, VirtualHeapOffset(..), VirtualSpAOffset(..), VirtualSpBOffset(..))
+import Id(DataCon(..), Id, IdDetails)
+import IdEnv(IdEnv(..))
+import IdInfo(IdInfo)
+import Maybes(Labda)
+import Outputable(NamedThing, Outputable)
+import PreludePS(_PackedString)
+import PrimKind(PrimKind)
+import PrimOps(PrimOp)
+import StgSyn(PlainStgLiveVars(..))
+import UniType(UniType)
+import UniqFM(UniqFM)
+import UniqSet(UniqSet(..))
+import Unique(Unique)
+infixr 9 `thenC`
+infixr 9 `thenFC`
+type AStackUsage = (Int, [(Int, StubFlag)], Int, Int)
+data AbstractC {-# GHC_PRAGMA AbsCNop | AbsCStmts AbstractC AbstractC | CAssign CAddrMode CAddrMode | CJump CAddrMode | CFallThrough CAddrMode | CReturn CAddrMode ReturnInfo | CSwitch CAddrMode [(BasicLit, AbstractC)] AbstractC | CCodeBlock CLabel AbstractC | CInitHdr ClosureInfo RegRelative CAddrMode Bool | COpStmt [CAddrMode] PrimOp [CAddrMode] Int [MagicId] | CSimultaneous AbstractC | CMacroStmt CStmtMacro [CAddrMode] | CCallProfCtrMacro _PackedString [CAddrMode] | CCallProfCCMacro _PackedString [CAddrMode] | CStaticClosure CLabel ClosureInfo CAddrMode [CAddrMode] | CClosureInfoAndCode ClosureInfo AbstractC (Labda AbstractC) CAddrMode [Char] | CRetVector CLabel [Labda CAddrMode] AbstractC | CRetUnVector CLabel CAddrMode | CFlatRetVector CLabel [CAddrMode] | CCostCentreDecl Bool CostCentre | CClosureUpdInfo AbstractC | CSplitMarker #-}
+type BStackUsage = (Int, [Int], Int, Int)
+data CAddrMode {-# GHC_PRAGMA CVal RegRelative PrimKind | CAddr RegRelative | CReg MagicId | CTableEntry CAddrMode CAddrMode PrimKind | CTemp Unique PrimKind | CLbl CLabel PrimKind | CUnVecLbl CLabel CLabel | CCharLike CAddrMode | CIntLike CAddrMode | CString _PackedString | CLit BasicLit | CLitLit _PackedString PrimKind | COffset HeapOffset | CCode AbstractC | CLabelledCode CLabel AbstractC | CJoinPoint Int Int | CMacroExpr PrimKind CExprMacro [CAddrMode] | CCostCentre CostCentre Bool #-}
+data CLabel
+type CgBindings = UniqFM CgIdInfo
+data CgIdInfo {-# GHC_PRAGMA MkCgIdInfo Id VolatileLoc StableLoc LambdaFormInfo #-}
+data CgInfoDownwards = MkCgInfoDown CompilationInfo (UniqFM CgIdInfo) EndOfBlockInfo
+data CgState = MkCgState AbstractC (UniqFM CgIdInfo) ((Int, [(Int, StubFlag)], Int, Int), (Int, [Int], Int, Int), (HeapOffset, HeapOffset))
+type Code = CgInfoDownwards -> CgState -> CgState
+data CompilationInfo = MkCompInfo (GlobalSwitch -> Bool) _PackedString
+data CostCentre {-# GHC_PRAGMA NoCostCentre | NormalCC CcKind _PackedString _PackedString IsDupdCC IsCafCC | CurrentCC | SubsumedCosts | AllCafsCC _PackedString _PackedString | AllDictsCC _PackedString _PackedString IsDupdCC | OverheadCC | PreludeCafsCC | PreludeDictsCC IsDupdCC | DontCareCC #-}
+data EndOfBlockInfo = EndOfBlockInfo Int Int Sequel
+type FCode a = CgInfoDownwards -> CgState -> (a, CgState)
+data GlobalSwitch
+ {-# GHC_PRAGMA ProduceC [Char] | ProduceS [Char] | ProduceHi [Char] | AsmTarget [Char] | ForConcurrent | Haskell_1_3 | GlasgowExts | CompilingPrelude | HideBuiltinNames | HideMostBuiltinNames | EnsureSplittableC [Char] | Verbose | PprStyle_User | PprStyle_Debug | PprStyle_All | DoCoreLinting | EmitArityChecks | OmitInterfacePragmas | OmitDerivedRead | OmitReexportedInstances | UnfoldingUseThreshold Int | UnfoldingCreationThreshold Int | UnfoldingOverrideThreshold Int | ReportWhyUnfoldingsDisallowed | UseGetMentionedVars | ShowPragmaNameErrs | NameShadowingNotOK | SigsRequired | SccProfilingOn | AutoSccsOnExportedToplevs | AutoSccsOnAllToplevs | AutoSccsOnIndividualCafs | SccGroup [Char] | DoTickyProfiling | DoSemiTagging | FoldrBuildOn | FoldrBuildTrace | SpecialiseImports | ShowImportSpecs | OmitUnspecialisedCode | SpecialiseOverloaded | SpecialiseUnboxed | SpecialiseAll | SpecialiseTrace | OmitBlackHoling | StgDoLetNoEscapes | IgnoreStrictnessPragmas | IrrefutableTuples | IrrefutableEverything | AllStrict | AllDemanded | D_dump_rif2hs | D_dump_rn4 | D_dump_tc | D_dump_deriv | D_dump_ds | D_dump_occur_anal | D_dump_simpl | D_dump_spec | D_dump_stranal | D_dump_deforest | D_dump_stg | D_dump_absC | D_dump_flatC | D_dump_realC | D_dump_asm | D_dump_core_passes | D_dump_core_passes_info | D_verbose_core2core | D_verbose_stg2stg | D_simplifier_stats #-}
+data HeapOffset
+type HeapUsage = (HeapOffset, HeapOffset)
+data LambdaFormInfo {-# GHC_PRAGMA LFReEntrant Bool Int Bool | LFCon Id Bool | LFTuple Id Bool | LFThunk Bool Bool Bool StandardFormInfo | LFArgument | LFImported | LFLetNoEscape Int (UniqFM Id) | LFBlackHole | LFIndirection #-}
+data IsCafCC {-# GHC_PRAGMA IsCafCC | IsNotCafCC #-}
+type SemiTaggingStuff = Labda ([(Int, (AbstractC, CLabel))], Labda (Labda Id, (AbstractC, CLabel)))
+data Sequel = InRetReg | OnStack Int | UpdateCode CAddrMode | CaseAlts CAddrMode (Labda ([(Int, (AbstractC, CLabel))], Labda (Labda Id, (AbstractC, CLabel))))
+data StubFlag {-# GHC_PRAGMA Stubbed | NotStubbed #-}
+type VirtualHeapOffset = HeapOffset
+type VirtualSpAOffset = Int
+type VirtualSpBOffset = Int
+type DataCon = Id
+data Id {-# GHC_PRAGMA Id Unique UniType IdInfo IdDetails #-}
+type IdEnv a = UniqFM a
+data Labda a {-# GHC_PRAGMA Hamna | Ni a #-}
+type PlainStgLiveVars = UniqFM Id
+data UniqFM a {-# GHC_PRAGMA EmptyUFM | LeafUFM Int# a | NodeUFM Int# Int# (UniqFM a) (UniqFM a) #-}
+type UniqSet a = UniqFM a
+data Unique {-# GHC_PRAGMA MkUnique Int# #-}
+absC :: AbstractC -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 201 _N_ _S_ "LAU(LLL)" {_A_ 4 _U_ 2222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+addBindC :: Id -> CgIdInfo -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 4 _U_ 1201 _N_ _S_ "LLAU(LLL)" {_A_ 5 _U_ 12222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+addBindsC :: [(Id, CgIdInfo)] -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 101 _N_ _S_ "LAU(LLL)" {_A_ 4 _U_ 1222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+addFreeBSlots :: [Int] -> [Int] -> [Int]
+ {-# GHC_PRAGMA _A_ 2 _U_ 22 _N_ _N_ _N_ _N_ #-}
+costCentresC :: _PackedString -> [CAddrMode] -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 4 _U_ 2211 _N_ _S_ "LLU(U(SA)AA)U(LLL)" {_A_ 4 _U_ 2211 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+costCentresFlag :: CgInfoDownwards -> CgState -> (Bool, CgState)
+ {-# GHC_PRAGMA _A_ 2 _U_ 12 _N_ _S_ "U(U(LA)AA)L" {_A_ 2 _U_ 12 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+fixC :: (a -> CgInfoDownwards -> CgState -> (a, CgState)) -> CgInfoDownwards -> CgState -> (a, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 222 _N_ _S_ "SLL" _N_ _N_ #-}
+forkAbsC :: (CgInfoDownwards -> CgState -> CgState) -> CgInfoDownwards -> CgState -> (AbstractC, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 121 _N_ _S_ "LLU(LLL)" {_A_ 5 _U_ 12222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+forkAlts :: [CgInfoDownwards -> CgState -> (a, CgState)] -> [CgInfoDownwards -> CgState -> (a, CgState)] -> (CgInfoDownwards -> CgState -> (b, CgState)) -> CgInfoDownwards -> CgState -> (([a], b), CgState)
+ {-# GHC_PRAGMA _A_ 5 _U_ 11122 _N_ _N_ _N_ _N_ #-}
+forkClosureBody :: (CgInfoDownwards -> CgState -> CgState) -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 111 _N_ _S_ "LU(LLA)U(LLL)" {_A_ 4 _U_ 1221 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+forkEval :: EndOfBlockInfo -> (CgInfoDownwards -> CgState -> CgState) -> (CgInfoDownwards -> CgState -> (Sequel, CgState)) -> CgInfoDownwards -> CgState -> (EndOfBlockInfo, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 21112 _N_ _N_ _N_ _N_ #-}
+forkEvalHelp :: EndOfBlockInfo -> (CgInfoDownwards -> CgState -> CgState) -> (CgInfoDownwards -> CgState -> (a, CgState)) -> CgInfoDownwards -> CgState -> ((Int, Int, a), CgState)
+ {-# GHC_PRAGMA _A_ 5 _U_ 21112 _N_ _S_ "LLLU(LLA)L" _N_ _N_ #-}
+forkStatics :: (CgInfoDownwards -> CgState -> (a, CgState)) -> CgInfoDownwards -> CgState -> (a, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 111 _N_ _S_ "LU(LAA)U(LLL)" {_A_ 5 _U_ 12222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+getAbsC :: (CgInfoDownwards -> CgState -> CgState) -> CgInfoDownwards -> CgState -> (AbstractC, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 121 _N_ _S_ "LLU(LLL)" {_A_ 5 _U_ 12222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+getEndOfBlockInfo :: CgInfoDownwards -> CgState -> (EndOfBlockInfo, CgState)
+ {-# GHC_PRAGMA _A_ 2 _U_ 12 _N_ _S_ "U(AAL)L" {_A_ 2 _U_ 22 _N_ _N_ _F_ _IF_ARGS_ 0 2 XX 3 \ (u0 :: EndOfBlockInfo) (u1 :: CgState) -> _!_ _TUP_2 [EndOfBlockInfo, CgState] [u0, u1] _N_} _F_ _IF_ARGS_ 0 2 CX 4 \ (u0 :: CgInfoDownwards) (u1 :: CgState) -> case u0 of { _ALG_ _ORIG_ CgMonad MkCgInfoDown (u2 :: CompilationInfo) (u3 :: UniqFM CgIdInfo) (u4 :: EndOfBlockInfo) -> _!_ _TUP_2 [EndOfBlockInfo, CgState] [u4, u1]; _NO_DEFLT_ } _N_ #-}
+getUnstubbedAStackSlots :: Int -> CgInfoDownwards -> CgState -> ([Int], CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 201 _N_ _S_ "LAU(LLU(U(LLLL)LL))" {_A_ 4 _U_ 2222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+heapIdInfo :: Id -> HeapOffset -> LambdaFormInfo -> CgIdInfo
+ {-# GHC_PRAGMA _A_ 3 _U_ 222 _N_ _N_ _N_ _N_ #-}
+initC :: CompilationInfo -> (CgInfoDownwards -> CgState -> CgState) -> AbstractC
+ {-# GHC_PRAGMA _A_ 2 _U_ 21 _N_ _S_ "LS" _N_ _N_ #-}
+isStringSwitchSetC :: ([Char] -> GlobalSwitch) -> CgInfoDownwards -> CgState -> (Bool, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 112 _N_ _S_ "LU(U(LA)AA)L" {_A_ 3 _U_ 112 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+isStubbed :: StubFlag -> Bool
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "E" _F_ _IF_ARGS_ 0 1 C 4 \ (u0 :: StubFlag) -> case u0 of { _ALG_ _ORIG_ CgMonad Stubbed -> _!_ True [] []; _ORIG_ CgMonad NotStubbed -> _!_ False [] []; _NO_DEFLT_ } _N_ #-}
+isSwitchSetC :: GlobalSwitch -> CgInfoDownwards -> CgState -> (Bool, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 212 _N_ _S_ "LU(U(LA)AA)L" {_A_ 3 _U_ 212 _N_ _N_ _F_ _IF_ARGS_ 0 3 XXX 6 \ (u0 :: GlobalSwitch) (u1 :: GlobalSwitch -> Bool) (u2 :: CgState) -> let {(u3 :: Bool) = _APP_ u1 [ u0 ]} in _!_ _TUP_2 [Bool, CgState] [u3, u2] _N_} _F_ _ALWAYS_ \ (u0 :: GlobalSwitch) (u1 :: CgInfoDownwards) (u2 :: CgState) -> case u1 of { _ALG_ _ORIG_ CgMonad MkCgInfoDown (u3 :: CompilationInfo) (u4 :: UniqFM CgIdInfo) (u5 :: EndOfBlockInfo) -> case u3 of { _ALG_ _ORIG_ CgMonad MkCompInfo (u6 :: GlobalSwitch -> Bool) (u7 :: _PackedString) -> let {(u8 :: Bool) = _APP_ u6 [ u0 ]} in _!_ _TUP_2 [Bool, CgState] [u8, u2]; _NO_DEFLT_ }; _NO_DEFLT_ } _N_ #-}
+listCs :: [CgInfoDownwards -> CgState -> CgState] -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 122 _N_ _S_ "SLL" _N_ _N_ #-}
+listFCs :: [CgInfoDownwards -> CgState -> (a, CgState)] -> CgInfoDownwards -> CgState -> ([a], CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 122 _N_ _S_ "SLL" _N_ _N_ #-}
+lookupBindC :: Id -> CgInfoDownwards -> CgState -> (CgIdInfo, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 211 _N_ _S_ "LU(ALA)U(LLL)" {_A_ 5 _U_ 22222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+mapCs :: (a -> CgInfoDownwards -> CgState -> CgState) -> [a] -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 4 _U_ 2122 _N_ _S_ "LSLL" _N_ _N_ #-}
+mapFCs :: (a -> CgInfoDownwards -> CgState -> (b, CgState)) -> [a] -> CgInfoDownwards -> CgState -> ([b], CgState)
+ {-# GHC_PRAGMA _A_ 4 _U_ 2122 _N_ _S_ "LSLL" _N_ _N_ #-}
+modifyBindC :: Id -> (CgIdInfo -> CgIdInfo) -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 4 _U_ 1201 _N_ _S_ "LLAU(LLL)" {_A_ 5 _U_ 12222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+moduleName :: CgInfoDownwards -> CgState -> (_PackedString, CgState)
+ {-# GHC_PRAGMA _A_ 2 _U_ 12 _N_ _S_ "U(U(AL)AA)L" {_A_ 2 _U_ 22 _N_ _N_ _F_ _IF_ARGS_ 0 2 XX 3 \ (u0 :: _PackedString) (u1 :: CgState) -> _!_ _TUP_2 [_PackedString, CgState] [u0, u1] _N_} _F_ _IF_ARGS_ 0 2 CX 5 \ (u0 :: CgInfoDownwards) (u1 :: CgState) -> case u0 of { _ALG_ _ORIG_ CgMonad MkCgInfoDown (u2 :: CompilationInfo) (u3 :: UniqFM CgIdInfo) (u4 :: EndOfBlockInfo) -> case u2 of { _ALG_ _ORIG_ CgMonad MkCompInfo (u5 :: GlobalSwitch -> Bool) (u6 :: _PackedString) -> _!_ _TUP_2 [_PackedString, CgState] [u6, u1]; _NO_DEFLT_ }; _NO_DEFLT_ } _N_ #-}
+noBlackHolingFlag :: CgInfoDownwards -> CgState -> (Bool, CgState)
+ {-# GHC_PRAGMA _A_ 2 _U_ 12 _N_ _S_ "U(U(LA)AA)L" {_A_ 2 _U_ 12 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+nopC :: CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 2 _U_ 01 _N_ _S_ "AU(LLL)" {_A_ 3 _U_ 222 _N_ _N_ _F_ _IF_ARGS_ 0 3 XXX 4 \ (u0 :: AbstractC) (u1 :: UniqFM CgIdInfo) (u2 :: ((Int, [(Int, StubFlag)], Int, Int), (Int, [Int], Int, Int), (HeapOffset, HeapOffset))) -> _!_ _ORIG_ CgMonad MkCgState [] [u0, u1, u2] _N_} _F_ _IF_ARGS_ 0 2 XX 1 \ (u0 :: CgInfoDownwards) (u1 :: CgState) -> u1 _N_ #-}
+nukeDeadBindings :: UniqFM Id -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 201 _N_ _S_ "LAU(LLU(U(LLLL)U(LLLL)L))" {_A_ 4 _U_ 2221 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+profCtrC :: _PackedString -> [CAddrMode] -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 4 _U_ 2211 _N_ _S_ "LLU(U(SA)AA)U(LLL)" {_A_ 4 _U_ 2211 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+returnFC :: a -> CgInfoDownwards -> CgState -> (a, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 202 _N_ _N_ _F_ _ALWAYS_ _/\_ u0 -> \ (u1 :: u0) (u2 :: CgInfoDownwards) (u3 :: CgState) -> _!_ _TUP_2 [u0, CgState] [u1, u3] _N_ #-}
+sequelToAmode :: Sequel -> CgInfoDownwards -> CgState -> (CAddrMode, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 102 _N_ _S_ "SAL" {_A_ 2 _U_ 12 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+setEndOfBlockInfo :: EndOfBlockInfo -> (CgInfoDownwards -> CgState -> CgState) -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 4 _U_ 2112 _N_ _S_ "LSU(LLA)L" {_A_ 5 _U_ 21222 _N_ _N_ _F_ _IF_ARGS_ 0 5 XXXXX 8 \ (u0 :: EndOfBlockInfo) (u1 :: CgInfoDownwards -> CgState -> CgState) (u2 :: CompilationInfo) (u3 :: UniqFM CgIdInfo) (u4 :: CgState) -> let {(u5 :: CgInfoDownwards) = _!_ _ORIG_ CgMonad MkCgInfoDown [] [u2, u3, u0]} in _APP_ u1 [ u5, u4 ] _N_} _F_ _ALWAYS_ \ (u0 :: EndOfBlockInfo) (u1 :: CgInfoDownwards -> CgState -> CgState) (u2 :: CgInfoDownwards) (u3 :: CgState) -> case u2 of { _ALG_ _ORIG_ CgMonad MkCgInfoDown (u4 :: CompilationInfo) (u5 :: UniqFM CgIdInfo) (u6 :: EndOfBlockInfo) -> let {(u7 :: CgInfoDownwards) = _!_ _ORIG_ CgMonad MkCgInfoDown [] [u4, u5, u0]} in _APP_ u1 [ u7, u3 ]; _NO_DEFLT_ } _N_ #-}
+stableAmodeIdInfo :: Id -> CAddrMode -> LambdaFormInfo -> CgIdInfo
+ {-# GHC_PRAGMA _A_ 3 _U_ 222 _N_ _N_ _N_ _N_ #-}
+thenC :: (CgInfoDownwards -> CgState -> CgState) -> (CgInfoDownwards -> CgState -> a) -> CgInfoDownwards -> CgState -> a
+ {-# GHC_PRAGMA _A_ 4 _U_ 1122 _N_ _S_ "LSLL" _F_ _ALWAYS_ _/\_ u0 -> \ (u1 :: CgInfoDownwards -> CgState -> CgState) (u2 :: CgInfoDownwards -> CgState -> u0) (u3 :: CgInfoDownwards) (u4 :: CgState) -> let {(u5 :: CgState) = _APP_ u1 [ u3, u4 ]} in _APP_ u2 [ u3, u5 ] _N_ #-}
+thenFC :: (CgInfoDownwards -> CgState -> (a, CgState)) -> (a -> CgInfoDownwards -> CgState -> b) -> CgInfoDownwards -> CgState -> b
+ {-# GHC_PRAGMA _A_ 4 _U_ 1122 _N_ _S_ "LSLL" _F_ _ALWAYS_ _/\_ u0 u1 -> \ (u2 :: CgInfoDownwards -> CgState -> (u0, CgState)) (u3 :: u0 -> CgInfoDownwards -> CgState -> u1) (u4 :: CgInfoDownwards) (u5 :: CgState) -> let {(u6 :: (u0, CgState)) = _APP_ u2 [ u4, u5 ]} in let {(u9 :: u0) = case u6 of { _ALG_ _TUP_2 (u7 :: u0) (u8 :: CgState) -> u7; _NO_DEFLT_ }} in let {(uc :: CgState) = case u6 of { _ALG_ _TUP_2 (ua :: u0) (ub :: CgState) -> ub; _NO_DEFLT_ }} in _APP_ u3 [ u9, u4, uc ] _N_ #-}
+instance Eq CLabel
+ {-# GHC_PRAGMA _M_ CLabelInfo {-dfun-} _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 3 _!_ _TUP_2 [(CLabel -> CLabel -> Bool), (CLabel -> CLabel -> Bool)] [_CONSTM_ Eq (==) (CLabel), _CONSTM_ Eq (/=) (CLabel)] _N_
+ (==) = _A_ 2 _U_ 11 _N_ _S_ "SS" _N_ _N_,
+ (/=) = _A_ 2 _U_ 11 _N_ _S_ "SS" _N_ _N_ #-}
+instance Eq GlobalSwitch
+ {-# GHC_PRAGMA _M_ CmdLineOpts {-dfun-} _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 3 _!_ _TUP_2 [(GlobalSwitch -> GlobalSwitch -> Bool), (GlobalSwitch -> GlobalSwitch -> Bool)] [_CONSTM_ Eq (==) (GlobalSwitch), _CONSTM_ Eq (/=) (GlobalSwitch)] _N_
+ (==) = _A_ 2 _U_ 11 _N_ _S_ "SS" _N_ _N_,
+ (/=) = _A_ 2 _U_ 22 _N_ _N_ _N_ _N_ #-}
+instance Eq Id
+ {-# GHC_PRAGMA _M_ Id {-dfun-} _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 3 _!_ _TUP_2 [(Id -> Id -> Bool), (Id -> Id -> Bool)] [_CONSTM_ Eq (==) (Id), _CONSTM_ Eq (/=) (Id)] _N_
+ (==) = _A_ 2 _U_ 11 _N_ _S_ "U(U(P)AAA)U(U(P)AAA)" {_A_ 2 _U_ 22 _N_ _N_ _F_ _IF_ARGS_ 0 2 XX 5 \ (u0 :: Int#) (u1 :: Int#) -> case _APP_ _WRKR_ _ORIG_ Id cmpId [ u0, u1 ] of { _PRIM_ 0# -> _!_ True [] []; (u2 :: Int#) -> _!_ False [] [] } _N_} _F_ _IF_ARGS_ 0 2 XX 5 \ (u0 :: Id) (u1 :: Id) -> case _APP_ _ORIG_ Id cmpId [ u0, u1 ] of { _PRIM_ 0# -> _!_ True [] []; (u2 :: Int#) -> _!_ False [] [] } _N_,
+ (/=) = _A_ 2 _U_ 11 _N_ _S_ "U(U(P)AAA)U(U(P)AAA)" {_A_ 2 _U_ 22 _N_ _N_ _F_ _IF_ARGS_ 0 2 XX 5 \ (u0 :: Int#) (u1 :: Int#) -> case _APP_ _WRKR_ _ORIG_ Id cmpId [ u0, u1 ] of { _PRIM_ 0# -> _!_ False [] []; (u2 :: Int#) -> _!_ True [] [] } _N_} _F_ _IF_ARGS_ 0 2 XX 5 \ (u0 :: Id) (u1 :: Id) -> case _APP_ _ORIG_ Id cmpId [ u0, u1 ] of { _PRIM_ 0# -> _!_ False [] []; (u2 :: Int#) -> _!_ True [] [] } _N_ #-}
+instance Eq Unique
+ {-# GHC_PRAGMA _M_ Unique {-dfun-} _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 3 _!_ _TUP_2 [(Unique -> Unique -> Bool), (Unique -> Unique -> Bool)] [_CONSTM_ Eq (==) (Unique), _CONSTM_ Eq (/=) (Unique)] _N_
+ (==) = _A_ 2 _U_ 11 _N_ _S_ "U(P)U(P)" {_A_ 2 _U_ 22 _N_ _N_ _F_ _IF_ARGS_ 0 2 XX 1 \ (u0 :: Int#) (u1 :: Int#) -> _#_ eqInt# [] [u0, u1] _N_} _F_ _IF_ARGS_ 0 2 CC 3 \ (u0 :: Unique) (u1 :: Unique) -> case u0 of { _ALG_ _ORIG_ Unique MkUnique (u2 :: Int#) -> case u1 of { _ALG_ _ORIG_ Unique MkUnique (u3 :: Int#) -> _#_ eqInt# [] [u2, u3]; _NO_DEFLT_ }; _NO_DEFLT_ } _N_,
+ (/=) = _A_ 2 _U_ 11 _N_ _S_ "U(P)U(P)" {_A_ 2 _U_ 22 _N_ _N_ _F_ _IF_ARGS_ 0 2 XX 5 \ (u0 :: Int#) (u1 :: Int#) -> case _#_ eqInt# [] [u0, u1] of { _ALG_ True -> _!_ False [] []; False -> _!_ True [] []; _NO_DEFLT_ } _N_} _F_ _IF_ARGS_ 0 2 CC 7 \ (u0 :: Unique) (u1 :: Unique) -> case u0 of { _ALG_ _ORIG_ Unique MkUnique (u2 :: Int#) -> case u1 of { _ALG_ _ORIG_ Unique MkUnique (u3 :: Int#) -> case _#_ eqInt# [] [u2, u3] of { _ALG_ True -> _!_ False [] []; False -> _!_ True [] []; _NO_DEFLT_ }; _NO_DEFLT_ }; _NO_DEFLT_ } _N_ #-}
+instance Ord CLabel
+ {-# GHC_PRAGMA _M_ CLabelInfo {-dfun-} _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 9 _!_ _TUP_8 [{{Eq CLabel}}, (CLabel -> CLabel -> Bool), (CLabel -> CLabel -> Bool), (CLabel -> CLabel -> Bool), (CLabel -> CLabel -> Bool), (CLabel -> CLabel -> CLabel), (CLabel -> CLabel -> CLabel), (CLabel -> CLabel -> _CMP_TAG)] [_DFUN_ Eq (CLabel), _CONSTM_ Ord (<) (CLabel), _CONSTM_ Ord (<=) (CLabel), _CONSTM_ Ord (>=) (CLabel), _CONSTM_ Ord (>) (CLabel), _CONSTM_ Ord max (CLabel), _CONSTM_ Ord min (CLabel), _CONSTM_ Ord _tagCmp (CLabel)] _N_
+ (<) = _A_ 2 _U_ 22 _N_ _S_ "SS" _N_ _N_,
+ (<=) = _A_ 2 _U_ 22 _N_ _S_ "SS" _N_ _N_,
+ (>=) = _A_ 2 _U_ 22 _N_ _S_ "SS" _N_ _N_,
+ (>) = _A_ 2 _U_ 22 _N_ _S_ "SS" _N_ _N_,
+ max = _A_ 2 _U_ 22 _N_ _S_ "SS" _N_ _N_,
+ min = _A_ 2 _U_ 22 _N_ _S_ "SS" _N_ _N_,
+ _tagCmp = _A_ 2 _U_ 22 _N_ _S_ "SS" _N_ _N_ #-}
+instance Ord GlobalSwitch
+ {-# GHC_PRAGMA _M_ CmdLineOpts {-dfun-} _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 9 _!_ _TUP_8 [{{Eq GlobalSwitch}}, (GlobalSwitch -> GlobalSwitch -> Bool), (GlobalSwitch -> GlobalSwitch -> Bool), (GlobalSwitch -> GlobalSwitch -> Bool), (GlobalSwitch -> GlobalSwitch -> Bool), (GlobalSwitch -> GlobalSwitch -> GlobalSwitch), (GlobalSwitch -> GlobalSwitch -> GlobalSwitch), (GlobalSwitch -> GlobalSwitch -> _CMP_TAG)] [_DFUN_ Eq (GlobalSwitch), _CONSTM_ Ord (<) (GlobalSwitch), _CONSTM_ Ord (<=) (GlobalSwitch), _CONSTM_ Ord (>=) (GlobalSwitch), _CONSTM_ Ord (>) (GlobalSwitch), _CONSTM_ Ord max (GlobalSwitch), _CONSTM_ Ord min (GlobalSwitch), _CONSTM_ Ord _tagCmp (GlobalSwitch)] _N_
+ (<) = _A_ 2 _U_ 11 _N_ _S_ "SS" _N_ _N_,
+ (<=) = _A_ 2 _U_ 11 _N_ _S_ "SS" _N_ _N_,
+ (>=) = _A_ 2 _U_ 11 _N_ _S_ "SS" _N_ _N_,
+ (>) = _A_ 2 _U_ 11 _N_ _S_ "SS" _N_ _N_,
+ max = _A_ 2 _U_ 22 _N_ _N_ _N_ _N_,
+ min = _A_ 2 _U_ 22 _N_ _N_ _N_ _N_,
+ _tagCmp = _A_ 2 _U_ 22 _N_ _N_ _N_ _N_ #-}
+instance Ord Id
+ {-# GHC_PRAGMA _M_ Id {-dfun-} _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 9 _!_ _TUP_8 [{{Eq Id}}, (Id -> Id -> Bool), (Id -> Id -> Bool), (Id -> Id -> Bool), (Id -> Id -> Bool), (Id -> Id -> Id), (Id -> Id -> Id), (Id -> Id -> _CMP_TAG)] [_DFUN_ Eq (Id), _CONSTM_ Ord (<) (Id), _CONSTM_ Ord (<=) (Id), _CONSTM_ Ord (>=) (Id), _CONSTM_ Ord (>) (Id), _CONSTM_ Ord max (Id), _CONSTM_ Ord min (Id), _CONSTM_ Ord _tagCmp (Id)] _N_
+ (<) = _A_ 2 _U_ 11 _N_ _S_ "U(U(P)AAA)U(U(P)AAA)" {_A_ 2 _U_ 22 _N_ _N_ _N_ _N_} _N_ _N_,
+ (<=) = _A_ 2 _U_ 11 _N_ _S_ "U(U(P)AAA)U(U(P)AAA)" {_A_ 2 _U_ 22 _N_ _N_ _N_ _N_} _N_ _N_,
+ (>=) = _A_ 2 _U_ 11 _N_ _S_ "U(U(P)AAA)U(U(P)AAA)" {_A_ 2 _U_ 22 _N_ _N_ _N_ _N_} _N_ _N_,
+ (>) = _A_ 2 _U_ 11 _N_ _S_ "U(U(P)AAA)U(U(P)AAA)" {_A_ 2 _U_ 22 _N_ _N_ _N_ _N_} _N_ _N_,
+ max = _A_ 2 _U_ 22 _N_ _N_ _N_ _N_,
+ min = _A_ 2 _U_ 22 _N_ _N_ _N_ _N_,
+ _tagCmp = _A_ 2 _U_ 11 _N_ _S_ "U(U(P)AAA)U(U(P)AAA)" {_A_ 2 _U_ 22 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+instance Ord Unique
+ {-# GHC_PRAGMA _M_ Unique {-dfun-} _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 9 _!_ _TUP_8 [{{Eq Unique}}, (Unique -> Unique -> Bool), (Unique -> Unique -> Bool), (Unique -> Unique -> Bool), (Unique -> Unique -> Bool), (Unique -> Unique -> Unique), (Unique -> Unique -> Unique), (Unique -> Unique -> _CMP_TAG)] [_DFUN_ Eq (Unique), _CONSTM_ Ord (<) (Unique), _CONSTM_ Ord (<=) (Unique), _CONSTM_ Ord (>=) (Unique), _CONSTM_ Ord (>) (Unique), _CONSTM_ Ord max (Unique), _CONSTM_ Ord min (Unique), _CONSTM_ Ord _tagCmp (Unique)] _N_
+ (<) = _A_ 2 _U_ 11 _N_ _S_ "U(P)U(P)" {_A_ 2 _U_ 22 _N_ _N_ _F_ _IF_ARGS_ 0 2 XX 1 \ (u0 :: Int#) (u1 :: Int#) -> _#_ ltInt# [] [u0, u1] _N_} _F_ _IF_ARGS_ 0 2 CC 3 \ (u0 :: Unique) (u1 :: Unique) -> case u0 of { _ALG_ _ORIG_ Unique MkUnique (u2 :: Int#) -> case u1 of { _ALG_ _ORIG_ Unique MkUnique (u3 :: Int#) -> _#_ ltInt# [] [u2, u3]; _NO_DEFLT_ }; _NO_DEFLT_ } _N_,
+ (<=) = _A_ 2 _U_ 11 _N_ _S_ "U(P)U(P)" {_A_ 2 _U_ 22 _N_ _N_ _F_ _IF_ARGS_ 0 2 XX 1 \ (u0 :: Int#) (u1 :: Int#) -> _#_ leInt# [] [u0, u1] _N_} _F_ _IF_ARGS_ 0 2 CC 3 \ (u0 :: Unique) (u1 :: Unique) -> case u0 of { _ALG_ _ORIG_ Unique MkUnique (u2 :: Int#) -> case u1 of { _ALG_ _ORIG_ Unique MkUnique (u3 :: Int#) -> _#_ leInt# [] [u2, u3]; _NO_DEFLT_ }; _NO_DEFLT_ } _N_,
+ (>=) = _A_ 2 _U_ 11 _N_ _S_ "U(P)U(P)" {_A_ 2 _U_ 22 _N_ _N_ _F_ _IF_ARGS_ 0 2 XX 5 \ (u0 :: Int#) (u1 :: Int#) -> case _#_ ltInt# [] [u0, u1] of { _ALG_ True -> _!_ False [] []; False -> _!_ True [] []; _NO_DEFLT_ } _N_} _F_ _IF_ARGS_ 0 2 CC 7 \ (u0 :: Unique) (u1 :: Unique) -> case u0 of { _ALG_ _ORIG_ Unique MkUnique (u2 :: Int#) -> case u1 of { _ALG_ _ORIG_ Unique MkUnique (u3 :: Int#) -> case _#_ ltInt# [] [u2, u3] of { _ALG_ True -> _!_ False [] []; False -> _!_ True [] []; _NO_DEFLT_ }; _NO_DEFLT_ }; _NO_DEFLT_ } _N_,
+ (>) = _A_ 2 _U_ 11 _N_ _S_ "U(P)U(P)" {_A_ 2 _U_ 22 _N_ _N_ _F_ _IF_ARGS_ 0 2 XX 5 \ (u0 :: Int#) (u1 :: Int#) -> case _#_ leInt# [] [u0, u1] of { _ALG_ True -> _!_ False [] []; False -> _!_ True [] []; _NO_DEFLT_ } _N_} _F_ _IF_ARGS_ 0 2 CC 7 \ (u0 :: Unique) (u1 :: Unique) -> case u0 of { _ALG_ _ORIG_ Unique MkUnique (u2 :: Int#) -> case u1 of { _ALG_ _ORIG_ Unique MkUnique (u3 :: Int#) -> case _#_ leInt# [] [u2, u3] of { _ALG_ True -> _!_ False [] []; False -> _!_ True [] []; _NO_DEFLT_ }; _NO_DEFLT_ }; _NO_DEFLT_ } _N_,
+ max = _A_ 2 _U_ 22 _N_ _N_ _N_ _N_,
+ min = _A_ 2 _U_ 22 _N_ _N_ _N_ _N_,
+ _tagCmp = _A_ 2 _U_ 11 _N_ _S_ "U(P)U(P)" {_A_ 2 _U_ 22 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+instance NamedThing Id
+ {-# GHC_PRAGMA _M_ Id {-dfun-} _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 11 _!_ _TUP_10 [(Id -> ExportFlag), (Id -> Bool), (Id -> (_PackedString, _PackedString)), (Id -> _PackedString), (Id -> [_PackedString]), (Id -> SrcLoc), (Id -> Unique), (Id -> Bool), (Id -> UniType), (Id -> Bool)] [_CONSTM_ NamedThing getExportFlag (Id), _CONSTM_ NamedThing isLocallyDefined (Id), _CONSTM_ NamedThing getOrigName (Id), _CONSTM_ NamedThing getOccurrenceName (Id), _CONSTM_ NamedThing getInformingModules (Id), _CONSTM_ NamedThing getSrcLoc (Id), _CONSTM_ NamedThing getTheUnique (Id), _CONSTM_ NamedThing hasType (Id), _CONSTM_ NamedThing getType (Id), _CONSTM_ NamedThing fromPreludeCore (Id)] _N_
+ getExportFlag = _A_ 1 _U_ 1 _N_ _S_ "U(AAAS)" {_A_ 1 _U_ 1 _N_ _N_ _N_ _N_} _N_ _N_,
+ isLocallyDefined = _A_ 1 _U_ 1 _N_ _S_ "U(AAAS)" {_A_ 1 _U_ 1 _N_ _N_ _N_ _N_} _N_ _N_,
+ getOrigName = _A_ 1 _U_ 1 _N_ _S_ "U(LAAS)" {_A_ 2 _U_ 12 _N_ _N_ _N_ _N_} _N_ _N_,
+ getOccurrenceName = _A_ 1 _U_ 1 _N_ _S_ "U(LAAS)" {_A_ 2 _U_ 12 _N_ _N_ _N_ _N_} _N_ _N_,
+ getInformingModules = _A_ 1 _U_ 0 _N_ _S_ _!_ _F_ _IF_ARGS_ 0 1 X 2 \ (u0 :: Id) -> _APP_ _TYAPP_ _ORIG_ Util panic { [_PackedString] } [ _NOREP_S_ "getInformingModule:Id" ] _N_,
+ getSrcLoc = _A_ 1 _U_ 1 _N_ _S_ "U(AALS)" {_A_ 2 _U_ 11 _N_ _N_ _N_ _N_} _N_ _N_,
+ getTheUnique = _A_ 1 _U_ 1 _N_ _S_ "U(U(P)AAA)" {_A_ 1 _U_ 2 _N_ _N_ _F_ _IF_ARGS_ 0 1 X 2 \ (u0 :: Int#) -> _!_ _ORIG_ Unique MkUnique [] [u0] _N_} _F_ _IF_ARGS_ 0 1 C 2 \ (u0 :: Id) -> case u0 of { _ALG_ _ORIG_ Id Id (u1 :: Unique) (u2 :: UniType) (u3 :: IdInfo) (u4 :: IdDetails) -> u1; _NO_DEFLT_ } _N_,
+ hasType = _A_ 1 _U_ 0 _N_ _S_ "A" {_A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 1 _!_ True [] [] _N_} _F_ _IF_ARGS_ 0 1 X 1 \ (u0 :: Id) -> _!_ True [] [] _N_,
+ getType = _A_ 1 _U_ 1 _N_ _S_ "U(ASAA)" {_A_ 1 _U_ 1 _N_ _N_ _F_ _IF_ARGS_ 0 1 X 1 \ (u0 :: UniType) -> u0 _N_} _F_ _IF_ARGS_ 0 1 C 2 \ (u0 :: Id) -> case u0 of { _ALG_ _ORIG_ Id Id (u1 :: Unique) (u2 :: UniType) (u3 :: IdInfo) (u4 :: IdDetails) -> u2; _NO_DEFLT_ } _N_,
+ fromPreludeCore = _A_ 1 _U_ 1 _N_ _S_ "U(AAAS)" {_A_ 1 _U_ 1 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+instance Outputable Id
+ {-# GHC_PRAGMA _M_ Id {-dfun-} _A_ 2 _N_ _N_ _N_ _N_ _N_
+ ppr = _A_ 2 _U_ 2222 _N_ _N_ _N_ _N_ #-}
+instance Text Unique
+ {-# GHC_PRAGMA _M_ Unique {-dfun-} _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 5 _!_ _TUP_4 [(Int -> [Char] -> [(Unique, [Char])]), (Int -> Unique -> [Char] -> [Char]), ([Char] -> [([Unique], [Char])]), ([Unique] -> [Char] -> [Char])] [_CONSTM_ Text readsPrec (Unique), _CONSTM_ Text showsPrec (Unique), _CONSTM_ Text readList (Unique), _CONSTM_ Text showList (Unique)] _N_
+ readsPrec = _A_ 2 _U_ 02 _N_ _S_ _!_ _F_ _IF_ARGS_ 0 2 XX 3 \ (u0 :: Int) (u1 :: [Char]) -> _APP_ _TYAPP_ _ORIG_ Util panic { ([Char] -> [(Unique, [Char])]) } [ _NOREP_S_ "no readsPrec for Unique", u1 ] _N_,
+ showsPrec = _A_ 3 _U_ 010 _N_ _S_ "AU(P)A" {_A_ 1 _U_ 2 _N_ _N_ _N_ _N_} _F_ _IF_ARGS_ 0 3 XXX 5 \ (u0 :: Int) (u1 :: Unique) (u2 :: [Char]) -> let {(u3 :: _PackedString) = _APP_ _ORIG_ Unique showUnique [ u1 ]} in _APP_ _ORIG_ PreludePS _unpackPS [ u3 ] _N_,
+ readList = _A_ 0 _U_ 2 _N_ _N_ _N_ _N_,
+ showList = _A_ 0 _U_ 12 _N_ _N_ _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgMonad.lhs b/ghc/compiler/codeGen/CgMonad.lhs
new file mode 100644
index 0000000000..ce063c8aeb
--- /dev/null
+++ b/ghc/compiler/codeGen/CgMonad.lhs
@@ -0,0 +1,914 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1995
+%
+\section[CgMonad]{The code generation monad}
+
+See the beginning of the top-level @CodeGen@ module, to see how this
+monadic stuff fits into the Big Picture.
+
+\begin{code}
+#include "HsVersions.h"
+
+module CgMonad (
+ Code(..), -- type
+ FCode(..), -- type
+
+ initC, thenC, thenFC, listCs, listFCs, mapCs, mapFCs,
+ returnFC, fixC, absC, nopC, getAbsC,
+
+ forkClosureBody, forkStatics, forkAlts, forkEval,
+ forkEvalHelp, forkAbsC,
+ SemiTaggingStuff(..),
+
+ addBindC, addBindsC, modifyBindC, lookupBindC,
+--UNUSED: grabBindsC,
+
+ EndOfBlockInfo(..),
+ setEndOfBlockInfo, getEndOfBlockInfo,
+
+ AStackUsage(..), BStackUsage(..), HeapUsage(..),
+ StubFlag,
+ isStubbed,
+--UNUSED: grabStackSizeC,
+
+ nukeDeadBindings, getUnstubbedAStackSlots,
+
+-- addFreeASlots, -- no need to export it
+ addFreeBSlots, -- ToDo: Belong elsewhere
+
+ isSwitchSetC, isStringSwitchSetC,
+
+ noBlackHolingFlag,
+ profCtrC, --UNUSED: concurrentC,
+
+ costCentresC, costCentresFlag, moduleName,
+
+ Sequel(..), -- ToDo: unabstract?
+ sequelToAmode,
+
+ -- out of general friendliness, we also export ...
+ CgBindings(..),
+ CgInfoDownwards(..), CgState(..), -- non-abstract
+ CgIdInfo, -- abstract
+ CompilationInfo(..),
+ GlobalSwitch, -- abstract
+
+ stableAmodeIdInfo, heapIdInfo,
+
+ -- and to make the interface self-sufficient...
+ AbstractC, CAddrMode, CLabel, LambdaFormInfo, IdEnv(..),
+ Unique, HeapOffset, CostCentre, IsCafCC,
+ Id, UniqSet(..), UniqFM,
+ VirtualSpAOffset(..), VirtualSpBOffset(..),
+ VirtualHeapOffset(..), DataCon(..), PlainStgLiveVars(..),
+ Maybe
+ ) where
+
+import AbsCSyn
+import AbsUniType ( kindFromType, UniType
+ IF_ATTACK_PRAGMAS(COMMA cmpUniType)
+ )
+import CgBindery
+import CgUsages ( getSpBRelOffset )
+import CmdLineOpts ( GlobalSwitch(..) )
+import Id ( getIdUniType, ConTag(..), DataCon(..) )
+import IdEnv -- ops on CgBindings use these
+import Maybes ( catMaybes, maybeToBool, Maybe(..) )
+import Pretty -- debugging only?
+import PrimKind ( getKindSize, retKindSize )
+import UniqSet -- ( elementOfUniqSet, UniqSet(..) )
+import CostCentre -- profiling stuff
+import StgSyn ( PlainStgAtom(..), PlainStgLiveVars(..) )
+import Unique ( UniqueSupply )
+import Util
+
+infixr 9 `thenC` -- Right-associative!
+infixr 9 `thenFC`
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgMonad-environment]{Stuff for manipulating environments}
+%* *
+%************************************************************************
+
+This monadery has some information that it only passes {\em
+downwards}, as well as some ``state'' which is modified as we go
+along.
+
+\begin{code}
+-- The read-only ("reader") part of the monad: threaded downwards only,
+-- never returned back up.
+data CgInfoDownwards	-- information only passed *downwards* by the monad
+  = MkCgInfoDown
+     CompilationInfo	-- COMPLETELY STATIC info about this compilation
+			-- (e.g., what flags were passed to the compiler)
+
+     CgBindings		-- [Id -> info] : static environment
+
+     EndOfBlockInfo	-- Info for stuff to do at end of basic block:
+
+
+-- Per-compilation constants: a predicate over command-line switches,
+-- plus the name of the module being compiled.
+data CompilationInfo
+  = MkCompInfo
+	(GlobalSwitch -> Bool)
+			-- use it to look up whatever we like in command-line flags
+	FAST_STRING	-- the module name
+
+
+-- The threaded ("state") part of the monad.
+data CgState
+  = MkCgState
+	AbstractC	-- code accumulated so far
+	CgBindings	-- [Id -> info] : *local* bindings environment
+			-- Bindings for top-level things are given in the info-down part
+	CgStksAndHeapUsage
+\end{code}
+
+@EndOfBlockInfo@ tells what to do at the end of this block of code
+or, if the expression is a @case@, what to do at the end of each alternative.
+
+\begin{code}
+-- What to do on reaching the end of the current basic block:
+-- the A- and B-stack levels the continuation expects, plus the Sequel
+-- (where control goes next).
+data EndOfBlockInfo
+  = EndOfBlockInfo
+	VirtualSpAOffset	-- Args SpA: trim the A stack to this point at a return;
+				-- push arguments starting just above this point on
+				-- a tail call.
+
+				-- This is therefore the A-stk ptr as seen
+				-- by a case alternative.
+
+				-- Args SpA is used when we want to stub any
+				-- currently-unstubbed dead A-stack (ptr) slots;
+				-- we want to know what SpA in the continuation is
+				-- so that we don't stub any slots which are off the
+				-- top of the continuation's stack!
+
+	VirtualSpBOffset	-- Args SpB: Very similar to Args SpA.
+
+				-- Two main differences:
+				-- 1. If Sequel isn't OnStack, then Args SpB points
+				--    just below the slot in which the return address
+				--    should be put.  In effect, the Sequel is
+				--    a pending argument.  If it is OnStack, Args SpB
+				--    points to the top word of the return address.
+				--
+				-- 2. It ain't used for stubbing because there are
+				--    no ptrs on B stk.
+
+	Sequel
+
+
+-- Initial end-of-block info: both virtual stack offsets at 0, and the
+-- continuation taken from RetReg.
+initEobInfo = EndOfBlockInfo 0 0 InRetReg
+
+
+\end{code}
+
+Any addressing modes inside @Sequel@ must be ``robust,'' in the sense
+that it must survive stack pointer adjustments at the end of the
+block.
+
+\begin{code}
+-- A Sequel says where control should be transferred once the current
+-- block (or case alternative) is finished.
+data Sequel
+	= InRetReg              -- The continuation is in RetReg
+
+	| OnStack VirtualSpBOffset
+				-- Continuation is on the stack, at the
+				-- specified location
+
+
+--UNUSED:	| RestoreCostCentre
+
+	| UpdateCode CAddrMode	-- May be standard update code, or might be
+				-- the data-type-specific one.
+
+	| CaseAlts
+		CAddrMode   -- Jump to this; if the continuation is for a vectored
+			    -- case this might be the label of a return vector
+			    -- Guaranteed to be a non-volatile addressing mode (I think)
+
+		SemiTaggingStuff
+
+type SemiTaggingStuff
+  = Maybe			    -- Maybe[1] we don't have any semi-tagging stuff...
+     ([(ConTag, JoinDetails)],	    -- Alternatives
+      Maybe (Maybe Id, JoinDetails) -- Default (but Maybe[2] we don't have one)
+				    -- Maybe[3] the default is a
+				    -- bind-default (Just b); that is,
+				    -- it expects a ptr to the thing
+				    -- in Node, bound to b
+     )
+
+type JoinDetails
+  = (AbstractC, CLabel)	-- Code to load regs from heap object + profiling macros,
+			-- and join point label
+-- The abstract C is executed only from a successful
+-- semitagging venture, when a case has looked at a variable, found
+-- that it's evaluated, and wants to load up the contents and go to the
+-- join point.
+
+
+-- DIRE WARNING.
+-- The OnStack case of sequelToAmode delivers an Amode which is only valid
+-- just before the final control transfer, because it assumes that
+-- SpB is pointing to the top word of the return address.
+-- This seems unclean but there you go.
+
+-- Convert a Sequel into the addressing mode to jump to / return through.
+sequelToAmode :: Sequel -> FCode CAddrMode
+
+sequelToAmode (OnStack virt_spb_offset)
+  = getSpBRelOffset virt_spb_offset `thenFC` \ spb_rel ->
+    returnFC (CVal spb_rel RetKind)
+
+sequelToAmode InRetReg 		 = returnFC (CReg RetReg)
+--UNUSED:sequelToAmode RestoreCostCentre  = returnFC mkRestoreCostCentreLbl
+--Andy/Simon's patch:
+--WAS: sequelToAmode (UpdateCode amode) = returnFC amode
+-- Note: the UpdateCode amode is deliberately ignored; we return through
+-- the standard update return-vector register instead (Andy/Simon's patch).
+sequelToAmode (UpdateCode amode) = returnFC (CReg StdUpdRetVecReg)
+sequelToAmode (CaseAlts amode _) = returnFC amode
+
+-- ToDo: move/do something
+--UNUSED:mkRestoreCostCentreLbl = panic "mkRestoreCostCentreLbl"
+\end{code}
+
+See the NOTES about the details of stack/heap usage tracking.
+
+\begin{code}
+-- All the Ints below are virtual offsets (see NOTES); the code generator
+-- works entirely in terms of virtual offsets.
+type CgStksAndHeapUsage		-- stacks and heap usage information
+  = (AStackUsage,		-- A-stack usage
+     BStackUsage,		-- B-stack usage
+     HeapUsage)
+
+type AStackUsage =
+	(Int,		-- virtSpA: Virtual offset of topmost allocated slot
+	 [(Int,StubFlag)],	-- freeA:   List of free slots, in increasing order
+	 Int,		-- realSpA: Virtual offset of real stack pointer
+	 Int)		-- hwSpA:   Highest value ever taken by virtSp
+
+-- Records whether a free A-stack (pointer) slot has been stubbed;
+-- kept abstract outside this module via isStubbed.
+data StubFlag = Stubbed | NotStubbed
+
+isStubbed Stubbed    = True  -- so the type can be abstract
+isStubbed NotStubbed = False
+
+type BStackUsage =
+	(Int,		-- virtSpB: Virtual offset of topmost allocated slot
+	 [Int],		-- freeB:   List of free slots, in increasing order
+	 Int,		-- realSpB: Virtual offset of real stack pointer
+	 Int)		-- hwSpB:   Highest value ever taken by virtSp
+
+type HeapUsage =
+	(HeapOffset,	-- virtHp: Virtual offset of highest-numbered allocated word
+	 HeapOffset)	-- realHp: Virtual offset of real heap ptr
+\end{code}
+NB: absolutely every one of the above Ints is really
+a VirtualOffset of some description (the code generator
+works entirely in terms of VirtualOffsets; see NOTES).
+
+Initialisation.
+
+\begin{code}
+-- Initial monad state: no code yet, no local bindings, empty stacks.
+-- The heap pointers are deliberately left as panics: they must be set
+-- (by a heap check) before anyone looks at them.
+initialStateC = MkCgState AbsCNop nullIdEnv initUsage
+
+initUsage :: CgStksAndHeapUsage
+initUsage  = ((0,[],0,0), (0,[],0,0), (initVirtHp, initRealHp))
+initVirtHp = panic "Uninitialised virtual Hp"
+initRealHp = panic "Uninitialised real Hp"
+\end{code}
+
+@envInitForAlternatives@ initialises the environment for a case alternative,
+assuming that the alternative is entered after an evaluation.
+This involves:
+\begin{itemize}
+\item
+zapping any volatile bindings, which aren't valid.
+\item
+zapping the heap usage. It should be restored by a heap check.
+\item
+setting the virtual AND real stack pointer fields to the given virtual stack offsets.
+this doesn't represent any {\em code}; it is a prediction of where the
+real stack pointer will be when we come back from the case analysis.
+\item
+BUT LEAVING the rest of the stack-usage info because it is all valid.
+In particular, we leave the tail stack pointers unchanged, because the
+alternative has to de-allocate the original @case@ expression's stack.
+\end{itemize}
+
+@stateIncUsage@$~e_1~e_2$ incorporates in $e_1$ the stack and heap high water
+marks found in $e_2$.
+
+\begin{code}
+-- Fold the high-water marks (and virtual heap) of the second state into
+-- the first; everything else (code, bindings, current stack info) comes
+-- from the first state unchanged.
+stateIncUsage :: CgState -> CgState -> CgState
+
+stateIncUsage (MkCgState abs_c bs ((vA,fA,rA,hA1),(vB,fB,rB,hB1),(vH1,rH1)))
+	      (MkCgState _ _ (( _, _, _,hA2),( _, _, _,hB2),(vH2, _)))
+     = MkCgState abs_c
+		 bs
+		 ((vA,fA,rA,hA1 `max` hA2),
+		  (vB,fB,rB,hB1 `max` hB2),
+		  (vH1 `maxOff` vH2, rH1))
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgMonad-basics]{Basic code-generation monad magic}
+%* *
+%************************************************************************
+
+\begin{code}
+-- FCode: a computation reading the info-down, transforming the state,
+-- and producing a value.  Code is the value-less special case.
+type FCode a = CgInfoDownwards -> CgState -> (a, CgState)
+type Code    = CgInfoDownwards -> CgState -> CgState
+
+#ifdef __GLASGOW_HASKELL__
+{-# INLINE thenC #-}
+{-# INLINE thenFC #-}
+{-# INLINE returnFC #-}
+#endif
+\end{code}
+The Abstract~C is not in the environment so as to improve strictness.
+
+\begin{code}
+-- Run a complete Code computation from the initial state, returning the
+-- accumulated abstract C.  The statics environment is an error thunk:
+-- top-level code must install its own statics before looking at them.
+initC :: CompilationInfo -> Code -> AbstractC
+
+initC cg_info code
+  = case (code (MkCgInfoDown cg_info (error "initC: statics") initEobInfo)
+	       initialStateC) of
+      MkCgState abc _ _ -> abc
+
+-- Monadic return: yield the value, leave the state untouched.
+returnFC :: a -> FCode a
+
+returnFC val info_down state = (val, state)
+\end{code}
+
+\begin{code}
+-- Sequencing for Code: run m for its state effect, then k.
+thenC :: Code
+      -> (CgInfoDownwards -> CgState -> a)
+      -> CgInfoDownwards -> CgState -> a
+
+-- thenC has both of the following types:
+-- thenC :: Code -> Code    -> Code
+-- thenC :: Code -> FCode a -> FCode a
+
+(m `thenC` k) info_down state
+  = k info_down new_state
+  where
+    new_state  = m info_down state
+
+-- Run a list of Codes left to right, threading the state through.
+listCs :: [Code] -> Code
+
+listCs [] info_down state = state
+listCs (c:cs) info_down state = stateN
+  where
+    state1 = c info_down state
+    stateN = listCs cs info_down state1
+
+-- mapCs f = listCs . map f
+mapCs :: (a -> Code) -> [a] -> Code
+
+mapCs f [] info_down state = state
+mapCs f (c:cs) info_down state = stateN
+  where
+    state1 = (f c) info_down state
+    stateN = mapCs f cs info_down state1
+\end{code}
+
+\begin{code}
+-- Monadic bind: run m, feed its result to k, threading the state.
+thenFC :: FCode a
+       -> (a -> CgInfoDownwards -> CgState -> c)
+       -> CgInfoDownwards -> CgState -> c
+
+-- thenFC :: FCode a -> (a -> FCode b) -> FCode b
+-- thenFC :: FCode a -> (a -> Code)    -> Code
+
+(m `thenFC` k) info_down state
+  = k m_result info_down new_state
+  where
+    (m_result, new_state) = m info_down state
+
+-- Run the FCodes left to right, collecting their results.
+listFCs :: [FCode a] -> FCode [a]
+
+listFCs [] info_down state = ([], state)
+listFCs (fc:fcs) info_down state = (thing : things, stateN)
+  where
+    (thing,  state1) = fc info_down state
+    (things, stateN) = listFCs fcs info_down state1
+
+-- mapFCs f = listFCs . map f
+mapFCs :: (a -> FCode b) -> [a] -> FCode [b]
+
+mapFCs f [] info_down state = ([], state)
+mapFCs f (fc:fcs) info_down state = (thing : things, stateN)
+  where
+    (thing,  state1) = (f fc) info_down state
+    (things, stateN) = mapFCs f fcs info_down state1
+\end{code}
+
+And the knot-tying combinator:
+\begin{code}
+-- Knot-tying: the result value v is fed back (lazily) as the argument
+-- to fcode.  Only the value is knot-tied, not the state.
+fixC :: (a -> FCode a) -> FCode a
+fixC fcode info_down state = result
+  where
+    result@(v, _) = fcode v info_down state
+    --     ^-------------^
+\end{code}
+
+@forkClosureBody@ takes a code, $c$, and compiles it in a completely
+fresh environment, except that:
+ - compilation info and statics are passed in unchanged.
+The current environment is passed on completely unaltered, except that
+abstract C from the fork is incorporated.
+
+@forkAbsC@ takes a code and compiles it in the current environment,
+returning the abstract C thus constructed. The current environment
+is passed on completely unchanged. It is pretty similar to @getAbsC@,
+except that the latter does affect the environment. ToDo: combine?
+
+@forkStatics@ $fc$ compiles $fc$ in an environment whose statics come
+from the current bindings, but which is otherwise freshly initialised.
+The Abstract~C returned is attached to the current state, but the
+bindings and usage information is otherwise unchanged.
+
+\begin{code}
+-- Compile code in a fresh state (keeping compilation info and statics),
+-- then graft its abstract C onto the current state's code.
+forkClosureBody :: Code -> Code
+
+forkClosureBody code
+	(MkCgInfoDown cg_info statics _)
+	(MkCgState absC_in binds un_usage)
+  = MkCgState (AbsCStmts absC_in absC_fork) binds un_usage
+  where
+    fork_state		  = code body_info_down initialStateC
+    MkCgState absC_fork _ _ = fork_state
+    body_info_down = MkCgInfoDown cg_info statics initEobInfo
+
+-- Compile fcode in a fresh state whose statics come from the current
+-- *local* bindings; attach the resulting abstract C, keep everything else.
+forkStatics :: FCode a -> FCode a
+
+forkStatics fcode (MkCgInfoDown cg_info _ _)
+		  (MkCgState absC_in statics un_usage)
+  = (result, MkCgState (AbsCStmts absC_in absC_fork) statics un_usage)
+  where
+  (result, state) = fcode rhs_info_down initialStateC
+  MkCgState absC_fork _ _ = state	-- Don't merge this line with the one
+					-- above or it becomes too strict!
+  rhs_info_down = MkCgInfoDown cg_info statics initEobInfo
+
+-- Compile code in the current environment but with empty code, returning
+-- the abstract C it builds; only stack high-water marks flow back.
+forkAbsC :: Code -> FCode AbstractC
+forkAbsC code info_down (MkCgState absC1 bs usage)
+  = (absC2, new_state)
+  where
+    MkCgState absC2 _ ((_, _, _,hA2),(_, _, _,hB2), _) =
+	code info_down (MkCgState AbsCNop bs usage)
+    ((vA, fA, rA, hA1), (vB, fB, rB, hB1), heap_usage) = usage
+
+    new_usage = ((vA, fA, rA, hA1 `max` hA2), (vB, fB, rB, hB1 `max` hB2), heap_usage)
+    new_state = MkCgState absC1 bs new_usage
+\end{code}
+
+@forkAlts@ $bs~d$ takes fcodes $bs$ for the branches of a @case@, and
+an fcode for the default case $d$, and compiles each in the current
+environment. The current environment is passed on unmodified, except
+that
+ - the worst stack high-water mark is incorporated
+ - the virtual Hp is moved on to the worst virtual Hp for the branches
+
+The "extra branches" arise from handling the default case:
+
+ case f x of
+ C1 a b -> e1
+ z -> e2
+
+Here we in effect expand to
+
+ case f x of
+ C1 a b -> e1
+ C2 c -> let z = C2 c in JUMP(default)
+	C3 d e f -> let z = C3 d e f in JUMP(default)
+
+ default: e2
+
+The stuff for C2 and C3 are the extra branches. They are
+handled differently by forkAlts, because their
+heap usage is joined onto that for the default case.
+
+\begin{code}
+-- Compile case branches, extra (default-expansion) branches, and the
+-- default, all from the same in_state.  The default's in_state is
+-- worst-cased over the extra branches; the out_state worst-cases over
+-- the default and the ordinary branches.
+forkAlts :: [FCode a] -> [FCode a] -> FCode b -> FCode ([a],b)
+
+forkAlts branch_fcodes extra_branch_fcodes deflt_fcode info_down in_state
+ = ((extra_branch_results ++ branch_results , deflt_result), out_state)
+  where
+    compile fc = fc info_down in_state
+
+    (branch_results, branch_out_states) = unzip (map compile branch_fcodes)
+    (extra_branch_results, extra_branch_out_states) = unzip (map compile extra_branch_fcodes)
+
+	-- The "in_state" for the default branch is got by worst-casing the
+	-- heap usages etc from the "extra_branches"
+    default_in_state		    = foldl stateIncUsage in_state extra_branch_out_states
+    (deflt_result, deflt_out_state) = deflt_fcode info_down default_in_state
+
+    out_state = foldl stateIncUsage default_in_state (deflt_out_state:branch_out_states)
+		-- NB foldl.  in_state is the *left* argument to stateIncUsage
+\end{code}
+
+@forkEval@ takes two blocks of code.
+\begin{itemize}
+\item The first meddles with the environment to set it up as expected by
+ the alternatives of a @case@ which does an eval (or gc-possible primop).
+\item The second block is the code for the alternatives.
+ (plus info for semi-tagging purposes)
+\end{itemize}
+@forkEval@ picks up the virtual stack pointers and stubbed stack slots
+as set up by the first block, and returns a suitable @EndOfBlockInfo@ for
+the caller to use, together with whatever value is returned by the second block.
+
+It uses @initEnvForAlternatives@ to initialise the environment, and
+@stateIncUsageAlt@ to incorporate usage; the latter ignores the heap
+usage.
+
+\begin{code}
+-- See the prose above: set up the environment for a case's alternatives,
+-- compile the alternatives, and deliver the EndOfBlockInfo the caller
+-- should use.
+forkEval :: EndOfBlockInfo              -- For the body
+	 -> Code			-- Code to set environment
+	 -> FCode Sequel		-- Semi-tagging info to store
+	 -> FCode EndOfBlockInfo	-- The new end of block info
+
+forkEval body_eob_info env_code body_code
+  = forkEvalHelp body_eob_info env_code body_code `thenFC` \ (vA, vB, sequel) ->
+    returnFC (EndOfBlockInfo vA vB sequel)
+
+forkEvalHelp :: EndOfBlockInfo  -- For the body
+	     -> Code		-- Code to set environment
+	     -> FCode a		-- The code to do after the eval
+	     -> FCode (Int,	-- SpA
+		       Int,	-- SpB
+		       a)	-- Result of the FCode
+
+forkEvalHelp body_eob_info env_code body_code
+	 info_down@(MkCgInfoDown cg_info statics _) state
+  = ((vA,vB,value_returned), state `stateIncUsageEval` state_at_end_return)
+  where
+    info_down_for_body = MkCgInfoDown cg_info statics body_eob_info
+
+    -- Run env_code just for the stack layout it establishes; its code
+    -- and heap usage are discarded.
+    (MkCgState _ binds ((vA,fA,_,_), (vB,fB,_,_), _)) = env_code info_down_for_body state
+	-- These vA and fA things are now set up as the body code expects them
+
+    state_at_end_return :: CgState
+
+    (value_returned, state_at_end_return) = body_code info_down_for_body state_for_body
+
+    state_for_body :: CgState
+
+    -- Fresh state for the body: volatile bindings zapped, real/hwm stack
+    -- pointers predicted to equal the virtual ones, heap usage reset.
+    state_for_body = MkCgState AbsCNop
+			       (nukeVolatileBinds binds)
+			       ((vA,stubbed_fA,vA,vA),	-- Set real and hwms
+				(vB,fB,vB,vB),		-- to virtual ones
+				(initVirtHp, initRealHp))
+
+    stubbed_fA = [ (offset, Stubbed) | (offset,_) <- fA ]
+	-- In the branch, all free locations will have been stubbed
+
+
+stateIncUsageEval :: CgState -> CgState -> CgState
+stateIncUsageEval (MkCgState absC1 bs ((vA,fA,rA,hA1),(vB,fB,rB,hB1),heap_usage))
+		  (MkCgState absC2 _  (( _, _, _,hA2),( _, _, _,hB2), _))
+     = MkCgState (absC1 `AbsCStmts` absC2)
+		 -- The AbsC coming back should consist only of nested declarations,
+		 -- notably of the return vector!
+		 bs
+		 ((vA,fA,rA,hA1 `max` hA2),
+		  (vB,fB,rB,hB1 `max` hB2),
+		  heap_usage)
+	-- We don't max the heap high-watermark because stateIncUsageEval is
+	-- used only in forkEval, which in turn is only used for blocks of code
+	-- which do their own heap-check.
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgMonad-spitting-AbstractC]{Spitting out @AbstractC@}
+%* *
+%************************************************************************
+
+@nopC@ is the no-op for the @Code@ monad; it adds no Abstract~C to the
+environment; @absC@ glues @ab_C@ onto the Abstract~C collected so far.
+\begin{code}
+-- No-op for the Code monad: the state passes through untouched.
+nopC :: Code
+nopC info_down state = state
+
+-- Append the given abstract C to the code accumulated so far.
+absC :: AbstractC -> Code
+absC more_absC info_down state@(MkCgState absC binds usage)
+  = MkCgState (mkAbsCStmts absC more_absC) binds usage
+\end{code}
+
+These two are just like @absC@, except they examine the compilation
+info (whether SCC profiling or profiling-ctrs going) and possibly emit
+nothing.
+
+\begin{code}
+-- Query a command-line switch via the switch-checker in the info-down.
+isSwitchSetC :: GlobalSwitch -> FCode Bool
+
+isSwitchSetC switch (MkCgInfoDown (MkCompInfo sw_chkr _) _ _) state
+  = (sw_chkr switch, state)
+
+-- As isSwitchSetC, for switches parameterised by a String.  The panic
+-- argument is never examined (presumably the checker ignores it) --
+-- NOTE(review): verify against the sw_chkr implementation.
+isStringSwitchSetC :: (String -> GlobalSwitch) -> FCode Bool
+
+isStringSwitchSetC switch (MkCgInfoDown (MkCompInfo sw_chkr _) _ _) state
+  = (sw_chkr (switch (panic "isStringSwitchSetC")), state)
+
+-- Emit a cost-centre profiling macro call, but only if SccProfilingOn.
+costCentresC :: FAST_STRING -> [CAddrMode] -> Code
+
+costCentresC macro args (MkCgInfoDown (MkCompInfo sw_chkr _) _ _)
+			state@(MkCgState absC binds usage)
+  = if sw_chkr SccProfilingOn
+    then MkCgState (mkAbsCStmts absC (CCallProfCCMacro macro args)) binds usage
+    else state
+
+-- Emit a ticky-profiling counter macro call, but only if DoTickyProfiling.
+profCtrC :: FAST_STRING -> [CAddrMode] -> Code
+
+profCtrC macro args (MkCgInfoDown (MkCompInfo sw_chkr _) _ _)
+		    state@(MkCgState absC binds usage)
+  = if not (sw_chkr DoTickyProfiling)
+    then state
+    else MkCgState (mkAbsCStmts absC (CCallProfCtrMacro macro args)) binds usage
+
+{- Try to avoid adding too many special compilation strategies here.
+ It's better to modify the header files as necessary for particular targets,
+ so that we can get away with as few variants of .hc files as possible.
+ 'ForConcurrent' is somewhat special anyway, as it changes entry conventions
+ pretty significantly.
+-}
+
+-- if compiling for concurrency...
+
+{- UNUSED, as it happens:
+concurrentC :: AbstractC -> Code
+
+concurrentC more_absC (MkCgInfoDown (MkCompInfo sw_chkr _) _ _)
+ state@(MkCgState absC binds usage)
+ = if not (sw_chkr ForConcurrent)
+ then state
+ else MkCgState (mkAbsCStmts absC more_absC) binds usage
+-}
+\end{code}
+
+@getAbsC@ compiles the code in the current environment, and returns
+the abstract C thus constructed (leaving the abstract C being carried
+around in the state untouched). @getAbsC@ does not generate any
+in-line Abstract~C itself, but the environment it returns is that
+obtained from the compilation.
+
+\begin{code}
+getAbsC :: Code -> FCode AbstractC
+
+-- Run the given code with an empty (AbsCNop) abstract-C accumulator and
+-- return the abstract C it builds as the result, while the caller's own
+-- accumulated absC is left untouched.  The bindings and usage produced
+-- by the sub-computation (binds2, usage2) ARE carried forward.
+getAbsC code info_down (MkCgState absC binds usage)
+ = (absC2, MkCgState absC binds2 usage2)
+ where
+ (MkCgState absC2 binds2 usage2) = code info_down (MkCgState AbsCNop binds usage)
+\end{code}
+
+\begin{code}
+noBlackHolingFlag, costCentresFlag :: FCode Bool
+
+-- Pure queries of two compilation switches (OmitBlackHoling and
+-- SccProfilingOn respectively); the state is threaded through unchanged.
+noBlackHolingFlag (MkCgInfoDown (MkCompInfo sw_chkr _) _ _) state
+ = (sw_chkr OmitBlackHoling, state)
+
+costCentresFlag (MkCgInfoDown (MkCompInfo sw_chkr _) _ _) state
+ = (sw_chkr SccProfilingOn, state)
+\end{code}
+
+\begin{code}
+
+moduleName :: FCode FAST_STRING
+-- Return the name of the module being compiled, taken from the
+-- MkCompInfo in the downward environment.
+moduleName (MkCgInfoDown (MkCompInfo _ mod_name) _ _) state
+ = (mod_name, state)
+
+\end{code}
+
+\begin{code}
+setEndOfBlockInfo :: EndOfBlockInfo -> Code -> Code
+-- Run the given code with the end-of-block info in the downward
+-- environment replaced by eob_info (reader-style local override).
+setEndOfBlockInfo eob_info code (MkCgInfoDown c_info statics _) state
+ = code (MkCgInfoDown c_info statics eob_info) state
+
+getEndOfBlockInfo :: FCode EndOfBlockInfo
+-- Read the current end-of-block info from the downward environment.
+getEndOfBlockInfo (MkCgInfoDown c_info statics eob_info) state
+ = (eob_info, state)
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgMonad-bindery]{Monad things for fiddling with @CgBindings@}
+%* *
+%************************************************************************
+
+There are three basic routines, for adding (@addBindC@), modifying
+(@modifyBindC@) and looking up (@lookupBindC@) bindings. Each routine
+is just a wrapper for its lower-level @Bind@ routine (drop the \tr{C}
+on the end of each function name).
+
+An @Id@ is bound to a @(VolatileLoc, StableLoc)@ pair.
+The name should not already be bound.
+\begin{code}
+addBindC :: Id -> CgIdInfo -> Code
+-- Add a single binding to the local (volatile) bindings in the state.
+-- Per the note above, the name should not already be bound; this is
+-- not checked here — addOneToIdEnv would simply shadow it.
+addBindC name stuff_to_bind info_down (MkCgState absC binds usage)
+ = MkCgState absC (addOneToIdEnv binds name stuff_to_bind) usage
+\end{code}
+
+\begin{code}
+addBindsC :: [(Id, CgIdInfo)] -> Code
+-- Add several bindings at once, folding each pair into the environment
+-- left-to-right (so a later pair for the same Id would win).
+addBindsC new_bindings info_down (MkCgState absC binds usage)
+ = MkCgState absC new_binds usage
+ where
+ new_binds = foldl (\ binds (name,info) -> addOneToIdEnv binds name info)
+ binds
+ new_bindings
+\end{code}
+
+\begin{code}
+modifyBindC :: Id -> (CgIdInfo -> CgIdInfo) -> Code
+-- Apply mangle_fn to the existing binding for name in the local
+-- environment (via modifyIdEnv); only the local binds are touched,
+-- never the static ones in the downward environment.
+modifyBindC name mangle_fn info_down (MkCgState absC binds usage)
+ = MkCgState absC (modifyIdEnv binds mangle_fn name) usage
+\end{code}
+
+Lookup is expected to find a binding for the @Id@.
+\begin{code}
+lookupBindC :: Id -> FCode CgIdInfo
+-- Look the Id up first in the local (state) bindings, then in the
+-- static bindings from the downward environment.  A miss in both is a
+-- compiler bug (usually broken data dependencies), so we panic with a
+-- dump of both environments to aid debugging.
+lookupBindC name info_down@(MkCgInfoDown _ static_binds _)
+ state@(MkCgState absC local_binds usage)
+ = (val, state)
+ where
+ val = case (lookupIdEnv local_binds name) of
+ Nothing -> try_static
+ Just this -> this
+
+ try_static = case (lookupIdEnv static_binds name) of
+ Just this -> this
+ Nothing
+ -> pprPanic "lookupBindC:no info!\n"
+ (ppAboves [
+ ppCat [ppStr "for:", ppr PprShowAll name],
+ ppStr "(probably: data dependencies broken by an optimisation pass)",
+ ppStr "static binds for:",
+ ppAboves [ ppr PprDebug i | (MkCgIdInfo i _ _ _) <- rngIdEnv static_binds ],
+ ppStr "local binds for:",
+ ppAboves [ ppr PprDebug i | (MkCgIdInfo i _ _ _) <- rngIdEnv local_binds ]
+ ])
+\end{code}
+
+For dumping debug information, we also have the ability to grab the
+local bindings environment.
+
+ToDo: Maybe do the pretty-printing here to restrict what people do
+with the environment.
+
+\begin{code}
+{- UNUSED:
+grabBindsC :: FCode CgBindings
+grabBindsC info_down state@(MkCgState absC binds usage)
+ = (binds, state)
+-}
+\end{code}
+
+\begin{code}
+{- UNUSED:
+grabStackSizeC :: FCode (Int, Int)
+grabStackSizeC info_down state -- @(MkCgState absC binds ((vA,_,_,_), (vB,_,_,_), _))
+ = panic "grabStackSizeC" -- (vA, vB)
+-}
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgStackery-deadslots]{Finding dead stack slots}
+%* *
+%************************************************************************
+
+@nukeDeadBindings@ does the following:
+\begin{itemize}
+\item Removes all bindings from the environment other than those
+ for variables in the argument to @nukeDeadBindings@.
+\item Collects any stack slots so freed, and returns them to the appropriate
+ stack free list.
+\item Moves the virtual stack pointers to point to the topmost used
+ stack locations.
+\end{itemize}
+
+Find dead slots on the stacks *and* remove bindings for dead variables
+from the bindings.
+
+You can have multi-word slots on the B stack; if dead, such a slot
+will be reported as {\em several} offsets (one per word).
+
+NOT YET: It returns empty lists if the -fno-stack-stubbing flag is
+set, so that no stack-stubbing will take place.
+
+Probably *naughty* to look inside monad...
+
+\begin{code}
+nukeDeadBindings :: PlainStgLiveVars -- All the *live* variables
+ -> Code
+-- Drop every binding whose Id is not in live_vars, return the freed A-
+-- and B-stack slots to their respective free lists, and trim the
+-- virtual stack pointers down past any newly-freed top-of-stack slots.
+-- Real stack pointers and high-water marks are left alone.
+nukeDeadBindings
+ live_vars
+ info_down
+ state@(MkCgState abs_c binds ((vsp_a, free_a, real_a, hw_a),
+ (vsp_b, free_b, real_b, hw_b),
+ heap_usage))
+ = MkCgState abs_c (mkIdEnv bs') new_usage
+ where
+ new_usage = ((new_vsp_a, new_free_a, real_a, hw_a),
+ (new_vsp_b, new_free_b, real_b, hw_b),
+ heap_usage)
+
+ -- Partition the current bindings into survivors (bs') and the
+ -- dead A/B stack offsets they occupied.
+ (dead_a_slots, dead_b_slots, bs')
+ = dead_slots live_vars
+ [] [] []
+ [ (i, b) | b@(MkCgIdInfo i _ _ _) <- rngIdEnv binds ]
+ --OLD: (getIdEnvMapping binds)
+
+ -- Freed slots must be sorted before merging: addFree*Slots
+ -- expects both argument lists in increasing order.
+ extra_free_a = (sortLt (<) dead_a_slots) `zip` (repeat NotStubbed)
+ extra_free_b = sortLt (<) dead_b_slots
+
+ (new_vsp_a, new_free_a) = trim fst vsp_a (addFreeASlots free_a extra_free_a)
+ (new_vsp_b, new_free_b) = trim id vsp_b (addFreeBSlots free_b extra_free_b)
+
+getUnstubbedAStackSlots
+ :: VirtualSpAOffset -- Ignore slots bigger than this
+ -> FCode [VirtualSpAOffset] -- Return the list of slots found
+
+-- Select from the A-stack free list those slots at or below tail_spa
+-- that are still marked NotStubbed.
+getUnstubbedAStackSlots tail_spa
+ info_down state@(MkCgState _ _ ((_, free_a, _, _), _, _))
+ = ([ slot | (slot, NotStubbed) <- free_a, slot <= tail_spa ], state)
+\end{code}
+
+Several boring auxiliary functions to do the dirty work.
+
+\begin{code}
+dead_slots :: PlainStgLiveVars
+ -> [(Id,CgIdInfo)] -> [VirtualSpAOffset] -> [VirtualSpBOffset]
+ -> [(Id,CgIdInfo)]
+ -> ([VirtualSpAOffset], [VirtualSpBOffset], [(Id,CgIdInfo)])
+
+-- dead_slots carries accumulating parameters for
+-- filtered bindings, dead a and b slots
+-- Live bindings are kept (reversed into fbs, so `reverse fbs' restores
+-- the original order); dead stack-resident bindings contribute their
+-- offsets to the dead-slot lists; dead non-stack bindings just vanish.
+dead_slots live_vars fbs das dbs []
+ = (nub das, nub dbs, reverse fbs) -- Finished; rm the dups, if any
+
+dead_slots live_vars fbs das dbs ((v,i):bs)
+ | v `elementOfUniqSet` live_vars
+ = dead_slots live_vars ((v,i):fbs) das dbs bs
+ -- Live, so don't record it in dead slots
+ -- Instead keep it in the filtered bindings
+
+ | otherwise
+ = case i of
+ MkCgIdInfo _ _ stable_loc _
+ | is_Astk_loc ->
+ dead_slots live_vars fbs (offsetA : das) dbs bs
+
+ -- A B-stack thing can be multi-word: record one dead
+ -- offset per word it occupied (see note above the code).
+ | is_Bstk_loc ->
+ dead_slots live_vars fbs das ([offsetB .. (offsetB + size - 1)] ++ dbs) bs
+ where
+ maybe_Astk_loc = maybeAStkLoc stable_loc
+ is_Astk_loc = maybeToBool maybe_Astk_loc
+ (Just offsetA) = maybe_Astk_loc
+
+ maybe_Bstk_loc = maybeBStkLoc stable_loc
+ is_Bstk_loc = maybeToBool maybe_Bstk_loc
+ (Just offsetB) = maybe_Bstk_loc
+
+ _ -> dead_slots live_vars fbs das dbs bs
+ where
+ -- Size in words of v's representation on the stack.
+ size :: Int
+ size = (getKindSize . kindFromType . getIdUniType) v
+
+-- addFreeSlots expects *both* args to be in increasing order
+-- (it is a merge of two sorted free lists; equal offsets in both
+-- lists would mean the same slot was freed twice, hence the panic).
+addFreeASlots :: [(Int,StubFlag)] -> [(Int,StubFlag)] -> [(Int,StubFlag)]
+addFreeASlots = addFreeSlots fst
+
+addFreeBSlots :: [Int] -> [Int] -> [Int]
+addFreeBSlots = addFreeSlots id
+
+-- Merge two sorted slot lists; get_offset extracts the comparable
+-- stack offset from a slot (fst for A-stack pairs, id for B-stack Ints).
+addFreeSlots :: (slot -> Int{-offset-}) -> [slot] -> [slot] -> [slot]
+
+addFreeSlots get_offset cs [] = cs
+addFreeSlots get_offset [] ns = ns
+addFreeSlots get_offset (c:cs) (n:ns)
+ = if off_c < off_n then
+ (c : addFreeSlots get_offset cs (n:ns))
+ else if off_c > off_n then
+ (n : addFreeSlots get_offset (c:cs) ns)
+ else
+ panic ("addFreeSlots: equal slots: ")-- ++ show (c:cs) ++ show (n:ns))
+ where
+ off_c = get_offset c
+ off_n = get_offset n
+
+trim :: (slot -> Int{-offset-}) -> Int{-offset-} -> [slot] -> (Int{-offset-}, [slot])
+
+-- Lower the virtual stack pointer past any free slots sitting exactly
+-- at the top of the stack, returning the new pointer and the remaining
+-- free list.  Works on the reversed (descending) free list so the
+-- topmost slot is inspected first; stops at the first gap.
+trim get_offset current_sp free_slots
+ = try current_sp (reverse free_slots)
+ where
+ try csp [] = (csp, [])
+ try csp (slot:slots)
+ = if csp < slot_off then
+ try csp slots -- Free slot off top of stk; ignore
+
+ else if csp == slot_off then
+ try (csp-1) slots -- Free slot at top of stk; trim
+
+ else
+ (csp, reverse (slot:slots)) -- Otherwise gap; give up
+ where
+ slot_off = get_offset slot
+\end{code}
diff --git a/ghc/compiler/codeGen/CgRetConv.hi b/ghc/compiler/codeGen/CgRetConv.hi
new file mode 100644
index 0000000000..f722d3089d
--- /dev/null
+++ b/ghc/compiler/codeGen/CgRetConv.hi
@@ -0,0 +1,39 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgRetConv where
+import AbsCSyn(AbstractC, CAddrMode, MagicId)
+import CLabelInfo(CLabel)
+import Class(Class)
+import Id(Id, IdDetails)
+import IdInfo(IdInfo)
+import Maybes(Labda)
+import NameTypes(FullName)
+import PrimKind(PrimKind)
+import PrimOps(PrimOp)
+import TyCon(TyCon)
+import TyVar(TyVarTemplate)
+import UniType(UniType)
+import Unique(Unique)
+data MagicId {-# GHC_PRAGMA BaseReg | StkOReg | VanillaReg PrimKind Int# | FloatReg Int# | DoubleReg Int# | TagReg | RetReg | SpA | SuA | SpB | SuB | Hp | HpLim | LivenessReg | ActivityReg | StdUpdRetVecReg | StkStubReg | CurCostCentre | VoidReg #-}
+data CLabel
+data CtrlReturnConvention = VectoredReturn Int | UnvectoredReturn Int
+data DataReturnConvention = ReturnInHeap | ReturnInRegs [MagicId]
+data Id {-# GHC_PRAGMA Id Unique UniType IdInfo IdDetails #-}
+data PrimKind {-# GHC_PRAGMA PtrKind | CodePtrKind | DataPtrKind | RetKind | InfoPtrKind | CostCentreKind | CharKind | IntKind | WordKind | AddrKind | FloatKind | DoubleKind | MallocPtrKind | StablePtrKind | ArrayKind | ByteArrayKind | VoidKind #-}
+data TyCon {-# GHC_PRAGMA SynonymTyCon Unique FullName Int [TyVarTemplate] UniType Bool | DataTyCon Unique FullName Int [TyVarTemplate] [Id] [Class] Bool | TupleTyCon Int | PrimTyCon Unique FullName Int ([PrimKind] -> PrimKind) | SpecTyCon TyCon [Labda UniType] #-}
+assignPrimOpResultRegs :: PrimOp -> [MagicId]
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _N_ _N_ #-}
+assignRegs :: [MagicId] -> [PrimKind] -> ([MagicId], [PrimKind])
+ {-# GHC_PRAGMA _A_ 2 _U_ 22 _N_ _S_ "LS" _N_ _N_ #-}
+ctrlReturnConvAlg :: TyCon -> CtrlReturnConvention
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _N_ _N_ #-}
+dataReturnConvAlg :: Id -> DataReturnConvention
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(AAAS)" {_A_ 1 _U_ 2 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+dataReturnConvPrim :: PrimKind -> MagicId
+ {-# GHC_PRAGMA _A_ 1 _U_ 2 _N_ _S_ "E" _N_ _N_ #-}
+makePrimOpArgsRobust :: PrimOp -> [CAddrMode] -> ([CAddrMode], Int, AbstractC)
+ {-# GHC_PRAGMA _A_ 2 _U_ 02 _N_ _S_ "AL" {_A_ 1 _U_ 2 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+mkLiveRegsBitMask :: [MagicId] -> Int
+ {-# GHC_PRAGMA _A_ 1 _U_ 2 _N_ _S_ "S" _N_ _N_ #-}
+noLiveRegsMask :: Int
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 2 _!_ I# [] [0#] _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgRetConv.lhs b/ghc/compiler/codeGen/CgRetConv.lhs
new file mode 100644
index 0000000000..9b6a130124
--- /dev/null
+++ b/ghc/compiler/codeGen/CgRetConv.lhs
@@ -0,0 +1,436 @@
+%
+% (c) The GRASP Project, Glasgow University, 1992-1995
+%
+\section[CgRetConv]{Return conventions for the code generator}
+
+The datatypes and functions here encapsulate what there is to know
+about return conventions.
+
+\begin{code}
+#include "HsVersions.h"
+
+module CgRetConv (
+ CtrlReturnConvention(..), DataReturnConvention(..),
+
+ ctrlReturnConvAlg,
+ dataReturnConvAlg,
+
+ mkLiveRegsBitMask, noLiveRegsMask,
+
+ dataReturnConvPrim,
+
+ assignPrimOpResultRegs,
+ makePrimOpArgsRobust,
+ assignRegs,
+
+ -- and to make the interface self-sufficient...
+ MagicId, PrimKind, Id, CLabel, TyCon
+ ) where
+
+import AbsCSyn
+
+import AbsPrel ( PrimOp(..), PrimOpResultInfo(..), primOpCanTriggerGC,
+ getPrimOpResultInfo, PrimKind
+ IF_ATTACK_PRAGMAS(COMMA tagOf_PrimOp)
+ IF_ATTACK_PRAGMAS(COMMA pprPrimOp)
+ )
+import AbsUniType ( getTyConFamilySize, kindFromType, getTyConDataCons,
+ TyVarTemplate, TyCon, Class,
+ TauType(..), ThetaType(..), UniType
+ IF_ATTACK_PRAGMAS(COMMA cmpTyCon COMMA cmpClass)
+ IF_ATTACK_PRAGMAS(COMMA cmpUniType)
+ )
+import CgCompInfo -- various things
+
+import Id ( Id, getDataConSig, fIRST_TAG, isDataCon,
+ DataCon(..), ConTag(..)
+ )
+import Maybes ( catMaybes, Maybe(..) )
+import PrimKind
+import Util
+import Pretty
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgRetConv-possibilities]{Data types that encode possible return conventions}
+%* *
+%************************************************************************
+
+A @CtrlReturnConvention@ says how {\em control} is returned.
+\begin{code}
+data CtrlReturnConvention
+ = VectoredReturn Int -- size of the vector table (family size)
+ | UnvectoredReturn Int -- family size
+\end{code}
+
+A @DataReturnConvention@ says how the data for a particular
+data-constructor is returned.
+\begin{code}
+data DataReturnConvention
+ = ReturnInHeap
+ | ReturnInRegs [MagicId]
+\end{code}
+The register assignment given by a @ReturnInRegs@ obeys three rules:
+\begin{itemize}
+\item R1 is dead.
+\item R2 points to the info table for the phantom constructor
+\item The list of @MagicId@ is in the same order as the arguments
+ to the constructor.
+\end{itemize}
+
+
+%************************************************************************
+%* *
+\subsection[CgRetConv-algebraic]{Return conventions for algebraic datatypes}
+%* *
+%************************************************************************
+
+\begin{code}
+ctrlReturnConvAlg :: TyCon -> CtrlReturnConvention
+-- Decide the control-return convention for an algebraic tycon from its
+-- family size: vectored returns for 2..mAX_FAMILY_SIZE_FOR_VEC_RETURNS
+-- constructors, unvectored otherwise (including unknown size, e.g. an
+-- abstract "data Bin", which gets UnvectoredReturn 0).
+ctrlReturnConvAlg tycon
+ = case (getTyConFamilySize tycon) of
+ Nothing -> -- pprPanic "ctrlReturnConvAlg:" (ppr PprDebug tycon)
+ UnvectoredReturn 0 -- e.g., w/ "data Bin"
+
+ Just size -> -- we're supposed to know...
+ if (size > (1::Int) && size <= mAX_FAMILY_SIZE_FOR_VEC_RETURNS) then
+ VectoredReturn size
+ else
+ UnvectoredReturn size
+\end{code}
+
+@dataReturnConvAlg@ determines the return conventions from the
+(possibly specialised) data constructor.
+
+(See also @getDataConReturnConv@ (in @Id@).) We grab the types
+of the data constructor's arguments. We feed them and a list of
+available registers into @assign_reg@, which sequentially assigns
+registers of the appropriate types to the arguments, based on the
+types. If @assign_reg@ runs out of a particular kind of register,
+then it gives up, returning @ReturnInHeap@.
+
+\begin{code}
+dataReturnConvAlg :: DataCon -> DataReturnConvention
+
+-- Try to assign all the constructor's argument kinds to registers,
+-- with node and infoptr marked unavailable; if any kind is left over
+-- the constructor must be returned in the heap instead.
+dataReturnConvAlg data_con
+ = ASSERT(isDataCon data_con)
+ case leftover_kinds of
+ [] -> ReturnInRegs reg_assignment
+ other -> ReturnInHeap -- Didn't fit in registers
+ where
+ (_, _, arg_tys, _) = getDataConSig data_con
+ (reg_assignment, leftover_kinds) = assignRegs [node,infoptr]
+ (map kindFromType arg_tys)
+\end{code}
+
+\begin{code}
+noLiveRegsMask :: Int -- Mask indicating nothing live
+noLiveRegsMask = 0
+
+mkLiveRegsBitMask
+ :: [MagicId] -- Candidate live regs; depends what they have in them
+ -> Int
+
+-- Build a liveness bitmask: each VanillaReg holding a followable
+-- (pointer) kind contributes its lIVENESS_Rn constant; all other
+-- registers contribute nothing.  Assumes each such register appears
+-- at most once, since bits are combined with (+) — TODO confirm.
+mkLiveRegsBitMask regs
+ = foldl do_reg noLiveRegsMask regs
+ where
+ do_reg acc (VanillaReg kind reg_no)
+ | isFollowableKind kind
+ = acc + (reg_tbl !! IBOX(reg_no _SUB_ ILIT(1)))
+
+ do_reg acc anything_else = acc
+
+ reg_tbl -- ToDo: mk Array!
+ = [lIVENESS_R1, lIVENESS_R2, lIVENESS_R3, lIVENESS_R4,
+ lIVENESS_R5, lIVENESS_R6, lIVENESS_R7, lIVENESS_R8]
+
+{-
+-- Completely opaque code. ADR
+-- What's wrong with: (untested)
+
+mkLiveRegsBitMask regs
+ = foldl (+) noLiveRegsMask (map liveness_bit regs)
+ where
+ liveness_bit (VanillaReg kind reg_no)
+ | isFollowableKind kind
+ = reg_tbl !! (reg_no - 1)
+
+ liveness_bit anything_else
+ = noLiveRegsBitMask
+
+ reg_tbl
+ = [lIVENESS_R1, lIVENESS_R2, lIVENESS_R3, lIVENESS_R4,
+ lIVENESS_R5, lIVENESS_R6, lIVENESS_R7, lIVENESS_R8]
+-}
+\end{code}
+
+
+%************************************************************************
+%* *
+\subsection[CgRetConv-prim]{Return conventions for primitive datatypes}
+%* *
+%************************************************************************
+
+WARNING! If you add a return convention which can return a pointer,
+make sure you alter CgCase (cgPrimDefault) to generate the right sort
+of heap check!
+\begin{code}
+dataReturnConvPrim :: PrimKind -> MagicId
+
+-- Choose the single register in which a primitive value of the given
+-- kind is returned.  Scalars and array/pointer-ish primitives all use
+-- register 1 of the appropriate class; VoidKind uses the phantom
+-- VoidReg; PtrKind (and anything unlisted) is a compiler bug.
+#ifndef DPH
+dataReturnConvPrim IntKind = VanillaReg IntKind ILIT(1)
+dataReturnConvPrim WordKind = VanillaReg WordKind ILIT(1)
+dataReturnConvPrim AddrKind = VanillaReg AddrKind ILIT(1)
+dataReturnConvPrim CharKind = VanillaReg CharKind ILIT(1)
+dataReturnConvPrim FloatKind = FloatReg ILIT(1)
+dataReturnConvPrim DoubleKind = DoubleReg ILIT(1)
+dataReturnConvPrim VoidKind = VoidReg
+
+-- Return a primitive-array pointer in the usual register:
+dataReturnConvPrim ArrayKind = VanillaReg ArrayKind ILIT(1)
+dataReturnConvPrim ByteArrayKind = VanillaReg ByteArrayKind ILIT(1)
+
+dataReturnConvPrim StablePtrKind = VanillaReg StablePtrKind ILIT(1)
+dataReturnConvPrim MallocPtrKind = VanillaReg MallocPtrKind ILIT(1)
+
+dataReturnConvPrim PtrKind = panic "dataReturnConvPrim: PtrKind"
+dataReturnConvPrim _ = panic "dataReturnConvPrim: other"
+
+#else
+dataReturnConvPrim VoidKind = VoidReg
+dataReturnConvPrim PtrKind = panic "dataReturnConvPrim: PtrKind"
+dataReturnConvPrim kind = DataReg kind 2 -- Don't Hog a Modifier reg.
+#endif {- Data Parallel Haskell -}
+\end{code}
+
+
+%********************************************************
+%* *
+\subsection[primop-stuff]{Argument and return conventions for Prim Ops}
+%* *
+%********************************************************
+
+\begin{code}
+assignPrimOpResultRegs
+ :: PrimOp -- The primitive op whose results are being assigned
+ -> [MagicId] -- The return regs all concatenated together,
+ -- (*including* one for the tag if necy)
+
+-- A primitive result goes in its conventional register; an algebraic
+-- result gets the concatenation of every constructor's ReturnInRegs
+-- assignment, prefixed with R1 for the tag when there is more than
+-- one constructor.  Every constructor of such a tycon is expected to
+-- return in registers — ReturnInHeap here is a panic.
+assignPrimOpResultRegs op
+ = case (getPrimOpResultInfo op) of
+
+ ReturnsPrim kind -> [dataReturnConvPrim kind]
+
+ ReturnsAlg tycon -> let cons = getTyConDataCons tycon
+ result_regs = concat (map get_return_regs cons)
+ in
+ -- Since R1 is dead, it can hold the tag if necessary
+ case cons of
+ [_] -> result_regs
+ other -> (VanillaReg IntKind ILIT(1)) : result_regs
+
+ where
+ get_return_regs con = case (dataReturnConvAlg con) of
+ ReturnInHeap -> panic "getPrimOpAlgResultRegs"
+ ReturnInRegs regs -> regs
+\end{code}
+
+@makePrimOpArgsRobust@ is used only for primitive ops which may
+trigger GC. [MAYBE (WDP 94/05)] For these, we pass all (nonRobust)
+arguments in registers. This function assigns them and tells us which
+of those registers are now live (because we've shoved a followable
+argument into it).
+
+Bug: it is assumed that robust amodes cannot contain pointers. This
+seems reasonable but isn't true. For example, \tr{Array#}'s
+\tr{MallocPtr#}'s are pointers. (This is only known to bite on
+\tr{_ccall_GC_} with a MallocPtr argument.)
+
+See after for some ADR comments...
+
+\begin{code}
+makePrimOpArgsRobust
+ :: PrimOp
+ -> [CAddrMode] -- Arguments
+ -> ([CAddrMode], -- Arg registers
+ Int, -- Liveness mask
+ AbstractC) -- Simultaneous assignments to assign args to regs
+
+-- For a GC-triggering primop: shove every non-GC-surviving argument
+-- into a register (so it is findable after GC), leave already-robust
+-- amodes alone, and return (a) the argument list rewritten with CReg
+-- references in place of the fragile amodes, (b) the liveness mask for
+-- the registers used, and (c) the assignments that load them.
+makePrimOpArgsRobust op arg_amodes
+ = ASSERT (primOpCanTriggerGC op)
+ let
+ non_robust_amodes = filter (not . amodeCanSurviveGC) arg_amodes
+ arg_kinds = map getAmodeKind non_robust_amodes
+
+ (arg_regs, extra_args) = assignRegs [{-nothing live-}] arg_kinds
+
+ -- Check that all the args fit before returning arg_regs
+ final_arg_regs = case extra_args of
+ [] -> arg_regs
+ other -> error ("Cannot allocate enough registers for primop (try rearranging code or reducing number of arguments?) " ++ ppShow 80 (ppr PprDebug op))
+
+ arg_assts = mkAbstractCs (zipWith assign_to_reg arg_regs non_robust_amodes)
+ assign_to_reg reg_id amode = CAssign (CReg reg_id) amode
+
+ -- Walk the original argument list in order, consuming one register
+ -- per fragile amode so registers line up with arg_assts above.
+ safe_arg regs arg
+ | amodeCanSurviveGC arg = (regs, arg)
+ | otherwise = (tail regs, CReg (head regs))
+ safe_amodes = snd (mapAccumL safe_arg arg_regs arg_amodes)
+
+ liveness_mask = mkLiveRegsBitMask arg_regs
+ in
+ (safe_amodes, liveness_mask, arg_assts)
+\end{code}
+
+%************************************************************************
+%* *
+\subsubsection[CgRetConv-regs]{Register assignment}
+%* *
+%************************************************************************
+
+How to assign registers.
+Registers are assigned in order.
+
+If we run out, we don't attempt to assign
+any further registers (even though we might have run out of only one kind of
+register); we just return immediately with the left-overs specified.
+
+\begin{code}
+assignRegs :: [MagicId] -- Unavailable registers
+ -> [PrimKind] -- Arg or result kinds to assign
+ -> ([MagicId], -- Register assignment in same order
+ -- for *initial segment of* input list
+ [PrimKind])-- leftover kinds
+
+#ifndef DPH
+-- Sequential register assignment: each kind takes the next free
+-- register of its class (vanilla/float/double); VoidKind always maps
+-- to VoidReg and consumes no real register.  Assignment stops at the
+-- first kind that cannot be placed.
+assignRegs regs_in_use kinds
+ = assign_reg kinds [] (mkRegTbl regs_in_use)
+ where
+
+ assign_reg :: [PrimKind] -- arg kinds being scrutinized
+ -> [MagicId] -- accum. regs assigned so far (reversed)
+ -> ([Int], [Int], [Int])
+ -- regs still avail: Vanilla, Float, Double
+ -> ([MagicId], [PrimKind])
+
+ assign_reg (VoidKind:ks) acc supply
+ = assign_reg ks (VoidReg:acc) supply -- one VoidReg is enough for everybody!
+
+ assign_reg (FloatKind:ks) acc (vanilla_rs, IBOX(f):float_rs, double_rs)
+ = assign_reg ks (FloatReg f:acc) (vanilla_rs, float_rs, double_rs)
+
+ assign_reg (DoubleKind:ks) acc (vanilla_rs, float_rs, IBOX(d):double_rs)
+ = assign_reg ks (DoubleReg d:acc) (vanilla_rs, float_rs, double_rs)
+
+ assign_reg (k:ks) acc (IBOX(v):vanilla_rs, float_rs, double_rs)
+ | not (isFloatingKind k)
+ = assign_reg ks (VanillaReg k v:acc) (vanilla_rs, float_rs, double_rs)
+
+ -- The catch-all. It can happen because either
+ -- (a) we've assigned all the regs so leftover_ks is []
+ -- (b) we couldn't find a spare register in the appropriate supply
+ -- or, I suppose,
+ -- (c) we came across a Kind we couldn't handle (this one shouldn't happen)
+ assign_reg leftover_ks acc _ = (reverse acc, leftover_ks)
+#else
+-- DPH variant: pointer-followable kinds go in vanilla (pointer) regs,
+-- everything else in data regs; first argument is a Bool-like flag
+-- reserving Ret1 for node — NOTE(review): interface differs from the
+-- non-DPH branch; confirm callers when DPH is enabled.
+assignRegs node_using_Ret1 kinds
+ = if node_using_Ret1
+ then assign_reg kinds [] (tail vanillaRegNos) (tail datRegNos)
+ else assign_reg kinds [] vanillaRegNos (tail datRegNos)
+ where
+ assign_reg:: [PrimKind] -- arg kinds being scrutinized
+ -> [MagicId] -- accum. regs assigned so far (reversed)
+ -> [Int] -- Vanilla Regs (ptr, int, char, float or double)
+ -> [Int] -- Data Regs ( int, char, float or double)
+ -> ([MagicId], [PrimKind])
+
+ assign_reg (k:ks) acc (IBOX(p):ptr_regs) dat_regs
+ | isFollowableKind k
+ = assign_reg ks (VanillaReg k p:acc) ptr_regs dat_regs
+
+ assign_reg (CharKind:ks) acc ptr_regs (d:dat_regs)
+ = assign_reg ks (DataReg CharKind d:acc) ptr_regs dat_regs
+
+ assign_reg (IntKind:ks) acc ptr_regs (d:dat_regs)
+ = assign_reg ks (DataReg IntKind d:acc) ptr_regs dat_regs
+
+ assign_reg (WordKind:ks) acc ptr_regs (d:dat_regs)
+ = assign_reg ks (DataReg WordKind d:acc) ptr_regs dat_regs
+
+ assign_reg (AddrKind:ks) acc ptr_regs (d:dat_regs)
+ = assign_reg ks (DataReg AddrKind d:acc) ptr_regs dat_regs
+
+ assign_reg (FloatKind:ks) acc ptr_regs (d:dat_regs)
+ = assign_reg ks (DataReg FloatKind d:acc) ptr_regs dat_regs
+
+ -- Notice how doubles take up two data registers....
+ assign_reg (DoubleKind:ks) acc ptr_regs (IBOX(d1):d2:dat_regs)
+ = assign_reg ks (DoubleReg d1:acc) ptr_regs dat_regs
+
+ assign_reg (VoidKind:ks) acc ptr_regs dat_regs
+ = assign_reg ks (VoidReg:acc) ptr_regs dat_regs
+
+ -- The catch-all. It can happen because either
+ -- (a) we've assigned all the regs so leftover_ks is []
+ -- (b) we couldn't find a spare register in the appropriate supply
+ -- or, I suppose,
+ -- (c) we came across a Kind we couldn't handle (this one shouldn't happen)
+ -- ToDo Maybe when dataReg becomes empty, we can start using the
+ -- vanilla registers ????
+ assign_reg leftover_ks acc _ _ = (reverse acc, leftover_ks)
+#endif {- Data Parallel Haskell -}
+\end{code}
+
+Register supplies. Vanilla registers can contain pointers, Ints, Chars.
+
+\begin{code}
+vanillaRegNos :: [Int]
+-- Candidate vanilla (general-purpose) register numbers, 1-based.
+vanillaRegNos = [1 .. mAX_Vanilla_REG]
+\end{code}
+
+Only a subset of the registers on the DAP can be used to hold pointers (and most
+of these are taken up with things like the heap pointer and stack pointers).
+However the resulting registers can hold integers, floats or chars. We therefore
+allocate pointer-like things into the @vanillaRegNos@ (and Ints, Chars or Floats
+if the remaining registers are empty). See NOTE.registerMap for an outline of
+the global and local register allocation scheme.
+
+\begin{code}
+#ifdef DPH
+-- Candidate data-register numbers (DPH target only).
+datRegNos ::[Int]
+datRegNos = [1..mAX_Data_REG] -- For Ints, Floats, Doubles or Chars
+#endif {- Data Parallel Haskell -}
+\end{code}
+
+Floats and doubles have separate register supplies.
+
+\begin{code}
+#ifndef DPH
+floatRegNos, doubleRegNos :: [Int]
+floatRegNos = [1 .. mAX_Float_REG]
+doubleRegNos = [1 .. mAX_Double_REG]
+
+-- Build the three free-register supplies (vanilla, float, double) by
+-- filtering out the register numbers already present in regs_in_use.
+-- Vanilla candidates are compared as (VanillaReg VoidKind n), so a
+-- vanilla register is considered busy whatever kind it holds.
+mkRegTbl :: [MagicId] -> ([Int], [Int], [Int])
+mkRegTbl regs_in_use = (ok_vanilla, ok_float, ok_double)
+ where
+ ok_vanilla = catMaybes (map (select (VanillaReg VoidKind)) vanillaRegNos)
+ ok_float = catMaybes (map (select FloatReg) floatRegNos)
+ ok_double = catMaybes (map (select DoubleReg) doubleRegNos)
+
+ select :: (FAST_INT -> MagicId) -> Int{-cand-} -> Maybe Int
+ -- once we've unboxed the Int, we make a MagicId
+ -- and see if it is already in use; if not, return its number.
+
+ select mk_reg_fun cand@IBOX(i)
+ = let
+ reg = mk_reg_fun i
+ in
+ if reg `not_elem` regs_in_use
+ then Just cand
+ else Nothing
+ where
+ not_elem = isn'tIn "mkRegTbl"
+
+#endif {- Data Parallel Haskell -}
+\end{code}
diff --git a/ghc/compiler/codeGen/CgStackery.hi b/ghc/compiler/codeGen/CgStackery.hi
new file mode 100644
index 0000000000..25448fd3ad
--- /dev/null
+++ b/ghc/compiler/codeGen/CgStackery.hi
@@ -0,0 +1,35 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgStackery where
+import AbsCSyn(AbstractC, CAddrMode, CExprMacro, CStmtMacro, MagicId, RegRelative, ReturnInfo)
+import BasicLit(BasicLit)
+import CLabelInfo(CLabel)
+import CgBindery(CgIdInfo)
+import CgMonad(CgInfoDownwards, CgState, StubFlag)
+import ClosureInfo(ClosureInfo)
+import CostCentre(CostCentre)
+import HeapOffs(HeapOffset)
+import Maybes(Labda)
+import PreludePS(_PackedString)
+import PrimKind(PrimKind)
+import PrimOps(PrimOp)
+import UniqFM(UniqFM)
+import Unique(Unique)
+data AbstractC {-# GHC_PRAGMA AbsCNop | AbsCStmts AbstractC AbstractC | CAssign CAddrMode CAddrMode | CJump CAddrMode | CFallThrough CAddrMode | CReturn CAddrMode ReturnInfo | CSwitch CAddrMode [(BasicLit, AbstractC)] AbstractC | CCodeBlock CLabel AbstractC | CInitHdr ClosureInfo RegRelative CAddrMode Bool | COpStmt [CAddrMode] PrimOp [CAddrMode] Int [MagicId] | CSimultaneous AbstractC | CMacroStmt CStmtMacro [CAddrMode] | CCallProfCtrMacro _PackedString [CAddrMode] | CCallProfCCMacro _PackedString [CAddrMode] | CStaticClosure CLabel ClosureInfo CAddrMode [CAddrMode] | CClosureInfoAndCode ClosureInfo AbstractC (Labda AbstractC) CAddrMode [Char] | CRetVector CLabel [Labda CAddrMode] AbstractC | CRetUnVector CLabel CAddrMode | CFlatRetVector CLabel [CAddrMode] | CCostCentreDecl Bool CostCentre | CClosureUpdInfo AbstractC | CSplitMarker #-}
+data CAddrMode {-# GHC_PRAGMA CVal RegRelative PrimKind | CAddr RegRelative | CReg MagicId | CTableEntry CAddrMode CAddrMode PrimKind | CTemp Unique PrimKind | CLbl CLabel PrimKind | CUnVecLbl CLabel CLabel | CCharLike CAddrMode | CIntLike CAddrMode | CString _PackedString | CLit BasicLit | CLitLit _PackedString PrimKind | COffset HeapOffset | CCode AbstractC | CLabelledCode CLabel AbstractC | CJoinPoint Int Int | CMacroExpr PrimKind CExprMacro [CAddrMode] | CCostCentre CostCentre Bool #-}
+data CgState {-# GHC_PRAGMA MkCgState AbstractC (UniqFM CgIdInfo) ((Int, [(Int, StubFlag)], Int, Int), (Int, [Int], Int, Int), (HeapOffset, HeapOffset)) #-}
+data PrimKind {-# GHC_PRAGMA PtrKind | CodePtrKind | DataPtrKind | RetKind | InfoPtrKind | CostCentreKind | CharKind | IntKind | WordKind | AddrKind | FloatKind | DoubleKind | MallocPtrKind | StablePtrKind | ArrayKind | ByteArrayKind | VoidKind #-}
+adjustRealSps :: Int -> Int -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 4 _U_ 2201 _N_ _S_ "LLAU(LLU(U(LLLL)U(LLLL)L))" {_A_ 5 _U_ 22221 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+allocAStack :: CgInfoDownwards -> CgState -> (Int, CgState)
+ {-# GHC_PRAGMA _A_ 2 _U_ 01 _N_ _S_ "AU(LLU(U(LLLL)LL))" {_A_ 5 _U_ 22122 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+allocBStack :: Int -> CgInfoDownwards -> CgState -> (Int, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 201 _N_ _S_ "LAU(LLU(LU(LLLL)L))" {_A_ 4 _U_ 2221 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+allocUpdateFrame :: Int -> CAddrMode -> ((Int, Int, Int) -> CgInfoDownwards -> CgState -> CgState) -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 5 _U_ 12111 _N_ _S_ "LLSU(LLU(LLS))U(LLU(LU(LLLL)L))" _N_ _N_ #-}
+getFinalStackHW :: (Int -> Int -> CgInfoDownwards -> CgState -> CgState) -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 221 _N_ _S_ "SLU(LLL)" {_A_ 5 _U_ 22222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+mkStkAmodes :: Int -> Int -> [CAddrMode] -> CgInfoDownwards -> CgState -> ((Int, Int, AbstractC), CgState)
+ {-# GHC_PRAGMA _A_ 5 _U_ 22201 _N_ _S_ "LLLAU(LLL)" {_A_ 4 _U_ 2221 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+mkVirtStkOffsets :: Int -> Int -> (a -> PrimKind) -> [a] -> (Int, Int, [(a, Int)], [(a, Int)])
+ {-# GHC_PRAGMA _A_ 4 _U_ 2222 _N_ _N_ _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgStackery.lhs b/ghc/compiler/codeGen/CgStackery.lhs
new file mode 100644
index 0000000000..3ec30f02ea
--- /dev/null
+++ b/ghc/compiler/codeGen/CgStackery.lhs
@@ -0,0 +1,264 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1995
+%
+\section[CgStackery]{Stack management functions}
+
+Stack-twiddling operations, which are pretty low-down and grimy.
+(This is the module that knows all about stack layouts, etc.)
+
+\begin{code}
+#include "HsVersions.h"
+
+module CgStackery (
+ allocAStack, allocBStack, allocUpdateFrame,
+ adjustRealSps, getFinalStackHW,
+ mkVirtStkOffsets, mkStkAmodes,
+
+ -- and to make the interface self-sufficient...
+ AbstractC, CAddrMode, CgState, PrimKind
+ ) where
+
+import StgSyn
+import CgMonad
+import AbsCSyn
+
+import CgUsages ( getSpBRelOffset )
+import Maybes ( Maybe(..) )
+import PrimKind ( getKindSize, retKindSize, separateByPtrFollowness )
+import Util
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgStackery-layout]{Laying out a stack frame}
+%* *
+%************************************************************************
+
+@mkVirtStkOffsets@ is given a list of arguments. The first argument
+gets the {\em largest} virtual stack offset (remember, virtual offsets
+increase towards the top of stack).
+
+\begin{code}
+mkVirtStkOffsets :: VirtualSpAOffset -- Offset of the last allocated thing
+ -> VirtualSpBOffset -- ditto
+ -> (a -> PrimKind) -- to be able to grab kinds
+ -> [a] -- things to make offsets for
+ -> (VirtualSpAOffset, -- OUTPUTS: Topmost allocated word
+ VirtualSpBOffset, -- ditto
+ [(a, VirtualSpAOffset)], -- boxed things with offsets
+ [(a, VirtualSpBOffset)]) -- unboxed things with offsets
+
+-- Pointer-followness splits the things in two: the boxed ones are laid
+-- out from init_SpA_offset, the unboxed ones from init_SpB_offset.
+-- mapAccumR works from the right, so the *first* thing in the list ends
+-- up with the largest offset, as promised in the commentary above.
+mkVirtStkOffsets init_SpA_offset init_SpB_offset kind_fun things
+ = let (boxeds, unboxeds)
+ = separateByPtrFollowness kind_fun things
+ (last_SpA_offset, boxd_w_offsets)
+ = mapAccumR computeOffset init_SpA_offset boxeds
+ (last_SpB_offset, ubxd_w_offsets)
+ = mapAccumR computeOffset init_SpB_offset unboxeds
+ in
+ (last_SpA_offset, last_SpB_offset, boxd_w_offsets, ubxd_w_offsets)
+ where
+ -- Advance the running offset by the thing's size (in words); the
+ -- thing itself is recorded at offset+1, i.e. the first word above
+ -- the previously allocated top.
+ computeOffset offset thing
+ = (offset + (getKindSize . kind_fun) thing, (thing, offset+(1::Int)))
+\end{code}
+
+@mkStkAmodes@ is a higher-level version of @mkVirtStkOffsets@.
+It starts from the tail-call locations.
+It returns a single list of addressing modes for the stack locations,
+and therefore is in the monad.
+
+It also adjusts the high water mark if necessary.
+
+\begin{code}
+mkStkAmodes :: VirtualSpAOffset -- Tail call positions
+ -> VirtualSpBOffset
+ -> [CAddrMode] -- things to make offsets for
+ -> FCode (VirtualSpAOffset, -- OUTPUTS: Topmost allocated word
+ VirtualSpBOffset, -- ditto
+ AbstractC) -- Assignments to appropriate stk slots
+
+-- NB: written in "unfolded monad" style: the FCode is expressed by
+-- taking the info-down and state arguments explicitly and returning
+-- the (result, new-state) pair by hand.
+mkStkAmodes tail_spa tail_spb things
+ info_down (MkCgState absC binds usage)
+ = (result, MkCgState absC binds new_usage)
+ where
+ result = (last_SpA_offset, last_SpB_offset, mkAbstractCs abs_cs)
+
+ -- Lay the things out virtually: pointers go above tail_spa on the
+ -- A stack, non-pointers above tail_spb on the B stack
+ (last_SpA_offset, last_SpB_offset, ptrs_w_offsets, non_ptrs_w_offsets)
+ = mkVirtStkOffsets tail_spa tail_spb getAmodeKind things
+
+ -- One assignment per thing, into its allocated stack slot
+ abs_cs
+ = [ CAssign (CVal (SpARel realSpA offset) PtrKind) thing
+ | (thing, offset) <- ptrs_w_offsets
+ ]
+ ++
+ [ CAssign (CVal (SpBRel realSpB offset) (getAmodeKind thing)) thing
+ | (thing, offset) <- non_ptrs_w_offsets
+ ]
+
+ ((vspA,fspA,realSpA,hwSpA), (vspB,fspB,realSpB,hwSpB), h_usage) = usage
+
+ -- Only the high-water marks change here: raise each one if the new
+ -- layout reaches above it
+ new_usage = ((vspA,fspA,realSpA,max last_SpA_offset hwSpA),
+ (vspB,fspB,realSpB,max last_SpB_offset hwSpB),
+ h_usage)
+ -- No need to fiddle with virtual SpA etc because this call is
+ -- only done just before the end of a block
+
+
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgStackery-monadery]{Inside-monad functions for stack manipulation}
+%* *
+%************************************************************************
+
+Allocate a virtual offset for something.
+\begin{code}
+allocAStack :: FCode VirtualSpAOffset
+
+-- Allocate one word on the A stack: reuse a slot from the free list if
+-- there is one (preferring an un-stubbed slot), otherwise push a fresh
+-- word and raise the high-water mark.
+allocAStack info_down (MkCgState absC binds
+ ((virt_a, free_a, real_a, hw_a), b_usage, h_usage))
+ = (chosen_slot, MkCgState absC binds (new_a_usage, b_usage, h_usage))
+ where
+ push_virt_a = virt_a + 1
+
+ (chosen_slot, new_a_usage)
+ = if null free_a then
+ -- No free slots, so push a new one
+ -- We need to adjust the high-water mark
+ (push_virt_a, (push_virt_a, [], real_a, hw_a `max` push_virt_a))
+ else
+ -- Free slots available, so use one
+ (free_slot, (virt_a, new_free_a, real_a, hw_a))
+
+ -- The `head' below is safe: this binding is only demanded in the
+ -- branch where free_a is known non-empty (lazy where-bindings).
+ (free_slot, _) = head ([f | f@(slot, st) <- free_a, not (isStubbed st)] ++ free_a)
+ -- Try to find an un-stubbed location;
+ -- if none, return the first in the free list
+ -- We'll only try this if free_a is known to be non-empty
+
+ -- Free list with the free_slot deleted
+ new_free_a = [ f | f@(s,_) <- free_a, s /= free_slot ]
+
+allocBStack :: Int -> FCode VirtualSpBOffset
+-- Allocate `size' contiguous words on the B stack: reuse a chunk from
+-- the free list if one can be found, otherwise push new words and
+-- raise the high-water mark.  Returns the lowest offset of the chunk.
+allocBStack size info_down (MkCgState absC binds
+ (a_usage, (virt_b, free_b, real_b, hw_b), h_usage))
+ = (chosen_slot, MkCgState absC binds (a_usage, new_b_usage, h_usage))
+ where
+ push_virt_b = virt_b + size
+
+ (chosen_slot, new_b_usage)
+ = case find_block free_b of
+ Nothing -> (virt_b+1, (push_virt_b, free_b, real_b,
+ hw_b `max` push_virt_b))
+ -- Adjust high water mark
+
+ Just slot -> (slot, (virt_b, delete_block free_b slot, real_b, hw_b))
+
+ -- find_block looks for a contiguous chunk of free slots
+ -- NOTE(review): the guard compares the first `size' entries against
+ -- `repeat slot', i.e. it demands `size' *identical* offsets, not the
+ -- consecutive run slot..slot+size-1 that delete_block assumes.
+ -- Looks wrong for size > 1 -- confirm intent.
+ find_block :: [VirtualSpBOffset] -> Maybe VirtualSpBOffset
+ find_block [] = Nothing
+ find_block (slot:slots)
+ | take size (slot:slots) == take size (repeat slot)
+ = Just slot
+ | otherwise
+ = find_block slots
+
+ delete_block free_b slot = [s | s <- free_b, (s<slot) || (s>=slot+size)]
+ -- Retain slots which are not in the range
+ -- slot..slot+size-1
+\end{code}
+
+@allocUpdateFrame@ allocates enough space for an update frame
+on the B stack, records the fact in the end-of-block info (in the ``args''
+fields), and passes on the old ``args'' fields to the enclosed code.
+
+This is all a bit disgusting.
+
+\begin{code}
+allocUpdateFrame :: Int -- Size of frame
+ -> CAddrMode -- Return address which is to be the
+ -- top word of frame
+ -> ((VirtualSpAOffset, VirtualSpBOffset, VirtualSpBOffset) -> Code)
+ -- Scope of update
+ -> Code
+
+-- Bumps virtual SpB by `size', records an UpdateCode sequel in the
+-- end-of-block info, and runs `code' with the *old* args-SpA/SpB plus
+-- the old virtual SpB.  Only legal when the current sequel is InRetReg
+-- (the return address is still in RetReg); anything else is a bug,
+-- hence the panic.
+allocUpdateFrame size update_amode code
+ (MkCgInfoDown c_info statics (EndOfBlockInfo args_spa args_spb sequel))
+ (MkCgState absc binds (a_usage, (vB,rr,qq,hwB),h_usage))
+ = case sequel of
+
+ InRetReg -> code (args_spa, args_spb, vB)
+ (MkCgInfoDown c_info statics new_eob_info)
+ (MkCgState absc binds new_usage)
+
+ other -> panic "allocUpdateFrame"
+
+ where
+ new_vB = vB + size
+ new_eob_info = EndOfBlockInfo args_spa new_vB (UpdateCode update_amode)
+ new_usage = (a_usage, (new_vB,rr,qq,hwB `max` new_vB), h_usage)
+\end{code}
+
+
+A knot-tying beast.
+
+\begin{code}
+getFinalStackHW :: (VirtualSpAOffset -> VirtualSpBOffset -> Code) -> Code
+-- Knot-tying: the high-water marks fed to fcode are lazily read out of
+-- the *final* state that fcode itself produces (state1).  This only
+-- terminates because fcode must not force its hwSpA/hwSpB arguments
+-- while computing those marks.
+getFinalStackHW fcode info_down (MkCgState absC binds usages) = state1
+ where
+ state1 = fcode hwSpA hwSpB info_down (MkCgState absC binds usages)
+ (MkCgState _ _ ((_,_,_, hwSpA), (_,_,_, hwSpB), _)) = state1
+\end{code}
+
+
+%************************************************************************
+%* *
+\subsection[CgStackery-adjust]{Adjusting the stack pointers}
+%* *
+%************************************************************************
+
+@adjustRealSpX@ generates code to alter the actual stack pointer, and
+adjusts the environment accordingly. We are careful to push the
+conditional inside the abstract C code to avoid black holes.
+ToDo: combine together?
+
+These functions {\em do not} deal with high-water-mark adjustment.
+That's done by functions which allocate stack space.
+
+\begin{code}
+adjustRealSpA :: VirtualSpAOffset -- New offset for Arg stack ptr
+ -> Code
+-- Emit code to move the real SpA register to newRealSpA (no code at all
+-- if it is already there) and record the new real SpA in the usage
+-- info.  High-water marks are handled by the allocation functions, not
+-- here.
+adjustRealSpA newRealSpA info_down (MkCgState absC binds
+ ((vspA,fA,realSpA,hwspA),
+ b_usage, h_usage))
+ = MkCgState (mkAbsCStmts absC move_instrA) binds new_usage
+ where
+ move_instrA = if (newRealSpA == realSpA) then AbsCNop
+ else (CAssign
+ (CReg SpA)
+ (CAddr (SpARel realSpA newRealSpA)))
+ new_usage = ((vspA, fA, newRealSpA, hwspA),
+ b_usage, h_usage)
+
+adjustRealSpB :: VirtualSpBOffset -- New offset for Basic/Control stack ptr
+ -> Code
+-- B-stack analogue of adjustRealSpA: move the real SpB register (no-op
+-- if unchanged) and record the new real SpB in the usage info.
+adjustRealSpB newRealSpB info_down (MkCgState absC binds
+ (a_usage,
+ (vspB,fB,realSpB,hwspB),
+ h_usage))
+ = MkCgState (mkAbsCStmts absC move_instrB) binds new_usage
+ where
+ move_instrB = if (newRealSpB == realSpB) then AbsCNop
+ else (CAssign {-PtrKind-}
+ (CReg SpB)
+ (CAddr (SpBRel realSpB newRealSpB)))
+ new_usage = (a_usage,
+ (vspB, fB, newRealSpB, hwspB),
+ h_usage)
+
+adjustRealSps :: VirtualSpAOffset -- New offset for Arg stack ptr
+ -> VirtualSpBOffset -- Ditto B stack
+ -> Code
+-- Adjust both real stack pointers, A then B, in sequence.
+adjustRealSps newRealSpA newRealSpB
+ = adjustRealSpA newRealSpA `thenC` adjustRealSpB newRealSpB
+\end{code}
diff --git a/ghc/compiler/codeGen/CgTailCall.hi b/ghc/compiler/codeGen/CgTailCall.hi
new file mode 100644
index 0000000000..fe77b1f72b
--- /dev/null
+++ b/ghc/compiler/codeGen/CgTailCall.hi
@@ -0,0 +1,44 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgTailCall where
+import AbsCSyn(AbstractC, CAddrMode, CExprMacro, MagicId, RegRelative)
+import BasicLit(BasicLit)
+import CLabelInfo(CLabel)
+import CgBindery(CgIdInfo)
+import CgMonad(CgInfoDownwards, CgState, CompilationInfo, EndOfBlockInfo, Sequel, StubFlag)
+import Class(Class)
+import ClosureInfo(LambdaFormInfo)
+import CostCentre(CostCentre)
+import HeapOffs(HeapOffset)
+import Id(Id, IdDetails)
+import IdInfo(IdInfo)
+import Maybes(Labda)
+import NameTypes(FullName)
+import PreludePS(_PackedString)
+import PrimKind(PrimKind)
+import StgSyn(StgAtom)
+import TyCon(TyCon)
+import TyVar(TyVarTemplate)
+import UniType(UniType)
+import UniqFM(UniqFM)
+import Unique(Unique)
+data CAddrMode {-# GHC_PRAGMA CVal RegRelative PrimKind | CAddr RegRelative | CReg MagicId | CTableEntry CAddrMode CAddrMode PrimKind | CTemp Unique PrimKind | CLbl CLabel PrimKind | CUnVecLbl CLabel CLabel | CCharLike CAddrMode | CIntLike CAddrMode | CString _PackedString | CLit BasicLit | CLitLit _PackedString PrimKind | COffset HeapOffset | CCode AbstractC | CLabelledCode CLabel AbstractC | CJoinPoint Int Int | CMacroExpr PrimKind CExprMacro [CAddrMode] | CCostCentre CostCentre Bool #-}
+data CgInfoDownwards {-# GHC_PRAGMA MkCgInfoDown CompilationInfo (UniqFM CgIdInfo) EndOfBlockInfo #-}
+data CgState {-# GHC_PRAGMA MkCgState AbstractC (UniqFM CgIdInfo) ((Int, [(Int, StubFlag)], Int, Int), (Int, [Int], Int, Int), (HeapOffset, HeapOffset)) #-}
+data HeapOffset
+data Id {-# GHC_PRAGMA Id Unique UniType IdInfo IdDetails #-}
+data Labda a {-# GHC_PRAGMA Hamna | Ni a #-}
+data StgAtom a {-# GHC_PRAGMA StgVarAtom a | StgLitAtom BasicLit #-}
+data TyCon {-# GHC_PRAGMA SynonymTyCon Unique FullName Int [TyVarTemplate] UniType Bool | DataTyCon Unique FullName Int [TyVarTemplate] [Id] [Class] Bool | TupleTyCon Int | PrimTyCon Unique FullName Int ([PrimKind] -> PrimKind) | SpecTyCon TyCon [Labda UniType] #-}
+cgTailCall :: StgAtom Id -> [StgAtom Id] -> UniqFM Id -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 12222 _N_ _S_ "SSL" _N_ _N_ #-}
+mkDynamicAlgReturnCode :: TyCon -> CAddrMode -> Sequel -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 12222 _N_ _S_ "SLS" _N_ _N_ #-}
+mkPrimReturnCode :: Sequel -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 222 _N_ _S_ "SLL" _N_ _N_ #-}
+mkStaticAlgReturnCode :: Id -> Labda CLabel -> Sequel -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 21222 _N_ _S_ "LLS" _N_ _N_ #-}
+performReturn :: AbstractC -> (Sequel -> CgInfoDownwards -> CgState -> CgState) -> UniqFM Id -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 5 _U_ 21221 _N_ _S_ "LSLU(LLU(LLL))L" _N_ _N_ #-}
+tailCallBusiness :: Id -> CAddrMode -> LambdaFormInfo -> [CAddrMode] -> UniqFM Id -> AbstractC -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 6 _U_ 22222222 _N_ _S_ "LSLLLL" _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgTailCall.lhs b/ghc/compiler/codeGen/CgTailCall.lhs
new file mode 100644
index 0000000000..a292b04525
--- /dev/null
+++ b/ghc/compiler/codeGen/CgTailCall.lhs
@@ -0,0 +1,548 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1995
+%
+%********************************************************
+%* *
+\section[CgTailCall]{Tail calls: converting @StgApps@}
+%* *
+%********************************************************
+
+\begin{code}
+#include "HsVersions.h"
+
+module CgTailCall (
+ cgTailCall,
+ performReturn,
+ mkStaticAlgReturnCode, mkDynamicAlgReturnCode,
+ mkPrimReturnCode,
+
+ tailCallBusiness,
+
+ -- and to make the interface self-sufficient...
+ StgAtom, Id, CgState, CAddrMode, TyCon,
+ CgInfoDownwards, HeapOffset, Maybe
+ ) where
+
+IMPORT_Trace
+import Pretty -- Pretty/Outputable: rm (debugging only) ToDo
+import Outputable
+
+import StgSyn
+import CgMonad
+import AbsCSyn
+
+import AbsUniType ( isPrimType, UniType )
+import CgBindery ( getAtomAmodes, getCAddrMode, getCAddrModeAndInfo )
+import CgCompInfo ( oTHER_TAG, iND_TAG )
+import CgRetConv ( dataReturnConvPrim, ctrlReturnConvAlg, dataReturnConvAlg,
+ mkLiveRegsBitMask,
+ CtrlReturnConvention(..), DataReturnConvention(..)
+ )
+import CgStackery ( adjustRealSps, mkStkAmodes )
+import CgUsages ( getSpARelOffset, getSpBRelOffset )
+import CLabelInfo ( CLabel, mkStdUpdCodePtrVecLabel, mkConUpdCodePtrVecLabel )
+import ClosureInfo ( nodeMustPointToIt, getEntryConvention, EntryConvention(..) )
+import CmdLineOpts ( GlobalSwitch(..) )
+import Id ( getDataConTyCon, getDataConTag,
+ getIdUniType, getIdKind, fIRST_TAG, Id,
+ ConTag(..)
+ )
+import Maybes ( assocMaybe, maybeToBool, Maybe(..) )
+import PrimKind ( retKindSize )
+import Util
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[tailcall-doc]{Documentation}
+%* *
+%************************************************************************
+
+\begin{code}
+-- Compile a tail call: function atom, argument atoms, and the live
+-- variables of the continuation.  The defining equations appear below,
+-- interleaved with commentary.
+cgTailCall :: PlainStgAtom -> [PlainStgAtom] -> PlainStgLiveVars -> Code
+\end{code}
+
+Here's the code we generate for a tail call. (NB there may be no
+arguments, in which case this boils down to just entering a variable.)
+
+\begin{itemize}
+\item Adjust the stack ptr to \tr{tailSp + #args}.
+\item Put args in the top locations of the resulting stack.
+\item Make Node point to the function closure.
+\item Enter the function closure.
+\end{itemize}
+
+Things to be careful about:
+\begin{itemize}
+\item Don't overwrite stack locations before you have finished with
+ them (remember you need the function and the as-yet-unmoved
+ arguments).
+\item Preferably, generate no code to replace x by x on the stack (a
+ common situation in tail-recursion).
+\item Adjust the stack high water mark appropriately.
+\end{itemize}
+
+Literals are similar to constructors; they return by putting
+themselves in an appropriate register and returning to the address on
+top of the B stack.
+
+\begin{code}
+-- A literal with no arguments: a primitive return of the literal's value.
+cgTailCall (StgLitAtom lit) [] live_vars
+ = performPrimReturn (CLit lit) live_vars
+\end{code}
+
+Treat unboxed locals exactly like literals (above) except use the addr
+mode for the local instead of (CLit lit) in the assignment.
+
+Case for unboxed @Ids@ first:
+\begin{code}
+-- An unboxed variable with no arguments: fetch its addressing mode and
+-- do a primitive return, exactly as for a literal.
+cgTailCall atom@(StgVarAtom fun) [] live_vars
+ | isPrimType (getIdUniType fun)
+ = getCAddrMode fun `thenFC` \ amode ->
+ performPrimReturn amode live_vars
+\end{code}
+
+The general case (@fun@ is boxed):
+\begin{code}
+-- The general case: a boxed function, possibly applied to arguments.
+cgTailCall (StgVarAtom fun) args live_vars = performTailCall fun args live_vars
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[return-and-tail-call]{Return and tail call}
+%* *
+%************************************************************************
+
+ADR-HACK
+
+ A quick bit of hacking to try to solve my void#-leaking blues...
+
+ I think I'm getting bitten by this stuff because code like
+
+ \begin{pseudocode}
+ case ds.s12 :: IoWorld of {
+ -- lvs: [ds.s12]; rhs lvs: []; uniq: c0
+ IoWorld ds.s13# -> ds.s13#;
+ } :: Universe#
+ \end{pseudocode}
+
+ causes me to try to allocate a register to return the result in. The
+ hope is that the following will avoid such problems (and that Will
+ will do this in a cleaner way when he hits the same problem).
+
+KCAH-RDA
+
+\begin{code}
+performPrimReturn :: CAddrMode -- The thing to return
+ -> PlainStgLiveVars
+ -> Code
+
+-- Load the value into the data-return register appropriate to its
+-- kind (VoidKind needs no assignment at all -- see the ADR-HACK note
+-- above), then do a direct return via performReturn.
+performPrimReturn amode live_vars
+ = let
+ kind = getAmodeKind amode
+ ret_reg = dataReturnConvPrim kind
+
+ assign_possibly = case kind of
+ VoidKind -> AbsCNop
+ kind -> (CAssign (CReg ret_reg) amode)
+ in
+ performReturn assign_possibly mkPrimReturnCode live_vars
+
+-- Primitive values return directly (never vectored); returning a
+-- primitive to an update frame is impossible, hence the panic.
+mkPrimReturnCode :: Sequel -> Code
+--UNUSED:mkPrimReturnCode RestoreCostCentre = panic "mkPrimReturnCode: RCC"
+mkPrimReturnCode (UpdateCode _) = panic "mkPrimReturnCode: Upd"
+mkPrimReturnCode sequel = sequelToAmode sequel `thenFC` \ dest_amode ->
+ absC (CReturn dest_amode DirectReturn)
+ -- Direct, no vectoring
+
+-- All constructor arguments in registers; Node and InfoPtr are set.
+-- All that remains is
+-- (a) to set TagReg, if necessary
+-- (b) to set InfoPtr to the info ptr, if necessary
+-- (c) to do the right sort of jump.
+
+mkStaticAlgReturnCode :: Id -- The constructor
+ -> Maybe CLabel -- The info ptr, if it isn't already set
+ -> Sequel -- where to return to
+ -> Code
+
+-- The constructor is statically known, so we can often shortcut:
+-- jump straight to the right update code, or straight to the right
+-- case alternative, instead of going through the generic return.
+mkStaticAlgReturnCode con maybe_info_lbl sequel
+ = -- Generate profiling code if necessary
+ (case return_convention of
+ VectoredReturn _ -> profCtrC SLIT("VEC_RETURN") []
+ other -> nopC
+ ) `thenC`
+
+ -- Set tag if necessary
+ -- This is done by a macro, because if we are short of registers
+ -- we don't set TagReg; instead the continuation gets the tag
+ -- by indexing off the info ptr
+ (case return_convention of
+
+ UnvectoredReturn no_of_constrs
+ | no_of_constrs > 1
+ -> absC (CMacroStmt SET_TAG [mkIntCLit zero_indexed_tag])
+
+ other -> nopC
+ ) `thenC`
+
+ -- Generate the right jump or return
+ (case sequel of
+ UpdateCode _ -> -- Ha! We know the constructor,
+ -- so we can go direct to the correct
+ -- update code for that constructor
+
+ -- Set the info pointer, and jump
+ set_info_ptr `thenC`
+ absC (CJump (CLbl update_label CodePtrKind))
+
+ CaseAlts _ (Just (alts, _)) -> -- Ho! We know the constructor so
+ -- we can go right to the alternative
+
+ -- No need to set info ptr when returning to a
+ -- known join point. After all, the code at
+ -- the destination knows what constructor it
+ -- is going to handle.
+
+ case assocMaybe alts tag of
+ Just (alt_absC, join_lbl) -> absC (CJump (CLbl join_lbl CodePtrKind))
+ Nothing -> panic "mkStaticAlgReturnCode: default"
+ -- The Nothing case should never happen; it's the subject
+ -- of a wad of special-case code in cgReturnCon
+
+ other -> -- OnStack, or (CaseAlts) ret_amode Nothing)
+ -- Set the info pointer, and jump
+ set_info_ptr `thenC`
+ sequelToAmode sequel `thenFC` \ ret_amode ->
+ absC (CReturn ret_amode return_info)
+ )
+
+ where
+ tag = getDataConTag con
+ tycon = getDataConTyCon con
+ return_convention = ctrlReturnConvAlg tycon
+ zero_indexed_tag = tag - fIRST_TAG -- Adjust tag to be zero-indexed
+ -- cf AbsCFuns.mkAlgAltsCSwitch
+
+ -- Which flavour of update code to jump to depends on the data
+ -- return convention for this constructor
+ update_label = case dataReturnConvAlg con of
+ ReturnInHeap -> mkStdUpdCodePtrVecLabel tycon tag
+ ReturnInRegs _ -> mkConUpdCodePtrVecLabel tycon tag
+
+ return_info = case return_convention of
+ UnvectoredReturn _ -> DirectReturn
+ VectoredReturn _ -> StaticVectoredReturn zero_indexed_tag
+
+ set_info_ptr = case maybe_info_lbl of
+ Nothing -> nopC
+ Just info_lbl -> absC (CAssign (CReg infoptr) (CLbl info_lbl DataPtrKind))
+
+
+mkDynamicAlgReturnCode :: TyCon -> CAddrMode -> Sequel -> Code
+
+-- Here the constructor's tag is only known dynamically (dyn_tag is an
+-- amode, not a constant), so no shortcutting is possible: we either do
+-- a dynamic vectored return or set TagReg and return directly.
+mkDynamicAlgReturnCode tycon dyn_tag sequel
+ = case ctrlReturnConvAlg tycon of
+ VectoredReturn _ ->
+
+ profCtrC SLIT("VEC_RETURN") [] `thenC`
+ sequelToAmode sequel `thenFC` \ ret_addr ->
+ absC (CReturn ret_addr (DynamicVectoredReturn dyn_tag))
+
+ UnvectoredReturn no_of_constrs ->
+
+ -- Set tag if necessary
+ -- This is done by a macro, because if we are short of registers
+ -- we don't set TagReg; instead the continuation gets the tag
+ -- by indexing off the info ptr
+ (if no_of_constrs > 1 then
+ absC (CMacroStmt SET_TAG [dyn_tag])
+ else
+ nopC
+ ) `thenC`
+
+
+ sequelToAmode sequel `thenFC` \ ret_addr ->
+ -- Generate the right jump or return
+ absC (CReturn ret_addr DirectReturn)
+\end{code}
+
+\begin{code}
+performReturn :: AbstractC -- Simultaneous assignments to perform
+ -> (Sequel -> Code) -- The code to execute to actually do
+ -- the return, given an addressing mode
+ -- for the return address
+ -> PlainStgLiveVars
+ -> Code
+
+-- Common return skeleton: perform the simultaneous assignments (with
+-- dead-slot stubbing), snap the real stack pointers back to the
+-- tail-call positions, then hand the sequel to finish_code.
+performReturn sim_assts finish_code live_vars
+ = getEndOfBlockInfo `thenFC` \ (EndOfBlockInfo args_spa args_spb sequel) ->
+
+ -- Do the simultaneous assignments,
+ doSimAssts args_spa live_vars {-UNUSED:live_regs-} sim_assts `thenC`
+
+ -- Adjust stack pointers
+ adjustRealSps args_spa args_spb `thenC`
+
+ -- Do the return
+ finish_code sequel -- "sequel" is `robust' in that it doesn't
+ -- depend on stk-ptr values
+-- where
+--UNUSED: live_regs = getDestinationRegs sim_assts
+ -- ToDo: this is a *really* boring way to compute the
+ -- live-reg set!
+\end{code}
+
+\begin{code}
+performTailCall :: Id -- Function
+ -> [PlainStgAtom] -- Args
+ -> PlainStgLiveVars
+ -> Code
+
+-- Thin front end: look up the function's amode and lambda-form info,
+-- convert the argument atoms to amodes, and delegate to
+-- tailCallBusiness (with no pending assignments).
+performTailCall fun args live_vars
+ = -- Get all the info we have about the function and args and go on to
+ -- the business end
+ getCAddrModeAndInfo fun `thenFC` \ (fun_amode, lf_info) ->
+ getAtomAmodes args `thenFC` \ arg_amodes ->
+
+ tailCallBusiness
+ fun fun_amode lf_info arg_amodes
+ live_vars AbsCNop {- No pending assignments -}
+
+
+tailCallBusiness :: Id -> CAddrMode -- Function and its amode
+ -> LambdaFormInfo -- Info about the function
+ -> [CAddrMode] -- Arguments
+ -> PlainStgLiveVars -- Live in continuation
+
+ -> AbstractC -- Pending simultaneous assignments
+ -- *** GUARANTEED to contain only stack assignments.
+ -- In ptic, we don't need to look in here to
+ -- discover all live regs
+
+ -> Code
+
+-- The heart of tail-call generation: decide the entry convention,
+-- split the arguments between registers and stack, then either jump to
+-- a let-no-escape join point or do a full call (with optional
+-- semi-tagging shortcut when scrutinising a nullary application).
+tailCallBusiness fun fun_amode lf_info arg_amodes live_vars pending_assts
+ = profCtrC SLIT("SET_ACTIVITY") [CLitLit SLIT("ACT_TAILCALL") IntKind] `thenC`
+
+ isSwitchSetC EmitArityChecks `thenFC` \ do_arity_chks ->
+
+ nodeMustPointToIt lf_info `thenFC` \ node_points ->
+ getEntryConvention fun lf_info
+ (map getAmodeKind arg_amodes) `thenFC` \ entry_conv ->
+
+ getEndOfBlockInfo `thenFC` \ (EndOfBlockInfo args_spa args_spb sequel) ->
+
+ let
+ node_asst
+ = if node_points then
+ CAssign (CReg node) fun_amode
+ else
+ AbsCNop
+
+ -- Per entry convention: which registers take arguments, and the
+ -- abstract C that performs the actual jump/entry
+ (arg_regs, finish_code)
+ = case entry_conv of
+ ViaNode ->
+ ([],
+ mkAbstractCs [
+ CCallProfCtrMacro SLIT("ENT_VIA_NODE") [],
+ CAssign (CReg infoptr)
+
+ (CMacroExpr DataPtrKind INFO_PTR [CReg node]),
+ CJump (CMacroExpr CodePtrKind ENTRY_CODE [CReg infoptr])
+ ])
+ StdEntry lbl Nothing -> ([], CJump (CLbl lbl CodePtrKind))
+ StdEntry lbl (Just itbl) -> ([], CAssign (CReg infoptr) (CLbl itbl DataPtrKind)
+ `mkAbsCStmts`
+ CJump (CLbl lbl CodePtrKind))
+ DirectEntry lbl arity regs ->
+ (regs, (if do_arity_chks
+ then CMacroStmt SET_ARITY [mkIntCLit arity]
+ else AbsCNop)
+ `mkAbsCStmts` CJump (CLbl lbl CodePtrKind))
+
+ no_of_args = length arg_amodes
+
+{- UNUSED: live_regs = if node_points then
+ node : arg_regs
+ else
+ arg_regs
+-}
+ -- First length arg_regs args go in registers; the rest on the stack
+ (reg_arg_assts, stk_arg_amodes)
+ = (mkAbstractCs (zipWith assign_to_reg arg_regs arg_amodes),
+ drop (length arg_regs) arg_amodes) -- No regs, or
+ -- args beyond arity
+
+ assign_to_reg reg_id amode = CAssign (CReg reg_id) amode
+
+ in
+ case fun_amode of
+ CJoinPoint join_spa join_spb -> -- Ha! A let-no-escape thingy
+
+ -- NOTE(review): this ASSERT parses as (not A) || B, but the
+ -- comment below says it should fail when *either* stack ptr of
+ -- the join point is lower, i.e. the intent looks like
+ -- not (A || B). Suspected mis-parenthesisation -- confirm.
+ ASSERT(not (args_spa > join_spa) || (args_spb > join_spb))
+ -- If ASSERTion fails: Oops: the join point has *lower*
+ -- stack ptrs than the continuation Note that we take
+ -- the SpB point without the return address here. The
+ -- return address is put on by the let-no-escapey thing
+ -- when it finishes.
+
+ mkStkAmodes join_spa join_spb stk_arg_amodes
+ `thenFC` \ (final_spa, final_spb, stk_arg_assts) ->
+
+ -- Do the simultaneous assignments,
+ doSimAssts join_spa live_vars {-UNUSED: live_regs-}
+ (mkAbstractCs [pending_assts, reg_arg_assts, stk_arg_assts])
+ `thenC`
+
+ -- Adjust stack ptrs
+ adjustRealSps final_spa final_spb `thenC`
+
+ -- Jump to join point
+ absC finish_code
+
+ _ -> -- else: not a let-no-escape (the common case)
+
+ -- Make instruction to save return address
+ loadRetAddrIntoRetReg sequel `thenFC` \ ret_asst ->
+
+ mkStkAmodes args_spa args_spb stk_arg_amodes
+ `thenFC`
+ \ (final_spa, final_spb, stk_arg_assts) ->
+
+ -- The B-stack space for the pushed return address, with any args pushed
+ -- on top, is recorded in final_spb.
+
+ -- Do the simultaneous assignments,
+ doSimAssts args_spa live_vars {-UNUSED: live_regs-}
+ (mkAbstractCs [pending_assts, node_asst, ret_asst,
+ reg_arg_assts, stk_arg_assts])
+ `thenC`
+
+ -- Final adjustment of stack pointers
+ adjustRealSps final_spa final_spb `thenC`
+
+ -- Now decide about semi-tagging
+ isSwitchSetC DoSemiTagging `thenFC` \ semi_tagging_on ->
+ case (semi_tagging_on, arg_amodes, node_points, sequel) of
+
+ --
+ -- *************** The semi-tagging case ***************
+ --
+ ( True, [], True, CaseAlts _ (Just (st_alts, maybe_deflt_join_details))) ->
+
+ -- Whoppee! Semi-tagging rules OK!
+ -- (a) semi-tagging is switched on
+ -- (b) there are no arguments,
+ -- (c) Node points to the closure
+ -- (d) we have a case-alternative sequel with
+ -- some visible alternatives
+
+ -- Why is test (c) necessary?
+ -- Usually Node will point to it at this point, because we're
+ -- scrutinising something which is either a thunk or a
+ -- constructor.
+ -- But not always! The example I came across is when we have
+ -- a top-level Double:
+ -- lit.3 = D# 3.000
+ -- ... (case lit.3 of ...) ...
+ -- Here, lit.3 is built as a re-entrant thing, which you must enter.
+ -- (OK, the simplifier should have eliminated this, but it's
+ -- easy to deal with the case anyway.)
+
+
+ let
+ join_details_to_code (load_regs_and_profiling_code, join_lbl)
+ = load_regs_and_profiling_code `mkAbsCStmts`
+ CJump (CLbl join_lbl CodePtrKind)
+
+ semi_tagged_alts = [ (mkMachInt (toInteger (tag - fIRST_TAG)),
+ join_details_to_code join_details)
+ | (tag, join_details) <- st_alts
+ ]
+
+ -- This alternative is for the unevaluated case; oTHER_TAG is -1
+ un_evald_alt = (mkMachInt oTHER_TAG, enter_jump)
+
+ enter_jump = CJump (CMacroExpr CodePtrKind ENTRY_CODE [CReg infoptr])
+ -- Enter Node (we know infoptr will have the info ptr in it)!
+
+ in
+
+ -- Final switch
+ absC (mkAbstractCs [
+ CAssign (CReg infoptr)
+ (CVal (NodeRel zeroOff) DataPtrKind),
+
+ case maybe_deflt_join_details of
+ Nothing ->
+ CSwitch (CMacroExpr IntKind INFO_TAG [CReg infoptr])
+ (semi_tagged_alts)
+ (enter_jump)
+ Just (_, details) ->
+ CSwitch (CMacroExpr IntKind EVAL_TAG [CReg infoptr])
+ [(mkMachInt 0, enter_jump)]
+ (CSwitch
+ (CMacroExpr IntKind INFO_TAG [CReg infoptr])
+ (semi_tagged_alts)
+ (join_details_to_code details))
+ ])
+
+ --
+ -- *************** The non-semi-tagging case ***************
+ --
+ other -> absC finish_code
+\end{code}
+
+\begin{code}
+loadRetAddrIntoRetReg :: Sequel -> FCode AbstractC
+
+-- Produce the assignment that puts the return address in RetReg;
+-- nothing to do if the sequel says it is already there.
+loadRetAddrIntoRetReg InRetReg
+ = returnFC AbsCNop -- Return address already there
+
+loadRetAddrIntoRetReg sequel
+ = sequelToAmode sequel `thenFC` \ amode ->
+ returnFC (CAssign (CReg RetReg) amode)
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[doSimAssts]{@doSimAssts@}
+%* *
+%************************************************************************
+
+@doSimAssts@ happens at the end of every block of code.
+They are separate because we sometimes do some jiggery-pokery in between.
+
+\begin{code}
+doSimAssts :: VirtualSpAOffset -- tail_spa: SpA as seen by continuation
+ -> PlainStgLiveVars -- Live in continuation
+--UNUSED: -> [MagicId] -- Live regs (ptrs and non-ptrs)
+ -> AbstractC
+ -> Code
+
+-- Emit the simultaneous assignments, kill dead bindings, and stub any
+-- A-stack slots (with StkStubReg) that no longer hold live pointers.
+doSimAssts tail_spa live_vars {-UNUSED: live_regs-} sim_assts
+ = -- Do the simultaneous assignments
+ absC (CSimultaneous sim_assts) `thenC`
+
+ -- Stub any unstubbed slots; the only live variables are indicated in
+ -- the end-of-block info in the monad
+ nukeDeadBindings live_vars `thenC`
+ getUnstubbedAStackSlots tail_spa `thenFC` \ a_slots ->
+ -- Passing in tail_spa here should actually be redundant, because
+ -- the stack should be trimmed (by nukeDeadBindings) to
+ -- exactly the tail_spa position anyhow.
+
+ -- Emit code to stub dead regs; this only generates actual
+ -- machine instructions in the DEBUG version
+ -- *** NOT DONE YET ***
+
+ (if (null a_slots)
+ then nopC
+ else profCtrC SLIT("A_STK_STUB") [mkIntCLit (length a_slots)] `thenC`
+ mapCs stub_A_slot a_slots
+ )
+ where
+ stub_A_slot :: VirtualSpAOffset -> Code
+ stub_A_slot offset = getSpARelOffset offset `thenFC` \ spa_rel ->
+ absC (CAssign (CVal spa_rel PtrKind)
+ (CReg StkStubReg))
+\end{code}
diff --git a/ghc/compiler/codeGen/CgUpdate.hi b/ghc/compiler/codeGen/CgUpdate.hi
new file mode 100644
index 0000000000..0ff61fa02a
--- /dev/null
+++ b/ghc/compiler/codeGen/CgUpdate.hi
@@ -0,0 +1,7 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgUpdate where
+import AbsCSyn(CAddrMode)
+import CgMonad(CgInfoDownwards, CgState)
+pushUpdateFrame :: CAddrMode -> CAddrMode -> (CgInfoDownwards -> CgState -> CgState) -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 5 _U_ 22222 _N_ _S_ "LLSU(U(LL)LU(LLS))U(LLU(LU(LLLL)L))" _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgUpdate.lhs b/ghc/compiler/codeGen/CgUpdate.lhs
new file mode 100644
index 0000000000..40daf3714c
--- /dev/null
+++ b/ghc/compiler/codeGen/CgUpdate.lhs
@@ -0,0 +1,155 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1994
+%
+\section[CgUpdate]{Manipulating update frames}
+
+\begin{code}
+#include "HsVersions.h"
+
+module CgUpdate (
+ pushUpdateFrame -- OLD: , evalPushRCCFrame
+ ) where
+
+import StgSyn
+import CgMonad
+import AbsCSyn
+
+import CgCompInfo ( sTD_UF_SIZE, cON_UF_SIZE,
+ sCC_STD_UF_SIZE, sCC_CON_UF_SIZE,
+ spARelToInt, spBRelToInt
+ )
+import CgStackery ( allocUpdateFrame )
+import CgUsages
+import CmdLineOpts ( GlobalSwitch(..) )
+import Util
+\end{code}
+
+
+%********************************************************
+%* *
+%* Setting up update frames *
+%* *
+%********************************************************
+\subsection[setting-update-frames]{Setting up update frames}
+
+@pushUpdateFrame@ $updatee$ pushes a general update frame which
+points to $updatee$ as the thing to be updated. It is only used
+when a thunk has just been entered, so the (real) stack pointers
+are guaranteed to be nicely aligned with the top of stack.
+@pushUpdateFrame@ adjusts the virtual and tail stack pointers
+to reflect the frame pushed.
+
+\begin{code}
+pushUpdateFrame :: CAddrMode -> CAddrMode -> Code -> Code
+
+pushUpdateFrame updatee vector code
+ = isSwitchSetC SccProfilingOn `thenFC` \ profiling_on ->
+ let
+ -- frame_size *includes* the return address
+ frame_size = if profiling_on
+ then sCC_STD_UF_SIZE
+ else sTD_UF_SIZE
+ in
+ getEndOfBlockInfo `thenFC` \ eob_info ->
+ ASSERT(case eob_info of { EndOfBlockInfo _ _ InRetReg -> True; _ -> False})
+ allocUpdateFrame frame_size vector (\ _ ->
+
+ -- Emit the push macro
+ absC (CMacroStmt PUSH_STD_UPD_FRAME [
+ updatee,
+ int_CLit0, -- Known to be zero because we have just
+ int_CLit0 -- entered a thunk
+ ])
+ `thenC` code
+ )
+
+int_CLit0 = mkIntCLit 0 -- out here to avoid pushUpdateFrame CAF (sigh)
+
+{- ---------------------
+ What actually happens is something like this; but it got macro-ised
+
+ = pushOnBStack (CReg CurCostCentre) `thenFC` \ _ ->
+ pushOnBStack (CReg SuA) `thenFC` \ _ ->
+ pushOnBStack (CReg SuB) `thenFC` \ _ ->
+ pushOnBStack updatee `thenFC` \ _ ->
+ pushOnBStack (CLabel sTD_UPD_RET_VEC_LABEL CodePtrKind) `thenFC` \ _ ->
+
+ -- MAKE SuA, SuB POINT TO TOP OF A,B STACKS
+ -- Remember, SpB hasn't yet been incremented to account for the
+ -- 4-word update frame which has been pushed.
+ -- This code seems crude, but effective...
+ absC (AbsCStmts (CAssign (CReg SuA) (CReg SpA))
+ (CAssign (CReg SuB) (CAddr (SpBRel 0 4))))
+-------------------------- -}
+\end{code}
+
+@evalPushRCCFrame@ pushes a frame to restore the cost centre, and
+deallocates stuff from the A and B stack if evaluation profiling. No
+actual update is required so no closure to update is passed.
+@evalPushRCCFrame@ is called for an @scc@ expression and on entry to a
+single-entry thunk: no update reqd but cost centre manipulation is.
+
+\begin{code}
+{- OLD: WDP: 94/06
+
+evalPushRCCFrame :: Bool -> Code -> Code
+
+evalPushRCCFrame prim code
+ = isSwitchSetC SccProfiling_Eval `thenFC` \ eval_profiling ->
+
+ if (not eval_profiling) then
+ code
+ else
+
+	-- Find out how many words of stack will be
+ -- deallocated at the end of the basic block
+ -- As we push stuff onto the B stack we must make the
+	-- RCC frame deallocate the B stack words
+
+	-- We don't actually push things onto the A stack so we
+ -- can treat the A stack as if these words were not there
+ -- i.e. we subtract them from the A stack offset
+ -- They will be popped by the current block of code
+
+ -- Tell downstream code about the update frame on the B stack
+ allocUpdateFrame
+ sCC_RCC_UF_SIZE
+ (panic "pushEvalRCCFrame: mkRestoreCostCentreLbl")
+ (\ (old_args_spa, old_args_spb, upd_frame_offset) ->
+
+ getSpARelOffset old_args_spa `thenFC` \ old_args_spa_rel ->
+ getSpBRelOffset upd_frame_offset `thenFC` \ upd_frame_rel ->
+
+ let b_wds_to_pop = upd_frame_offset - old_args_spb
+ in
+
+ -- Allocate enough space on the B stack for the frame
+
+ evalCostCentreC
+ (if prim then
+ "PUSH_RCC_FRAME_RETURN"
+ else
+ "PUSH_RCC_FRAME_VECTOR")
+ [
+ mkIntCLit (spARelToInt old_args_spa_rel),
+ {- Place on A stack to ``draw the line'' -}
+ mkIntCLit (spBRelToInt upd_frame_rel),
+ {- Ditto B stk. The update frame is pushed starting
+ just above here -}
+ mkIntCLit 0,
+ {- Number of words of A below the line, which must be
+ popped to get to the tail-call position -}
+ mkIntCLit b_wds_to_pop
+ {- Ditto B stk -}
+ ] `thenC`
+
+ code
+
+
+ -- If we actually pushed things onto the A stack we have
+ -- to arrange for the RCC frame to pop these as well
+ -- Would need to tell downstream code about the update frame
+ -- both the A and B stacks
+ )
+-}
+\end{code}
diff --git a/ghc/compiler/codeGen/CgUsages.hi b/ghc/compiler/codeGen/CgUsages.hi
new file mode 100644
index 0000000000..0a1ecaf7ca
--- /dev/null
+++ b/ghc/compiler/codeGen/CgUsages.hi
@@ -0,0 +1,39 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CgUsages where
+import AbsCSyn(AbstractC, CAddrMode, CStmtMacro, MagicId, RegRelative, ReturnInfo)
+import BasicLit(BasicLit)
+import CLabelInfo(CLabel)
+import CgBindery(CgIdInfo)
+import CgMonad(CgInfoDownwards, CgState, StubFlag)
+import ClosureInfo(ClosureInfo)
+import CostCentre(CostCentre)
+import HeapOffs(HeapOffset)
+import Maybes(Labda)
+import PreludePS(_PackedString)
+import PrimOps(PrimOp)
+import UniqFM(UniqFM)
+data AbstractC {-# GHC_PRAGMA AbsCNop | AbsCStmts AbstractC AbstractC | CAssign CAddrMode CAddrMode | CJump CAddrMode | CFallThrough CAddrMode | CReturn CAddrMode ReturnInfo | CSwitch CAddrMode [(BasicLit, AbstractC)] AbstractC | CCodeBlock CLabel AbstractC | CInitHdr ClosureInfo RegRelative CAddrMode Bool | COpStmt [CAddrMode] PrimOp [CAddrMode] Int [MagicId] | CSimultaneous AbstractC | CMacroStmt CStmtMacro [CAddrMode] | CCallProfCtrMacro _PackedString [CAddrMode] | CCallProfCCMacro _PackedString [CAddrMode] | CStaticClosure CLabel ClosureInfo CAddrMode [CAddrMode] | CClosureInfoAndCode ClosureInfo AbstractC (Labda AbstractC) CAddrMode [Char] | CRetVector CLabel [Labda CAddrMode] AbstractC | CRetUnVector CLabel CAddrMode | CFlatRetVector CLabel [CAddrMode] | CCostCentreDecl Bool CostCentre | CClosureUpdInfo AbstractC | CSplitMarker #-}
+data RegRelative {-# GHC_PRAGMA HpRel HeapOffset HeapOffset | SpARel Int Int | SpBRel Int Int | NodeRel HeapOffset #-}
+data CgState {-# GHC_PRAGMA MkCgState AbstractC (UniqFM CgIdInfo) ((Int, [(Int, StubFlag)], Int, Int), (Int, [Int], Int, Int), (HeapOffset, HeapOffset)) #-}
+data HeapOffset
+freeBStkSlot :: Int -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 201 _N_ _S_ "LAU(LLU(LU(LLLL)L))" {_A_ 4 _U_ 2221 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+getHpRelOffset :: HeapOffset -> CgInfoDownwards -> CgState -> (RegRelative, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 201 _N_ _S_ "LAU(LLU(LLU(LL)))" {_A_ 4 _U_ 2222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+getSpARelOffset :: Int -> CgInfoDownwards -> CgState -> (RegRelative, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 201 _N_ _S_ "LAU(LLU(U(LLLL)LL))" {_A_ 4 _U_ 2222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+getSpBRelOffset :: Int -> CgInfoDownwards -> CgState -> (RegRelative, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 201 _N_ _S_ "LAU(LLU(LU(LLLL)L))" {_A_ 4 _U_ 2222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+getVirtAndRealHp :: CgInfoDownwards -> CgState -> ((HeapOffset, HeapOffset), CgState)
+ {-# GHC_PRAGMA _A_ 2 _U_ 01 _N_ _S_ "AU(LLU(LLU(LL)))" {_A_ 5 _U_ 22222 _N_ _N_ _N_ _N_} _F_ _IF_ARGS_ 0 2 XC 6 \ (u0 :: CgInfoDownwards) (u1 :: CgState) -> case u1 of { _ALG_ _ORIG_ CgMonad MkCgState (u2 :: AbstractC) (u3 :: UniqFM CgIdInfo) (u4 :: ((Int, [(Int, StubFlag)], Int, Int), (Int, [Int], Int, Int), (HeapOffset, HeapOffset))) -> case u4 of { _ALG_ _TUP_3 (u5 :: (Int, [(Int, StubFlag)], Int, Int)) (u6 :: (Int, [Int], Int, Int)) (u7 :: (HeapOffset, HeapOffset)) -> case u7 of { _ALG_ _TUP_2 (u8 :: HeapOffset) (u9 :: HeapOffset) -> _!_ _TUP_2 [(HeapOffset, HeapOffset), CgState] [u7, u1]; _NO_DEFLT_ }; _NO_DEFLT_ }; _NO_DEFLT_ } _N_ #-}
+getVirtSps :: CgInfoDownwards -> CgState -> ((Int, Int), CgState)
+ {-# GHC_PRAGMA _A_ 2 _U_ 01 _N_ _S_ "AU(LLU(U(LLLL)U(LLLL)L))" {_A_ 5 _U_ 22222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+initHeapUsage :: (HeapOffset -> CgInfoDownwards -> CgState -> CgState) -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 221 _N_ _S_ "LLU(LLU(LLL))" {_A_ 5 _U_ 22221 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+setRealAndVirtualSps :: Int -> Int -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 4 _U_ 2201 _N_ _S_ "LLAU(LLU(U(ALAA)U(ALAA)L))" {_A_ 5 _U_ 22221 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+setRealHp :: HeapOffset -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 201 _N_ _S_ "LAU(LLU(LLU(LA)))" {_A_ 4 _U_ 2221 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+setVirtHp :: HeapOffset -> CgInfoDownwards -> CgState -> CgState
+ {-# GHC_PRAGMA _A_ 3 _U_ 201 _N_ _S_ "LAU(LLU(LLU(AL)))" {_A_ 4 _U_ 2221 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CgUsages.lhs b/ghc/compiler/codeGen/CgUsages.lhs
new file mode 100644
index 0000000000..41ebe84c6c
--- /dev/null
+++ b/ghc/compiler/codeGen/CgUsages.lhs
@@ -0,0 +1,152 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1995
+%
+\section[CgUsages]{Accessing and modifying stacks and heap usage info}
+
+This module provides the functions to access (\tr{get*} functions) and
+modify (\tr{set*} functions) the stacks and heap usage information.
+
+\begin{code}
+module CgUsages (
+ initHeapUsage, setVirtHp, getVirtAndRealHp, setRealHp,
+ setRealAndVirtualSps,
+
+ getVirtSps,
+
+ getHpRelOffset, getSpARelOffset, getSpBRelOffset,
+--UNUSED: getVirtSpRelOffsets,
+
+ freeBStkSlot,
+
+ -- and to make the interface self-sufficient...
+ AbstractC, HeapOffset, RegRelative, CgState
+ ) where
+
+import AbsCSyn
+import CgMonad
+import Util
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgUsages-heapery]{Monad things for fiddling with heap usage}
+%* *
+%************************************************************************
+
+@initHeapUsage@ applies a function to the amount of heap that it uses.
+It initialises the heap usage to zeros, and passes on an unchanged
+heap usage.
+
+It is usually a prelude to performing a GC check, so everything must
+be in a tidy and consistent state.
+
+\begin{code}
+initHeapUsage :: (VirtualHeapOffset -> Code) -> Code
+
+initHeapUsage fcode info_down (MkCgState absC binds (a_usage, b_usage, heap_usage))
+ = state3
+ where
+ state1 = MkCgState absC binds (a_usage, b_usage, (zeroOff, zeroOff))
+ state2 = fcode (heapHWM heap_usage2) info_down state1
+ (MkCgState absC2 binds2 (a_usage2, b_usage2, heap_usage2)) = state2
+ state3 = MkCgState absC2
+ binds2
+ (a_usage2, b_usage2, heap_usage {- unchanged -})
+\end{code}
+
+\begin{code}
+setVirtHp :: VirtualHeapOffset -> Code
+setVirtHp new_virtHp info_down
+ state@(MkCgState absC binds (a_stk, b_stk, (virtHp, realHp)))
+ = MkCgState absC binds (a_stk, b_stk, (new_virtHp, realHp))
+\end{code}
+
+\begin{code}
+getVirtAndRealHp :: FCode (VirtualHeapOffset, VirtualHeapOffset)
+getVirtAndRealHp info_down state@(MkCgState _ _ (au, bu, (virtHp, realHp)))
+ = ((virtHp, realHp), state)
+\end{code}
+
+\begin{code}
+setRealHp :: VirtualHeapOffset -> Code
+setRealHp realHp info_down (MkCgState absC binds (au, bu, (vHp, _)))
+ = MkCgState absC binds (au, bu, (vHp, realHp))
+\end{code}
+
+\begin{code}
+getHpRelOffset :: VirtualHeapOffset -> FCode RegRelative
+getHpRelOffset virtual_offset info_down state@(MkCgState _ _ (_,_,(_,realHp)))
+ = (HpRel realHp virtual_offset, state)
+\end{code}
+
+The heap high water mark is the larger of virtHp and hwHp. The latter
+only records the high water marks of forked-off branches, so to find the
+heap high water mark you have to take the max of virtHp and hwHp. Remember,
+virtHp never retreats!
+
+\begin{code}
+heapHWM (virtHp, realHp) = virtHp
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[CgUsages-stackery]{Monad things for fiddling with stack usage}
+%* *
+%************************************************************************
+
+@setRealAndVirtualSps@ sets into the environment the offsets of the
+current position of the real and virtual stack pointers in the current
+stack frame. The high-water mark is set too. It generates no code.
+It is used to initialise things at the beginning of a closure body.
+
+\begin{code}
+setRealAndVirtualSps :: VirtualSpAOffset -- New real SpA
+ -> VirtualSpBOffset -- Ditto B stack
+ -> Code
+
+setRealAndVirtualSps spA spB info_down (MkCgState absC binds
+ ((vspA,fA,realSpA,hwspA),
+ (vspB,fB,realSpB,hwspB),
+ h_usage))
+ = MkCgState absC binds new_usage
+ where
+ new_usage = ((spA, fA, spA, spA),
+ (spB, fB, spB, spB),
+ h_usage)
+\end{code}
+
+\begin{code}
+getVirtSps :: FCode (VirtualSpAOffset,VirtualSpBOffset)
+getVirtSps info_down state@(MkCgState absC binds ((virtSpA,_,_,_), (virtSpB,_,_,_), _))
+ = ((virtSpA,virtSpB), state)
+\end{code}
+
+\begin{code}
+getSpARelOffset :: VirtualSpAOffset -> FCode RegRelative
+getSpARelOffset virtual_offset info_down state@(MkCgState _ _ ((_,_,realSpA,_),_,_))
+ = (SpARel realSpA virtual_offset, state)
+
+getSpBRelOffset :: VirtualSpBOffset -> FCode RegRelative
+getSpBRelOffset virtual_offset info_down state@(MkCgState _ _ (_,(_,_,realSpB,_),_))
+ = (SpBRel realSpB virtual_offset, state)
+\end{code}
+
+
+\begin{code}
+{- UNUSED:
+getVirtSpRelOffsets :: FCode (RegRelative, RegRelative)
+getVirtSpRelOffsets info_down
+ state@(MkCgState absC binds ((virtSpA,_,realSpA,_), (virtSpB,_,realSpB,_), _))
+ = ((SpARel realSpA virtSpA, SpBRel realSpB virtSpB), state)
+-}
+\end{code}
+
+\begin{code}
+freeBStkSlot :: VirtualSpBOffset -> Code
+freeBStkSlot b_slot info_down
+ state@(MkCgState absC binds (spa_usage, (virtSpB,free_b,realSpB,hwSpB), heap_usage))
+ = MkCgState absC binds (spa_usage, (virtSpB,new_free_b,realSpB,hwSpB), heap_usage)
+ where
+ new_free_b = addFreeBSlots free_b [b_slot]
+
+\end{code}
diff --git a/ghc/compiler/codeGen/ClosureInfo.hi b/ghc/compiler/codeGen/ClosureInfo.hi
new file mode 100644
index 0000000000..8914c9fe26
--- /dev/null
+++ b/ghc/compiler/codeGen/ClosureInfo.hi
@@ -0,0 +1,169 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface ClosureInfo where
+import AbsCSyn(AbstractC, CAddrMode, CExprMacro, CStmtMacro, MagicId, RegRelative, ReturnInfo)
+import BasicLit(BasicLit)
+import CLabelInfo(CLabel, mkClosureLabel)
+import CgBindery(CgIdInfo, StableLoc, VolatileLoc)
+import CgMonad(CgInfoDownwards, CgState, CompilationInfo, EndOfBlockInfo, FCode(..), StubFlag)
+import Class(Class)
+import CmdLineOpts(GlobalSwitch)
+import CostCentre(CostCentre)
+import HeapOffs(HeapOffset)
+import Id(DataCon(..), Id, IdDetails)
+import IdInfo(IdInfo)
+import Maybes(Labda)
+import NameTypes(FullName)
+import PreludePS(_PackedString)
+import PrimKind(PrimKind)
+import PrimOps(PrimOp)
+import SMRep(SMRep, SMSpecRepKind, SMUpdateKind, getSMInfoStr, getSMInitHdrStr, getSMUpdInplaceHdrStr, ltSMRepHdr)
+import StgSyn(PlainStgAtom(..), PlainStgExpr(..), PlainStgLiveVars(..), StgAtom, StgBinderInfo, StgBinding, StgCaseAlternatives, StgExpr, UpdateFlag(..))
+import TyCon(TyCon)
+import TyVar(TyVarTemplate)
+import UniTyFuns(getUniDataSpecTyCon_maybe)
+import UniType(UniType)
+import UniqFM(UniqFM)
+import UniqSet(UniqSet(..))
+import Unique(Unique)
+data AbstractC {-# GHC_PRAGMA AbsCNop | AbsCStmts AbstractC AbstractC | CAssign CAddrMode CAddrMode | CJump CAddrMode | CFallThrough CAddrMode | CReturn CAddrMode ReturnInfo | CSwitch CAddrMode [(BasicLit, AbstractC)] AbstractC | CCodeBlock CLabel AbstractC | CInitHdr ClosureInfo RegRelative CAddrMode Bool | COpStmt [CAddrMode] PrimOp [CAddrMode] Int [MagicId] | CSimultaneous AbstractC | CMacroStmt CStmtMacro [CAddrMode] | CCallProfCtrMacro _PackedString [CAddrMode] | CCallProfCCMacro _PackedString [CAddrMode] | CStaticClosure CLabel ClosureInfo CAddrMode [CAddrMode] | CClosureInfoAndCode ClosureInfo AbstractC (Labda AbstractC) CAddrMode [Char] | CRetVector CLabel [Labda CAddrMode] AbstractC | CRetUnVector CLabel CAddrMode | CFlatRetVector CLabel [CAddrMode] | CCostCentreDecl Bool CostCentre | CClosureUpdInfo AbstractC | CSplitMarker #-}
+data CAddrMode {-# GHC_PRAGMA CVal RegRelative PrimKind | CAddr RegRelative | CReg MagicId | CTableEntry CAddrMode CAddrMode PrimKind | CTemp Unique PrimKind | CLbl CLabel PrimKind | CUnVecLbl CLabel CLabel | CCharLike CAddrMode | CIntLike CAddrMode | CString _PackedString | CLit BasicLit | CLitLit _PackedString PrimKind | COffset HeapOffset | CCode AbstractC | CLabelledCode CLabel AbstractC | CJoinPoint Int Int | CMacroExpr PrimKind CExprMacro [CAddrMode] | CCostCentre CostCentre Bool #-}
+data MagicId {-# GHC_PRAGMA BaseReg | StkOReg | VanillaReg PrimKind Int# | FloatReg Int# | DoubleReg Int# | TagReg | RetReg | SpA | SuA | SpB | SuB | Hp | HpLim | LivenessReg | ActivityReg | StdUpdRetVecReg | StkStubReg | CurCostCentre | VoidReg #-}
+data CLabel
+data CgIdInfo {-# GHC_PRAGMA MkCgIdInfo Id VolatileLoc StableLoc LambdaFormInfo #-}
+data CgInfoDownwards {-# GHC_PRAGMA MkCgInfoDown CompilationInfo (UniqFM CgIdInfo) EndOfBlockInfo #-}
+data CgState {-# GHC_PRAGMA MkCgState AbstractC (UniqFM CgIdInfo) ((Int, [(Int, StubFlag)], Int, Int), (Int, [Int], Int, Int), (HeapOffset, HeapOffset)) #-}
+data ClosureInfo {-# GHC_PRAGMA MkClosureInfo Id LambdaFormInfo SMRep #-}
+data CompilationInfo {-# GHC_PRAGMA MkCompInfo (GlobalSwitch -> Bool) _PackedString #-}
+data EntryConvention = ViaNode | StdEntry CLabel (Labda CLabel) | DirectEntry CLabel Int [MagicId]
+type FCode a = CgInfoDownwards -> CgState -> (a, CgState)
+data HeapOffset
+type DataCon = Id
+data Id {-# GHC_PRAGMA Id Unique UniType IdInfo IdDetails #-}
+data Labda a {-# GHC_PRAGMA Hamna | Ni a #-}
+data LambdaFormInfo {-# GHC_PRAGMA LFReEntrant Bool Int Bool | LFCon Id Bool | LFTuple Id Bool | LFThunk Bool Bool Bool StandardFormInfo | LFArgument | LFImported | LFLetNoEscape Int (UniqFM Id) | LFBlackHole | LFIndirection #-}
+data PrimKind {-# GHC_PRAGMA PtrKind | CodePtrKind | DataPtrKind | RetKind | InfoPtrKind | CostCentreKind | CharKind | IntKind | WordKind | AddrKind | FloatKind | DoubleKind | MallocPtrKind | StablePtrKind | ArrayKind | ByteArrayKind | VoidKind #-}
+data SMRep {-# GHC_PRAGMA StaticRep Int Int | SpecialisedRep SMSpecRepKind Int Int SMUpdateKind | GenericRep Int Int SMUpdateKind | BigTupleRep Int | DataRep Int | DynamicRep | BlackHoleRep | PhantomRep | MuTupleRep Int #-}
+type PlainStgAtom = StgAtom Id
+type PlainStgExpr = StgExpr Id Id
+type PlainStgLiveVars = UniqFM Id
+data StandardFormInfo {-# GHC_PRAGMA NonStandardThunk | SelectorThunk Id Id Int | VapThunk Id [StgAtom Id] Bool #-}
+data StgAtom a {-# GHC_PRAGMA StgVarAtom a | StgLitAtom BasicLit #-}
+data StgBinderInfo {-# GHC_PRAGMA NoStgBinderInfo | StgBinderInfo Bool Bool Bool Bool Bool #-}
+data StgExpr a b {-# GHC_PRAGMA StgApp (StgAtom b) [StgAtom b] (UniqFM b) | StgConApp Id [StgAtom b] (UniqFM b) | StgPrimApp PrimOp [StgAtom b] (UniqFM b) | StgCase (StgExpr a b) (UniqFM b) (UniqFM b) Unique (StgCaseAlternatives a b) | StgLet (StgBinding a b) (StgExpr a b) | StgLetNoEscape (UniqFM b) (UniqFM b) (StgBinding a b) (StgExpr a b) | StgSCC UniType CostCentre (StgExpr a b) #-}
+data UpdateFlag = ReEntrant | Updatable | SingleEntry
+data TyCon {-# GHC_PRAGMA SynonymTyCon Unique FullName Int [TyVarTemplate] UniType Bool | DataTyCon Unique FullName Int [TyVarTemplate] [Id] [Class] Bool | TupleTyCon Int | PrimTyCon Unique FullName Int ([PrimKind] -> PrimKind) | SpecTyCon TyCon [Labda UniType] #-}
+data UniqFM a {-# GHC_PRAGMA EmptyUFM | LeafUFM Int# a | NodeUFM Int# Int# (UniqFM a) (UniqFM a) #-}
+type UniqSet a = UniqFM a
+allocProfilingMsg :: ClosureInfo -> _PackedString
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(ASA)" {_A_ 1 _U_ 1 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+blackHoleClosureInfo :: ClosureInfo -> ClosureInfo
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(LAA)" {_A_ 1 _U_ 2 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+blackHoleOnEntry :: Bool -> ClosureInfo -> Bool
+ {-# GHC_PRAGMA _A_ 2 _U_ 11 _N_ _S_ "LU(ALS)" {_A_ 3 _U_ 111 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+closureGoodStuffSize :: ClosureInfo -> Int
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(AAS)" {_A_ 1 _U_ 1 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+closureHdrSize :: ClosureInfo -> HeapOffset
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(AAS)" {_A_ 1 _U_ 2 _N_ _N_ _F_ _IF_ARGS_ 0 0 X 1 _ORIG_ HeapOffs totHdrSize _N_} _F_ _IF_ARGS_ 0 1 C 3 \ (u0 :: ClosureInfo) -> case u0 of { _ALG_ _ORIG_ ClosureInfo MkClosureInfo (u1 :: Id) (u2 :: LambdaFormInfo) (u3 :: SMRep) -> _APP_ _ORIG_ HeapOffs totHdrSize [ u3 ]; _NO_DEFLT_ } _N_ #-}
+closureId :: ClosureInfo -> Id
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(U(LLLL)AA)" {_A_ 4 _U_ 2222 _N_ _N_ _F_ _IF_ARGS_ 0 4 XXXX 5 \ (u0 :: Unique) (u1 :: UniType) (u2 :: IdInfo) (u3 :: IdDetails) -> _!_ _ORIG_ Id Id [] [u0, u1, u2, u3] _N_} _F_ _IF_ARGS_ 0 1 C 2 \ (u0 :: ClosureInfo) -> case u0 of { _ALG_ _ORIG_ ClosureInfo MkClosureInfo (u1 :: Id) (u2 :: LambdaFormInfo) (u3 :: SMRep) -> u1; _NO_DEFLT_ } _N_ #-}
+closureKind :: ClosureInfo -> [Char]
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(ASA)" {_A_ 1 _U_ 1 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+closureLFInfo :: ClosureInfo -> LambdaFormInfo
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(ASA)" {_A_ 1 _U_ 1 _N_ _N_ _F_ _IF_ARGS_ 0 1 X 1 \ (u0 :: LambdaFormInfo) -> u0 _N_} _F_ _IF_ARGS_ 0 1 C 2 \ (u0 :: ClosureInfo) -> case u0 of { _ALG_ _ORIG_ ClosureInfo MkClosureInfo (u1 :: Id) (u2 :: LambdaFormInfo) (u3 :: SMRep) -> u2; _NO_DEFLT_ } _N_ #-}
+closureLabelFromCI :: ClosureInfo -> CLabel
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(LAA)" {_A_ 1 _U_ 2 _N_ _N_ _F_ _IF_ARGS_ 0 0 X 1 _ORIG_ CLabelInfo mkClosureLabel _N_} _F_ _IF_ARGS_ 0 1 C 3 \ (u0 :: ClosureInfo) -> case u0 of { _ALG_ _ORIG_ ClosureInfo MkClosureInfo (u1 :: Id) (u2 :: LambdaFormInfo) (u3 :: SMRep) -> _APP_ _ORIG_ CLabelInfo mkClosureLabel [ u1 ]; _NO_DEFLT_ } _N_ #-}
+closureNonHdrSize :: ClosureInfo -> Int
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(ALS)" {_A_ 2 _U_ 12 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+closurePtrsSize :: ClosureInfo -> Int
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(AAS)" {_A_ 1 _U_ 1 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+closureReturnsUnboxedType :: ClosureInfo -> Bool
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(LSA)" {_A_ 2 _U_ 11 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+closureSMRep :: ClosureInfo -> SMRep
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(AAS)" {_A_ 1 _U_ 1 _N_ _N_ _F_ _IF_ARGS_ 0 1 X 1 \ (u0 :: SMRep) -> u0 _N_} _F_ _IF_ARGS_ 0 1 C 2 \ (u0 :: ClosureInfo) -> case u0 of { _ALG_ _ORIG_ ClosureInfo MkClosureInfo (u1 :: Id) (u2 :: LambdaFormInfo) (u3 :: SMRep) -> u3; _NO_DEFLT_ } _N_ #-}
+closureSemiTag :: ClosureInfo -> Int
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(ASA)" {_A_ 1 _U_ 1 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+closureSingleEntry :: ClosureInfo -> Bool
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(ASA)" {_A_ 1 _U_ 1 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+closureSize :: ClosureInfo -> HeapOffset
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(ALS)" {_A_ 2 _U_ 12 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+closureSizeWithoutFixedHdr :: ClosureInfo -> HeapOffset
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(ALS)" {_A_ 2 _U_ 12 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+closureType :: ClosureInfo -> Labda (TyCon, [UniType], [Id])
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(LSA)" {_A_ 2 _U_ 11 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+closureTypeDescr :: ClosureInfo -> [Char]
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(U(ALAS)AA)" {_A_ 2 _U_ 22 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+closureUpdReqd :: ClosureInfo -> Bool
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(ASA)" {_A_ 1 _U_ 1 _N_ _N_ _F_ _IF_ARGS_ 0 1 C 12 \ (u0 :: LambdaFormInfo) -> case u0 of { _ALG_ _ORIG_ ClosureInfo LFThunk (u1 :: Bool) (u2 :: Bool) (u3 :: Bool) (u4 :: StandardFormInfo) -> u3; _ORIG_ ClosureInfo LFBlackHole -> _!_ True [] []; (u5 :: LambdaFormInfo) -> _!_ False [] [] } _N_} _N_ _N_ #-}
+dataConLiveness :: ClosureInfo -> Int
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(LAS)" {_A_ 2 _U_ 11 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+entryLabelFromCI :: ClosureInfo -> CLabel
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(LSL)" {_A_ 3 _U_ 211 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+fastLabelFromCI :: ClosureInfo -> CLabel
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(LAA)" {_A_ 1 _U_ 2 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+fitsMinUpdSize :: ClosureInfo -> Bool
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(ALS)" {_A_ 2 _U_ 12 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+funInfoTableRequired :: Id -> StgBinderInfo -> LambdaFormInfo -> Bool
+ {-# GHC_PRAGMA _A_ 3 _U_ 111 _N_ _S_ "LSL" _N_ _N_ #-}
+getEntryConvention :: Id -> LambdaFormInfo -> [PrimKind] -> CgInfoDownwards -> CgState -> (EntryConvention, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 22222 _N_ _N_ _N_ _N_ #-}
+mkClosureLabel :: Id -> CLabel
+ {-# GHC_PRAGMA _A_ 1 _U_ 2 _N_ _N_ _N_ _N_ #-}
+getSMInfoStr :: SMRep -> [Char]
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _N_ _N_ #-}
+getSMInitHdrStr :: SMRep -> [Char]
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _N_ _N_ #-}
+getSMUpdInplaceHdrStr :: SMRep -> [Char]
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _N_ _N_ #-}
+getStandardFormThunkInfo :: LambdaFormInfo -> Labda [StgAtom Id]
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _N_ _N_ #-}
+getUniDataSpecTyCon_maybe :: UniType -> Labda (TyCon, [UniType], [Id])
+ {-# GHC_PRAGMA _A_ 1 _U_ 2 _N_ _S_ "S" _N_ _N_ #-}
+infoTableLabelFromCI :: ClosureInfo -> CLabel
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(LSL)" {_A_ 3 _U_ 212 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+isConstantRep :: SMRep -> Bool
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _N_ _N_ #-}
+isPhantomRep :: SMRep -> Bool
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _F_ _IF_ARGS_ 0 1 C 11 \ (u0 :: SMRep) -> case u0 of { _ALG_ _ORIG_ SMRep PhantomRep -> _!_ True [] []; (u1 :: SMRep) -> _!_ False [] [] } _N_ #-}
+isSpecRep :: SMRep -> Bool
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _F_ _IF_ARGS_ 0 1 C 11 \ (u0 :: SMRep) -> case u0 of { _ALG_ _ORIG_ SMRep SpecialisedRep (u1 :: SMSpecRepKind) (u2 :: Int) (u3 :: Int) (u4 :: SMUpdateKind) -> _!_ True [] []; (u5 :: SMRep) -> _!_ False [] [] } _N_ #-}
+isStaticClosure :: ClosureInfo -> Bool
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(AAS)" {_A_ 1 _U_ 1 _N_ _N_ _F_ _IF_ARGS_ 0 1 C 11 \ (u0 :: SMRep) -> case u0 of { _ALG_ _ORIG_ SMRep StaticRep (u1 :: Int) (u2 :: Int) -> _!_ True [] []; (u3 :: SMRep) -> _!_ False [] [] } _N_} _N_ _N_ #-}
+layOutDynClosure :: Id -> (a -> PrimKind) -> [a] -> LambdaFormInfo -> (ClosureInfo, [(a, HeapOffset)])
+ {-# GHC_PRAGMA _A_ 4 _U_ 2222 _N_ _N_ _N_ _N_ #-}
+layOutDynCon :: Id -> (a -> PrimKind) -> [a] -> (ClosureInfo, [(a, HeapOffset)])
+ {-# GHC_PRAGMA _A_ 3 _U_ 222 _N_ _N_ _N_ _N_ #-}
+layOutPhantomClosure :: Id -> LambdaFormInfo -> ClosureInfo
+ {-# GHC_PRAGMA _A_ 2 _U_ 22 _N_ _N_ _N_ _N_ #-}
+layOutStaticClosure :: Id -> (a -> PrimKind) -> [a] -> LambdaFormInfo -> (ClosureInfo, [(a, HeapOffset)])
+ {-# GHC_PRAGMA _A_ 4 _U_ 2222 _N_ _N_ _N_ _N_ #-}
+layOutStaticNoFVClosure :: Id -> LambdaFormInfo -> ClosureInfo
+ {-# GHC_PRAGMA _A_ 2 _U_ 22 _N_ _N_ _N_ _N_ #-}
+ltSMRepHdr :: SMRep -> SMRep -> Bool
+ {-# GHC_PRAGMA _A_ 2 _U_ 11 _N_ _S_ "SS" _N_ _N_ #-}
+maybeSelectorInfo :: ClosureInfo -> Labda (Id, Int)
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(ASA)" {_A_ 1 _U_ 1 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+mkClosureLFInfo :: Bool -> [Id] -> UpdateFlag -> [Id] -> StgExpr Id Id -> LambdaFormInfo
+ {-# GHC_PRAGMA _A_ 5 _U_ 22222 _N_ _S_ "LLLSL" _N_ _N_ #-}
+mkConLFInfo :: Id -> LambdaFormInfo
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(LLLS)" {_A_ 4 _U_ 2222 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+mkLFArgument :: LambdaFormInfo
+ {-# GHC_PRAGMA _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 1 _!_ _ORIG_ ClosureInfo LFArgument [] [] _N_ #-}
+mkLFImported :: Id -> LambdaFormInfo
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(AAU(SAAAAAAAAA)A)" {_A_ 1 _U_ 2 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+mkLFLetNoEscape :: Int -> UniqFM Id -> LambdaFormInfo
+ {-# GHC_PRAGMA _A_ 2 _U_ 22 _N_ _N_ _F_ _IF_ARGS_ 0 2 XX 3 \ (u0 :: Int) (u1 :: UniqFM Id) -> _!_ _ORIG_ ClosureInfo LFLetNoEscape [] [u0, u1] _N_ #-}
+mkVirtHeapOffsets :: SMRep -> (a -> PrimKind) -> [a] -> (Int, Int, [(a, HeapOffset)])
+ {-# GHC_PRAGMA _A_ 3 _U_ 222 _N_ _N_ _N_ _N_ #-}
+noUpdVapRequired :: StgBinderInfo -> Bool
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _F_ _IF_ARGS_ 0 1 C 4 \ (u0 :: StgBinderInfo) -> case u0 of { _ALG_ _ORIG_ StgSyn NoStgBinderInfo -> _!_ False [] []; _ORIG_ StgSyn StgBinderInfo (u1 :: Bool) (u2 :: Bool) (u3 :: Bool) (u4 :: Bool) (u5 :: Bool) -> u4; _NO_DEFLT_ } _N_ #-}
+nodeMustPointToIt :: LambdaFormInfo -> CgInfoDownwards -> CgState -> (Bool, CgState)
+ {-# GHC_PRAGMA _A_ 3 _U_ 122 _N_ _S_ "SLL" _N_ _N_ #-}
+slopSize :: ClosureInfo -> Int
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "U(ALS)" {_A_ 2 _U_ 12 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+slowFunEntryCodeRequired :: Id -> StgBinderInfo -> Bool
+ {-# GHC_PRAGMA _A_ 2 _U_ 11 _N_ _S_ "LS" _N_ _N_ #-}
+staticClosureRequired :: Id -> StgBinderInfo -> LambdaFormInfo -> Bool
+ {-# GHC_PRAGMA _A_ 3 _U_ 111 _N_ _S_ "LSL" _N_ _N_ #-}
+stdVapRequired :: StgBinderInfo -> Bool
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _F_ _IF_ARGS_ 0 1 C 4 \ (u0 :: StgBinderInfo) -> case u0 of { _ALG_ _ORIG_ StgSyn NoStgBinderInfo -> _!_ False [] []; _ORIG_ StgSyn StgBinderInfo (u1 :: Bool) (u2 :: Bool) (u3 :: Bool) (u4 :: Bool) (u5 :: Bool) -> u3; _NO_DEFLT_ } _N_ #-}
+
diff --git a/ghc/compiler/codeGen/ClosureInfo.lhs b/ghc/compiler/codeGen/ClosureInfo.lhs
new file mode 100644
index 0000000000..d705356369
--- /dev/null
+++ b/ghc/compiler/codeGen/ClosureInfo.lhs
@@ -0,0 +1,1328 @@
+
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1995
+%
+\section[ClosureInfo]{Data structures which describe closures}
+
+Much of the rationale for these things is in the ``details'' part of
+the STG paper.
+
+\begin{code}
+#include "HsVersions.h"
+
+module ClosureInfo (
+ ClosureInfo, LambdaFormInfo, SMRep, -- all abstract
+ StandardFormInfo,
+
+ EntryConvention(..),
+
+ mkClosureLFInfo, mkConLFInfo,
+ mkLFImported, mkLFArgument, mkLFLetNoEscape,
+
+ closureSize, closureHdrSize,
+ closureNonHdrSize, closureSizeWithoutFixedHdr,
+ closureGoodStuffSize, closurePtrsSize, -- UNUSED: closureNonPtrsSize,
+ slopSize, fitsMinUpdSize,
+
+ layOutDynClosure, layOutDynCon, layOutStaticClosure,
+ layOutStaticNoFVClosure, layOutPhantomClosure,
+ mkVirtHeapOffsets, -- for GHCI
+
+ nodeMustPointToIt, getEntryConvention,
+ blackHoleOnEntry,
+
+ staticClosureRequired,
+ slowFunEntryCodeRequired, funInfoTableRequired,
+ stdVapRequired, noUpdVapRequired,
+
+ closureId, infoTableLabelFromCI,
+ closureLabelFromCI,
+ entryLabelFromCI, fastLabelFromCI,
+ closureLFInfo, closureSMRep, closureUpdReqd,
+ closureSingleEntry, closureSemiTag, closureType,
+ closureReturnsUnboxedType, getStandardFormThunkInfo,
+
+--OLD auxInfoTableLabelFromCI, isIntLikeRep, -- go away in 0.23
+
+ closureKind, closureTypeDescr, -- profiling
+
+ isConstantRep, isSpecRep, isPhantomRep, -- ToDo: Should be in SMRep, perhaps?
+ isStaticClosure, allocProfilingMsg,
+ blackHoleClosureInfo,
+ getSMInfoStr, getSMInitHdrStr, getSMUpdInplaceHdrStr,
+ ltSMRepHdr, --UNUSED: equivSMRepHdr,
+ maybeSelectorInfo,
+
+ dataConLiveness, -- concurrency
+
+ -- and to make the interface self-sufficient...
+ AbstractC, CAddrMode, HeapOffset, MagicId,
+ CgInfoDownwards, CgState, CgIdInfo, CompilationInfo,
+ CLabel, Id, Maybe, PrimKind, FCode(..), TyCon, StgExpr,
+ StgAtom, StgBinderInfo,
+ DataCon(..), PlainStgExpr(..), PlainStgLiveVars(..),
+ PlainStgAtom(..),
+ UniqSet(..), UniqFM, UpdateFlag(..) -- not abstract
+
+ IF_ATTACK_PRAGMAS(COMMA mkClosureLabel)
+ IF_ATTACK_PRAGMAS(COMMA getUniDataSpecTyCon_maybe)
+ ) where
+
+import AbsCSyn
+import CgMonad
+import SMRep
+import StgSyn
+
+import AbsUniType
+import CgCompInfo -- some magic constants
+import CgRetConv
+import CLabelInfo -- Lots of label-making things
+import CmdLineOpts ( GlobalSwitch(..) )
+import Id
+import IdInfo -- SIGH
+import Maybes ( maybeToBool, assocMaybe, Maybe(..) )
+import Outputable -- needed for INCLUDE_FRC_METHOD
+import Pretty -- ( ppStr, Pretty(..) )
+import PrimKind ( PrimKind, getKindSize, separateByPtrFollowness )
+import Util
+\end{code}
+
+The ``wrapper'' data type for closure information:
+
+\begin{code}
+data ClosureInfo
+ = MkClosureInfo
+ Id -- The thing bound to this closure
+ LambdaFormInfo -- info derivable from the *source*
+ SMRep -- representation used by storage manager
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[ClosureInfo-OLD-DOC]{OLD DOCUMENTATION PROBABLY SUPERSEDED BY stg-details}
+%* *
+%************************************************************************
+
+We can optimise the function-entry code as follows.
+\begin{itemize}
+
+\item If the ``function'' is not updatable, we can jump directly to its
+ entry code, rather than indirecting via the info pointer in the
+ closure. (For updatable thunks we must go via the closure, in
+ case it has been updated.)
+
+\item If the former bullet applies, and the application we are
+ compiling gives the function as many arguments as it needs, we
+ can jump to its fast-entry code. (This only applies if the
+ function has one or more args, because zero-arg closures have
+ no fast-entry code.)
+
+\item If the function is a top-level non-constructor or imported, there
+ is no need to make Node point to its closure. In order for
+ this to be right, we need to ensure that:
+ \begin{itemize}
+ \item If such closures are updatable then they push their
+ static address in the update frame, not Node. Actually
+ we create a black hole and push its address.
+
+ \item The arg satisfaction check should load Node before jumping to
+ UpdatePAP.
+
+ \item Top-level constructor closures need careful handling. If we are to
+ jump direct to the constructor code, we must load Node first, even
+ though they are top-level. But if we go to their ``own''
+ standard-entry code (which loads Node and then jumps to the
+ constructor code) we don't need to load Node.
+ \end{itemize}
+\end{itemize}
+
+
+{\em Top level constructors (@mkStaticConEntryInfo@)}
+
+\begin{verbatim}
+ x = {y,ys} \ {} Cons {y,ys} -- Std form constructor
+\end{verbatim}
+
+x-closure: Cons-info-table, y-closure, ys-closure
+
+x-entry: Node = x-closure; jump( Cons-entry )
+
+x's EntryInfo in its own module:
+\begin{verbatim}
+ Base-label = Cons -- Not x!!
+ NodeMustPoint = True
+ ClosureClass = Constructor
+\end{verbatim}
+
+ So if x is entered, Node will be set up and
+ we'll jump direct to the Cons code.
+
+x's EntryInfo in another module: (which may not know that x is a constructor)
+\begin{verbatim}
+ Base-label = x -- Is x!!
+ NodeMustPoint = False -- All imported things have False
+ ClosureClass = non-committal
+\end{verbatim}
+
+ If x is entered, we'll jump to x-entry, which will set up Node
+ before jumping to the standard Cons code
+
+{\em Top level non-constructors (@mkStaticEntryInfo@)}
+\begin{verbatim}
+ x = ...
+\end{verbatim}
+
+For updatable thunks, x-entry must push an allocated BH in update frame, not Node.
+
+For non-zero arity, arg satis check must load Node before jumping to
+ UpdatePAP.
+
+x's EntryInfo in its own module:
+\begin{verbatim}
+ Base-label = x
+ NodeMustPoint = False
+ ClosureClass = whatever
+\end{verbatim}
+
+{\em Inner constructors (@mkConEntryInfo@)}
+
+\begin{verbatim}
+ Base-label = Cons -- Not x!!
+ NodeMustPoint = True -- If its arity were zero, it would
+ -- have been lifted to top level
+ ClosureClass = Constructor
+\end{verbatim}
+
+{\em Inner non-constructors (@mkEntryInfo@)}
+
+\begin{verbatim}
+ Base-label = x
+ NodeMustPoint = True -- If no free vars, would have been
+ -- lifted to top level
+ ClosureClass = whatever
+\end{verbatim}
+
+{\em Imported}
+
+\begin{verbatim}
+ Nothing,
+ or
+ Base-label = x
+ NodeMustPoint = False
+ ClosureClass = whatever
+\end{verbatim}
+
+==============
+THINK: we could omit making Node point to top-level constructors
+of arity zero; but that might interact nastily with updates.
+==============
+
+
+==========
+The info we need to import for imported things is:
+
+\begin{verbatim}
+ data ImportInfo = UnknownImportInfo
+ | HnfImport Int -- Not updatable, arity given
+ -- Arity can be zero, for (eg) constrs
+ | UpdatableImport -- Must enter via the closure
+\end{verbatim}
+
+ToDo: move this stuff???
+
+\begin{pseudocode}
+mkStaticEntryInfo lbl cl_class
+ = MkEntryInfo lbl False cl_class
+
+mkStaticConEntryInfo lbl
+ = MkEntryInfo lbl True ConstructorClosure
+
+mkEntryInfo lbl cl_class
+ = MkEntryInfo lbl True cl_class
+
+mkConEntryInfo lbl
+ = MkEntryInfo lbl True ConstructorClosure
+\end{pseudocode}
+
+%************************************************************************
+%* *
+\subsection[ClosureInfo-datatypes]{Data types for closure information}
+%* *
+%************************************************************************
+
+%************************************************************************
+%* *
+\subsubsection[LambdaFormInfo-datatype]{@LambdaFormInfo@: source-derivable info}
+%* *
+%************************************************************************
+
+\begin{code}
+data LambdaFormInfo
+ = LFReEntrant -- Reentrant closure; used for PAPs too
+ Bool -- True if top level
+ Int -- Arity
+ Bool -- True <=> no fvs
+
+ | LFCon -- Constructor
+ DataCon -- The constructor (may be specialised)
+ Bool -- True <=> zero arity
+
+ | LFTuple -- Tuples
+ DataCon -- The tuple constructor (may be specialised)
+ Bool -- True <=> zero arity
+
+ | LFThunk -- Thunk (zero arity)
+ Bool -- True <=> top level
+ Bool -- True <=> no free vars
+ Bool -- True <=> updatable (i.e., *not* single-entry)
+ StandardFormInfo
+
+ | LFArgument -- Used for function arguments. We know nothing about
+ -- this closure. Treat like updatable "LFThunk"...
+
+ | LFImported -- Used for imported things. We know nothing about this
+ -- closure. Treat like updatable "LFThunk"...
+ -- Imported things which we do know something about use
+ -- one of the other LF constructors (eg LFReEntrant for
+ -- known functions)
+
+ | LFLetNoEscape -- See LetNoEscape module for precise description of
+ -- these "lets".
+ Int -- arity;
+ PlainStgLiveVars-- list of variables live in the RHS of the let.
+ -- (ToDo: maybe not used)
+
+ | LFBlackHole -- Used for the closures allocated to hold the result
+ -- of a CAF. We want the target of the update frame to
+ -- be in the heap, so we make a black hole to hold it.
+
+ -- This last one is really only for completeness;
+ -- it isn't actually used for anything interesting
+ | LFIndirection
+
+data StandardFormInfo -- Tells whether this thunk has one of a small number
+ -- of standard forms
+
+ = NonStandardThunk -- No, it isn't
+
+ | SelectorThunk
+ Id -- Scrutinee
+ DataCon -- Constructor
+ Int -- 0-origin offset of ak within the "goods" of constructor
+ -- (Recall that the a1,...,an may be laid out in the heap
+ -- in a non-obvious order.)
+
+{- A SelectorThunk is of form
+
+ case x of
+ con a1,..,an -> ak
+
+ and the constructor is from a single-constr type.
+ If we can't convert the heap-offset of the selectee into an Int, e.g.,
+ it's "GEN_VHS+i", we just give up.
+-}
+
+ | VapThunk
+ Id -- Function
+ [PlainStgAtom] -- Args
+ Bool -- True <=> the function is not top-level, so
+ -- must be stored in the thunk too
+
+{- A VapThunk is of form
+
+ f a1 ... an
+
+ where f is a known function, with arity n
+ So for this thunk we can use the label for f's heap-entry
+ info table (generated when f's defn was dealt with),
+ rather than generating a one-off info table and entry code
+ for this one thunk.
+-}
+
+
+mkLFArgument = LFArgument
+mkLFBlackHole = LFBlackHole
+mkLFLetNoEscape = LFLetNoEscape
+
+mkLFImported :: Id -> LambdaFormInfo
+mkLFImported id
+ = case arityMaybe (getIdArity id) of
+ Nothing -> LFImported
+ Just 0 -> LFThunk True{-top-lev-} True{-no fvs-}
+ True{-updatable-} NonStandardThunk
+ Just n -> LFReEntrant True n True -- n > 0
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[ClosureInfo-construction]{Functions which build LFInfos}
+%* *
+%************************************************************************
+
+@mkClosureLFInfo@ figures out the appropriate LFInfo for the closure.
+
+\begin{code}
+mkClosureLFInfo :: Bool -- True of top level
+ -> [Id] -- Free vars
+ -> UpdateFlag -- Update flag
+ -> [Id] -- Args
+ -> PlainStgExpr -- Body of closure: passed so we
+ -- can look for selector thunks!
+ -> LambdaFormInfo
+
+mkClosureLFInfo top fvs upd_flag args@(_:_) body -- Non-empty args
+ = LFReEntrant top (length args) (null fvs)
+
+mkClosureLFInfo top fvs ReEntrant [] body
+ = LFReEntrant top 0 (null fvs)
+\end{code}
+
+OK, this is where we look at the body of the closure to see if it's a
+selector---turgid, but nothing deep. We are looking for a closure of
+{\em exactly} the form:
+\begin{verbatim}
+... = [the_fv] \ u [] ->
+ case the_fv of
+ con a_1 ... a_n -> a_i
+\end{verbatim}
+Here we go:
+\begin{code}
+mkClosureLFInfo False -- don't bother if at top-level
+ [the_fv] -- just one...
+ Updatable
+ [] -- no args (a thunk)
+ (StgCase (StgApp (StgVarAtom scrutinee) [{-no args-}] _)
+ _ _ _ -- ignore live vars and uniq...
+ (StgAlgAlts case_ty
+ [(con, params, use_mask,
+ (StgApp (StgVarAtom selectee) [{-no args-}] _))]
+ StgNoDefault))
+ | the_fv == scrutinee -- Scrutinee is the only free variable
+ && maybeToBool maybe_offset -- Selectee is a component of the tuple
+ && maybeToBool offset_into_int_maybe
+ && offset_into_int <= mAX_SPEC_SELECTEE_SIZE -- Offset is small enough
+ =
+ ASSERT(is_single_constructor) -- This should be true anyway
+ LFThunk False False True (SelectorThunk scrutinee con offset_into_int)
+ where
+ (_, params_w_offsets) = layOutDynCon con getIdKind params
+ maybe_offset = assocMaybe params_w_offsets selectee
+ Just the_offset = maybe_offset
+ offset_into_int_maybe = intOffsetIntoGoods the_offset
+ Just offset_into_int = offset_into_int_maybe
+ is_single_constructor = maybeToBool (maybeSingleConstructorTyCon tycon)
+ (_,_,_, tycon) = getDataConSig con
+\end{code}
+
+Same kind of thing, looking for vector-apply thunks, of the form:
+
+ x = [...] \ .. [] -> f a1 .. an
+
+where f has arity n. We rely on the arity info inside the Id being correct.
+
+\begin{code}
+mkClosureLFInfo top_level
+ fvs
+ upd_flag
+ [] -- No args; a thunk
+ (StgApp (StgVarAtom fun_id) args _)
+ | not top_level -- A top-level thunk would require a static
+ -- vap_info table, which we don't generate just
+ -- now; so top-level thunks are never standard
+ -- form.
+ && isLocallyDefined fun_id -- Must be defined in this module
+ && maybeToBool arity_maybe -- A known function with known arity
+ && fun_arity > 0 -- It'd better be a function!
+ && fun_arity == length args -- Saturated application
+ = LFThunk top_level (null fvs) (isUpdatable upd_flag) (VapThunk fun_id args store_fun_in_vap)
+ where
+ arity_maybe = arityMaybe (getIdArity fun_id)
+ Just fun_arity = arity_maybe
+
+ -- If the function is a free variable then it must be stored
+ -- in the thunk too; if it isn't a free variable it must be
+ -- because it's constant, so it doesn't need to be stored in the thunk
+ store_fun_in_vap = fun_id `is_elem` fvs
+
+ is_elem = isIn "mkClosureLFInfo"
+\end{code}
+
+Finally, the general updatable-thing case:
+\begin{code}
+mkClosureLFInfo top fvs upd_flag [] body
+ = LFThunk top (null fvs) (isUpdatable upd_flag) NonStandardThunk
+
+isUpdatable ReEntrant = False
+isUpdatable SingleEntry = False
+isUpdatable Updatable = True
+\end{code}
+
+@mkConLFInfo@ is similar, for constructors.
+
+\begin{code}
+mkConLFInfo :: DataCon -> LambdaFormInfo
+
+mkConLFInfo con
+ = ASSERT(isDataCon con)
+ let
+ arity = getDataConArity con
+ in
+ if isTupleCon con then
+ LFTuple con (arity == 0)
+ else
+ LFCon con (arity == 0)
+\end{code}
+
+
+%************************************************************************
+%* *
+\subsection[ClosureInfo-sizes]{Functions about closure {\em sizes}}
+%* *
+%************************************************************************
+
+\begin{code}
+closureSize :: ClosureInfo -> HeapOffset
+closureSize cl_info@(MkClosureInfo _ _ sm_rep)
+ = totHdrSize sm_rep `addOff` (intOff (closureNonHdrSize cl_info))
+
+closureSizeWithoutFixedHdr :: ClosureInfo -> HeapOffset
+closureSizeWithoutFixedHdr cl_info@(MkClosureInfo _ _ sm_rep)
+ = varHdrSize sm_rep `addOff` (intOff (closureNonHdrSize cl_info))
+
+closureHdrSize :: ClosureInfo -> HeapOffset
+closureHdrSize (MkClosureInfo _ _ sm_rep)
+ = totHdrSize sm_rep
+
+closureNonHdrSize :: ClosureInfo -> Int
+closureNonHdrSize cl_info@(MkClosureInfo _ lf_info sm_rep)
+ = tot_wds + computeSlopSize tot_wds sm_rep (closureUpdReqd cl_info) --ToDo: pass lf_info?
+ where
+ tot_wds = closureGoodStuffSize cl_info
+
+closureGoodStuffSize :: ClosureInfo -> Int
+closureGoodStuffSize (MkClosureInfo _ _ sm_rep)
+ = let (ptrs, nonptrs) = sizes_from_SMRep sm_rep
+ in ptrs + nonptrs
+
+closurePtrsSize :: ClosureInfo -> Int
+closurePtrsSize (MkClosureInfo _ _ sm_rep)
+ = let (ptrs, _) = sizes_from_SMRep sm_rep
+ in ptrs
+
+-- not exported:
+sizes_from_SMRep (SpecialisedRep k ptrs nonptrs _) = (ptrs, nonptrs)
+sizes_from_SMRep (GenericRep ptrs nonptrs _) = (ptrs, nonptrs)
+sizes_from_SMRep (BigTupleRep ptrs) = (ptrs, 0)
+sizes_from_SMRep (MuTupleRep ptrs) = (ptrs, 0)
+sizes_from_SMRep (DataRep nonptrs) = (0, nonptrs)
+sizes_from_SMRep BlackHoleRep = (0, 0)
+sizes_from_SMRep (StaticRep ptrs nonptrs) = (ptrs, nonptrs)
+#ifdef DEBUG
+sizes_from_SMRep PhantomRep = panic "sizes_from_SMRep: PhantomRep"
+sizes_from_SMRep DynamicRep = panic "sizes_from_SMRep: DynamicRep"
+#endif
+\end{code}
+
+\begin{code}
+fitsMinUpdSize :: ClosureInfo -> Bool
+fitsMinUpdSize (MkClosureInfo _ _ BlackHoleRep) = True
+fitsMinUpdSize cl_info = isSpecRep (closureSMRep cl_info) && closureNonHdrSize cl_info <= mIN_UPD_SIZE
+\end{code}
+
+Computing slop size. WARNING: this looks dodgy --- it has deep
+knowledge of what the storage manager does with the various
+representations...
+
+Slop Requirements:
+\begin{itemize}
+\item
+Updatable closures must be @mIN_UPD_SIZE@.
+ \begin{itemize}
+ \item
+ Cons cell requires 2 words
+ \item
+ Indirections require 1 word
+ \item
+	Appel's collector indirections require 2 words
+ \end{itemize}
+THEREFORE: @mIN_UPD_SIZE = 2@.
+
+\item
+Collectable closures which are allocated in the heap
+must be @mIN_SIZE_NonUpdHeapObject@.
+
+Copying collector forward pointer requires 1 word
+
+THEREFORE: @mIN_SIZE_NonUpdHeapObject = 1@
+
+\item
+@SpecialisedRep@ closures may require slop:
+ \begin{itemize}
+ \item
+ @ConstantRep@ and @CharLikeRep@ closures always use the address of
+ a static closure. They are never allocated or
+ collected (eg hold forwarding pointer) hence never any slop.
+
+ \item
+ @IntLikeRep@ are never updatable.
+ May need slop to be collected (as they will be size 1 or more
+	this probably has no effect)
+
+ \item
+ @SpecRep@ may be updateable and will be collectable
+
+ \item
+ @StaticRep@ may require slop if updatable. Non-updatable ones are OK.
+
+ \item
+ @GenericRep@ closures will always be larger so never require slop.
+ \end{itemize}
+
+ ***** ToDo: keep an eye on this!
+\end{itemize}
+
+\begin{code}
+slopSize cl_info@(MkClosureInfo _ lf_info sm_rep)
+ = computeSlopSize (closureGoodStuffSize cl_info) sm_rep (closureUpdReqd cl_info)
+
+computeSlopSize :: Int -> SMRep -> Bool -> Int
+
+computeSlopSize tot_wds (SpecialisedRep ConstantRep _ _ _) _
+ = 0
+computeSlopSize tot_wds (SpecialisedRep CharLikeRep _ _ _) _
+ = 0
+
+computeSlopSize tot_wds (SpecialisedRep _ _ _ _) True -- Updatable
+ = max 0 (mIN_UPD_SIZE - tot_wds)
+computeSlopSize tot_wds (StaticRep _ _) True -- Updatable
+ = max 0 (mIN_UPD_SIZE - tot_wds)
+computeSlopSize tot_wds BlackHoleRep _ -- Updatable
+ = max 0 (mIN_UPD_SIZE - tot_wds)
+
+computeSlopSize tot_wds (SpecialisedRep _ _ _ _) False -- Not updatable
+ = max 0 (mIN_SIZE_NonUpdHeapObject - tot_wds)
+
+computeSlopSize tot_wds other_rep _ -- Any other rep
+ = 0
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[layOutDynClosure]{Lay out a dynamic closure}
+%* *
+%************************************************************************
+
+\begin{code}
+layOutDynClosure, layOutStaticClosure
+ :: Id -- STG identifier w/ which this closure assoc'd
+ -> (a -> PrimKind) -- function w/ which to be able to get a PrimKind
+ -> [a] -- the "things" being layed out
+ -> LambdaFormInfo -- what sort of closure it is
+ -> (ClosureInfo, -- info about the closure
+ [(a, VirtualHeapOffset)]) -- things w/ offsets pinned on them
+
+layOutDynClosure name kind_fn things lf_info
+ = (MkClosureInfo name lf_info sm_rep,
+ things_w_offsets)
+ where
+ (tot_wds, -- #ptr_wds + #nonptr_wds
+ ptr_wds, -- #ptr_wds
+ things_w_offsets) = mkVirtHeapOffsets sm_rep kind_fn things
+ sm_rep = chooseDynSMRep lf_info tot_wds ptr_wds
+
+layOutStaticClosure name kind_fn things lf_info
+ = (MkClosureInfo name lf_info (StaticRep ptr_wds (tot_wds - ptr_wds)),
+ things_w_offsets)
+ where
+ (tot_wds, -- #ptr_wds + #nonptr_wds
+ ptr_wds, -- #ptr_wds
+ things_w_offsets) = mkVirtHeapOffsets (StaticRep bot bot) kind_fn things
+ bot = panic "layoutStaticClosure"
+
+layOutStaticNoFVClosure :: Id -> LambdaFormInfo -> ClosureInfo
+layOutStaticNoFVClosure name lf_info
+ = MkClosureInfo name lf_info (StaticRep ptr_wds nonptr_wds)
+ where
+ -- I am very uncertain that this is right - it will show up when testing
+ -- my dynamic loading code. ADR
+ -- (If it's not right, we'll have to grab the kinds of the arguments from
+ -- somewhere.)
+ ptr_wds = 0
+ nonptr_wds = 0
+
+layOutPhantomClosure :: Id -> LambdaFormInfo -> ClosureInfo
+layOutPhantomClosure name lf_info = MkClosureInfo name lf_info PhantomRep
+\end{code}
+
+A wrapper for when used with data constructors:
+\begin{code}
+layOutDynCon :: DataCon
+ -> (a -> PrimKind)
+ -> [a]
+ -> (ClosureInfo, [(a,VirtualHeapOffset)])
+
+layOutDynCon con kind_fn args
+ = ASSERT(isDataCon con)
+ layOutDynClosure con kind_fn args (mkConLFInfo con)
+\end{code}
+
+
+%************************************************************************
+%* *
+\subsection[SMreps]{Choosing SM reps}
+%* *
+%************************************************************************
+
+\begin{code}
+chooseDynSMRep
+ :: LambdaFormInfo
+ -> Int -> Int -- Tot wds, ptr wds
+ -> SMRep
+
+chooseDynSMRep lf_info tot_wds ptr_wds
+ = let
+ nonptr_wds = tot_wds - ptr_wds
+
+ updatekind = case lf_info of
+ LFThunk _ _ upd _ -> if upd then SMUpdatable else SMSingleEntry
+ LFBlackHole -> SMUpdatable
+ _ -> SMNormalForm
+ in
+ if (nonptr_wds == 0 && ptr_wds <= mAX_SPEC_ALL_PTRS)
+ || (tot_wds <= mAX_SPEC_MIXED_FIELDS)
+ || (ptr_wds == 0 && nonptr_wds <= mAX_SPEC_ALL_NONPTRS) then
+ let
+ spec_kind = case lf_info of
+
+ (LFTuple _ True) -> ConstantRep
+
+ (LFTuple _ _) -> SpecRep
+
+ (LFCon _ True) -> ConstantRep
+
+ (LFCon con _ ) -> if maybeToBool (maybeCharLikeTyCon tycon) then CharLikeRep
+ else if maybeToBool (maybeIntLikeTyCon tycon) then IntLikeRep
+ else SpecRep
+ where
+ tycon = getDataConTyCon con
+
+ _ -> SpecRep
+ in
+ SpecialisedRep spec_kind ptr_wds nonptr_wds updatekind
+ else
+ GenericRep ptr_wds nonptr_wds updatekind
+\end{code}
+
+
+%************************************************************************
+%* *
+\subsection[mkVirtHeapOffsets]{Assigning heap offsets in a closure}
+%* *
+%************************************************************************
+
+@mkVirtHeapOffsets@ (the heap version) always returns boxed things with
+smaller offsets than the unboxed things, and furthermore, the offsets in
+the result list are in order of increasing offset.
+
+\begin{code}
+mkVirtHeapOffsets :: SMRep -- Representation to be used by storage manager
+ -> (a -> PrimKind) -- To be able to grab kinds;
+ -- w/ a kind, we can find boxedness
+ -> [a] -- Things to make offsets for
+ -> (Int, -- *Total* number of words allocated
+ Int, -- Number of words allocated for *pointers*
+ [(a, VirtualHeapOffset)]) -- Things with their offsets from start of object
+ -- in order of increasing offset
+
+-- First in list gets lowest offset, which is initial offset + 1.
+
+mkVirtHeapOffsets sm_rep kind_fun things
+ = let (ptrs, non_ptrs) = separateByPtrFollowness kind_fun things
+ (wds_of_ptrs, ptrs_w_offsets) = mapAccumL computeOffset 0 ptrs
+ (tot_wds, non_ptrs_w_offsets) = mapAccumL computeOffset wds_of_ptrs non_ptrs
+ in
+ (tot_wds, wds_of_ptrs, ptrs_w_offsets ++ non_ptrs_w_offsets)
+ where
+ offset_of_first_word = totHdrSize sm_rep
+ computeOffset wds_so_far thing
+ = (wds_so_far + (getKindSize . kind_fun) thing,
+ (thing, (offset_of_first_word `addOff` (intOff wds_so_far)))
+ )
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[ClosureInfo-4-questions]{Four major questions about @ClosureInfo@}
+%* *
+%************************************************************************
+
+Be sure to see the stg-details notes about these...
+
+\begin{code}
+nodeMustPointToIt :: LambdaFormInfo -> FCode Bool
+nodeMustPointToIt lf_info
+ = isSwitchSetC SccProfilingOn `thenFC` \ do_profiling ->
+
+ case lf_info of
+ LFReEntrant top arity no_fvs -> returnFC (
+ not no_fvs || -- Certainly if it has fvs we need to point to it
+
+ not top -- If it is not top level we will point to it
+ -- We can have a \r closure with no_fvs which
+ -- is not top level as special case cgRhsClosure
+		-- has been disabled in favour of let floating
+
+--OLD: || (arity == 0 && do_profiling)
+-- -- Access to cost centre required for 0 arity if profiling
+-- -- Simon: WHY? (94/12)
+
+ -- For lex_profiling we also access the cost centre for a
+ -- non-inherited function i.e. not top level
+ -- the not top case above ensures this is ok.
+ )
+
+ LFCon _ zero_arity -> returnFC True
+ LFTuple _ zero_arity -> returnFC True
+
+ -- Strictly speaking, the above two don't need Node to point
+ -- to it if the arity = 0. But this is a *really* unlikely
+ -- situation. If we know it's nil (say) and we are entering
+ -- it. Eg: let x = [] in x then we will certainly have inlined
+ -- x, since nil is a simple atom. So we gain little by not
+ -- having Node point to known zero-arity things. On the other
+ -- hand, we do lose something; Patrick's code for figuring out
+ -- when something has been updated but not entered relies on
+ -- having Node point to the result of an update. SLPJ
+ -- 27/11/92.
+
+ LFThunk _ no_fvs updatable _
+ -> returnFC (updatable || not no_fvs || do_profiling)
+
+ -- For the non-updatable (single-entry case):
+ --
+ -- True if has fvs (in which case we need access to them, and we
+ -- should black-hole it)
+ -- or profiling (in which case we need to recover the cost centre
+ -- from inside it)
+
+ LFArgument -> returnFC True
+ LFImported -> returnFC True
+ LFBlackHole -> returnFC True
+ -- BH entry may require Node to point
+
+ LFLetNoEscape _ _ -> returnFC False
+\end{code}
+
+The entry conventions depend on the type of closure being entered,
+whether or not it has free variables, and whether we're running
+sequentially or in parallel.
+
+\begin{tabular}{lllll}
+Closure Characteristics & Parallel & Node Req'd & Argument Passing & Enter Via \\
+Unknown & no & yes & stack & node \\
+Known fun ($\ge$ 1 arg), no fvs & no & no & registers & fast entry (enough args) \\
+\ & \ & \ & \ & slow entry (otherwise) \\
+Known fun ($\ge$ 1 arg), fvs & no & yes & registers & fast entry (enough args) \\
+0 arg, no fvs @\r,\s@ & no & no & n/a & direct entry \\
+0 arg, no fvs @\u@ & no & yes & n/a & node \\
+0 arg, fvs @\r,\s@ & no & yes & n/a & direct entry \\
+0 arg, fvs @\u@ & no & yes & n/a & node \\
+
+Unknown & yes & yes & stack & node \\
+Known fun ($\ge$ 1 arg), no fvs & yes & no & registers & fast entry (enough args) \\
+\ & \ & \ & \ & slow entry (otherwise) \\
+Known fun ($\ge$ 1 arg), fvs & yes & yes & registers & node \\
+0 arg, no fvs @\r,\s@ & yes & no & n/a & direct entry \\
+0 arg, no fvs @\u@ & yes & yes & n/a & node \\
+0 arg, fvs @\r,\s@ & yes & yes & n/a & node \\
+0 arg, fvs @\u@ & yes & yes & n/a & node\\
+\end{tabular}
+
+When black-holing, single-entry closures could also be entered via node
+(rather than directly) to catch double-entry.
+
+\begin{code}
+data EntryConvention
+ = ViaNode -- The "normal" convention
+
+ | StdEntry CLabel -- Jump to this code, with args on stack
+ (Maybe CLabel) -- possibly setting infoptr to this
+
+ | DirectEntry -- Jump directly to code, with args in regs
+ CLabel -- The code label
+ Int -- Its arity
+ [MagicId] -- Its register assignments (possibly empty)
+
+getEntryConvention :: Id -- Function being applied
+ -> LambdaFormInfo -- Its info
+ -> [PrimKind] -- Available arguments
+ -> FCode EntryConvention
+
+getEntryConvention id lf_info arg_kinds
+ = nodeMustPointToIt lf_info `thenFC` \ node_points ->
+ isSwitchSetC ForConcurrent `thenFC` \ is_concurrent ->
+ returnFC (
+
+ if (node_points && is_concurrent) then ViaNode else
+
+ case lf_info of
+
+ LFReEntrant _ arity _ ->
+ if arity == 0 || (length arg_kinds) < arity then
+ StdEntry (mkStdEntryLabel id) Nothing
+ else
+ DirectEntry (mkFastEntryLabel id arity) arity arg_regs
+ where
+ (arg_regs, _) = assignRegs live_regs (take arity arg_kinds)
+ live_regs = if node_points then [node] else []
+
+ LFCon con zero_arity
+ -> let itbl = if zero_arity then
+ mkPhantomInfoTableLabel con
+ else
+ mkInfoTableLabel con
+ in StdEntry (mkStdEntryLabel con) (Just itbl)
+ -- Should have no args
+ LFTuple tup zero_arity
+ -> StdEntry (mkStdEntryLabel tup)
+ (Just (mkInfoTableLabel tup))
+ -- Should have no args
+
+ LFThunk _ _ updatable std_form_info
+ -> if updatable
+ then ViaNode
+ else StdEntry (thunkEntryLabel id std_form_info updatable) Nothing
+
+ LFArgument -> ViaNode
+ LFImported -> ViaNode
+ LFBlackHole -> ViaNode -- Presumably the black hole has by now
+ -- been updated, but we don't know with
+ -- what, so we enter via Node
+
+ LFLetNoEscape arity _
+ -> ASSERT(arity == length arg_kinds)
+ DirectEntry (mkFastEntryLabel id arity) arity arg_regs
+ where
+ (arg_regs, _) = assignRegs live_regs arg_kinds
+ live_regs = if node_points then [node] else []
+ )
+
+blackHoleOnEntry :: Bool -- No-black-holing flag
+ -> ClosureInfo
+ -> Bool
+
+-- Static closures are never themselves black-holed.
+-- Updatable ones will be overwritten with a CAFList cell, which points to a black hole;
+-- Single-entry ones have no fvs to plug, and we trust they don't form part of a loop.
+
+blackHoleOnEntry no_black_holing (MkClosureInfo _ _ (StaticRep _ _)) = False
+
+blackHoleOnEntry no_black_holing (MkClosureInfo _ lf_info _)
+ = case lf_info of
+ LFReEntrant _ _ _ -> False
+ LFThunk _ no_fvs updatable _
+ -> if updatable
+ then not no_black_holing
+ else not no_fvs
+ other -> panic "blackHoleOnEntry" -- Should never happen
+
+getStandardFormThunkInfo
+ :: LambdaFormInfo
+ -> Maybe [PlainStgAtom] -- Nothing => not a standard-form thunk
+ -- Just atoms => a standard-form thunk with payload atoms
+
+getStandardFormThunkInfo (LFThunk _ _ _ (SelectorThunk scrutinee _ _))
+ = --trace "Selector thunk: missed opportunity to save info table + code"
+ Nothing
+ -- Just [StgVarAtom scrutinee]
+ -- We can't save the info tbl + code until we have a way to generate
+ -- a fixed family thereof.
+
+getStandardFormThunkInfo (LFThunk _ _ _ (VapThunk fun_id args fun_in_payload))
+ | fun_in_payload = Just (StgVarAtom fun_id : args)
+ | otherwise = Just args
+
+getStandardFormThunkInfo other_lf_info = Nothing
+
+maybeSelectorInfo (MkClosureInfo _ (LFThunk _ _ _ (SelectorThunk _ con offset)) _) = Just (con,offset)
+maybeSelectorInfo _ = Nothing
+\end{code}
+
+Avoiding generating entries and info tables
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+At present, for every function we generate all of the following,
+just in case. But they aren't always all needed, as noted below:
+
+[NB1: all of this applies only to *functions*. Thunks always
+have closure, info table, and entry code.]
+
+[NB2: All are needed if the function is *exported*, just to play safe.]
+
+
+* Fast-entry code ALWAYS NEEDED
+
+* Slow-entry code
+ Needed iff (a) we have any un-saturated calls to the function
+ OR (b) the function is passed as an arg
+ OR (c) we're in the parallel world and the function has free vars
+ [Reason: in parallel world, we always enter functions
+ with free vars via the closure.]
+
+* The function closure
+ Needed iff (a) we have any un-saturated calls to the function
+ OR (b) the function is passed as an arg
+ OR (c) if the function has free vars (ie not top level)
+
+ Why case (a) here? Because if the arg-satis check fails,
+ UpdatePAP stuffs a pointer to the function closure in the PAP.
+ [Could be changed; UpdatePAP could stuff in a code ptr instead,
+ but doesn't seem worth it.]
+
+ [NB: these conditions imply that we might need the closure
+ without the slow-entry code. Here's how.
+
+ f x y = let g w = ...x..y..w...
+ in
+ ...(g t)...
+
+ Here we need a closure for g which contains x and y,
+ but since the calls are all saturated we just jump to the
+ fast entry point for g, with R1 pointing to the closure for g.]
+
+
+* Standard info table
+ Needed iff (a) we have any un-saturated calls to the function
+ OR (b) the function is passed as an arg
+ OR (c) the function has free vars (ie not top level)
+
+ NB. In the sequential world, (c) is only required so that the function closure has
+ an info table to point to, to keep the storage manager happy.
+ If (c) alone is true we could fake up an info table by choosing
+ one of a standard family of info tables, whose entry code just
+ bombs out.
+
+ [NB In the parallel world (c) is needed regardless because
+ we enter functions with free vars via the closure.]
+
+ If (c) is retained, then we'll sometimes generate an info table
+ (for storage mgr purposes) without slow-entry code. Then we need
+ to use an error label in the info table to substitute for the absent
+ slow entry code.
+
+* Standard vap-entry code
+ Standard vap-entry info table
+ Needed iff we have any updatable thunks of the standard vap-entry shape.
+
+* Single-update vap-entry code
+ Single-update vap-entry info table
+ Needed iff we have any non-updatable thunks of the
+ standard vap-entry shape.
+
+
+\begin{code}
+staticClosureRequired
+ :: Id
+ -> StgBinderInfo
+ -> LambdaFormInfo
+ -> Bool
+staticClosureRequired binder (StgBinderInfo arg_occ unsat_occ _ _ _)
+ (LFReEntrant top_level _ _) -- It's a function
+ = ASSERT( top_level ) -- Assumption: it's a top-level, no-free-var binding
+ arg_occ -- There's an argument occurrence
+ || unsat_occ -- There's an unsaturated call
+ || externallyVisibleId binder
+
+staticClosureRequired binder other_binder_info other_lf_info = True
+
+slowFunEntryCodeRequired -- Assumption: it's a function, not a thunk.
+ :: Id
+ -> StgBinderInfo
+ -> Bool
+slowFunEntryCodeRequired binder (StgBinderInfo arg_occ unsat_occ _ _ _)
+ = arg_occ -- There's an argument occurrence
+ || unsat_occ -- There's an unsaturated call
+ || externallyVisibleId binder
+ {- HAS FREE VARS AND IS PARALLEL WORLD -}
+
+slowFunEntryCodeRequired binder NoStgBinderInfo = True
+
+funInfoTableRequired
+ :: Id
+ -> StgBinderInfo
+ -> LambdaFormInfo
+ -> Bool
+funInfoTableRequired binder (StgBinderInfo arg_occ unsat_occ _ _ _)
+ (LFReEntrant top_level _ _)
+ = not top_level
+ || arg_occ -- There's an argument occurrence
+ || unsat_occ -- There's an unsaturated call
+ || externallyVisibleId binder
+
+funInfoTableRequired binder other_binder_info other_lf_info = True
+
+-- We need the vector-apply entry points for a function if
+-- there's a vector-apply occurrence in this module
+
+stdVapRequired, noUpdVapRequired :: StgBinderInfo -> Bool
+
+stdVapRequired binder_info
+ = case binder_info of
+ StgBinderInfo _ _ std_vap_occ _ _ -> std_vap_occ
+ _ -> False
+
+noUpdVapRequired binder_info
+ = case binder_info of
+ StgBinderInfo _ _ _ no_upd_vap_occ _ -> no_upd_vap_occ
+ _ -> False
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[ClosureInfo-misc-funs]{Misc functions about @ClosureInfo@, etc.}
+%* *
+%************************************************************************
+
+\begin{code}
+isConstantRep, isSpecRep, isStaticRep, isPhantomRep, isIntLikeRep :: SMRep -> Bool
+isConstantRep (SpecialisedRep ConstantRep _ _ _) = True
+isConstantRep other = False
+
+isSpecRep (SpecialisedRep kind _ _ _) = True -- All the kinds of Spec closures
+isSpecRep other = False -- True indicates that the _VHS is 0 !
+
+isStaticRep (StaticRep _ _) = True
+isStaticRep _ = False
+
+isPhantomRep PhantomRep = True
+isPhantomRep _ = False
+
+isIntLikeRep (SpecialisedRep IntLikeRep _ _ _) = True
+isIntLikeRep other = False
+
+isStaticClosure :: ClosureInfo -> Bool
+isStaticClosure (MkClosureInfo _ _ rep) = isStaticRep rep
+
+closureId :: ClosureInfo -> Id
+closureId (MkClosureInfo id _ _) = id
+
+closureSMRep :: ClosureInfo -> SMRep
+closureSMRep (MkClosureInfo _ _ sm_rep) = sm_rep
+
+closureLFInfo :: ClosureInfo -> LambdaFormInfo
+closureLFInfo (MkClosureInfo _ lf_info _) = lf_info
+
+closureUpdReqd :: ClosureInfo -> Bool
+
+closureUpdReqd (MkClosureInfo _ (LFThunk _ _ upd _) _) = upd
+closureUpdReqd (MkClosureInfo _ LFBlackHole _) = True
+ -- Black-hole closures are allocated to receive the results of an
+ -- alg case with a named default... so they need to be updated.
+closureUpdReqd other_closure = False
+
+closureSingleEntry :: ClosureInfo -> Bool
+
+closureSingleEntry (MkClosureInfo _ (LFThunk _ _ upd _) _) = not upd
+closureSingleEntry other_closure = False
+\end{code}
+
+Note: @closureType@ returns appropriately specialised tycon and
+datacons.
+\begin{code}
+closureType :: ClosureInfo -> Maybe (TyCon, [UniType], [Id])
+
+-- First, a turgid special case. When we are generating the
+-- standard code and info-table for Vaps (which is done when the function
+-- defn is encountered), we don't have a convenient Id to hand whose
+-- type is that of (f x y z). So we need to figure out the type
+-- rather than take it from the Id. The Id is probably just "f"!
+
+closureType (MkClosureInfo id (LFThunk _ _ _ (VapThunk fun_id args _)) _)
+ = getUniDataSpecTyCon_maybe (funResultTy de_foralld_ty (length args))
+ where
+ (_, de_foralld_ty) = splitForalls (getIdUniType fun_id)
+
+closureType (MkClosureInfo id lf _) = getUniDataSpecTyCon_maybe (getIdUniType id)
+\end{code}
+
+@closureReturnsUnboxedType@ is used to check whether a closure, {\em
+once it has eaten its arguments}, returns an unboxed type. For
+example, the closure for a function:
+\begin{verbatim}
+ f :: Int -> Int#
+\end{verbatim}
+returns an unboxed type. This is important when dealing with stack
+overflow checks.
+\begin{code}
+closureReturnsUnboxedType :: ClosureInfo -> Bool
+
+closureReturnsUnboxedType (MkClosureInfo fun_id (LFReEntrant _ arity _) _)
+ = isPrimType (funResultTy de_foralld_ty arity)
+ where
+ (_, de_foralld_ty) = splitForalls (getIdUniType fun_id)
+
+closureReturnsUnboxedType other_closure = False
+ -- All non-function closures aren't functions,
+ -- and hence are boxed, since they are heap alloc'd
+\end{code}
+
+\begin{code}
+closureSemiTag :: ClosureInfo -> Int
+
+closureSemiTag (MkClosureInfo _ lf_info _)
+ = case lf_info of
+ LFCon data_con _ -> getDataConTag data_con - fIRST_TAG
+ LFTuple _ _ -> 0
+ LFIndirection -> fromInteger iND_TAG
+ _ -> fromInteger oTHER_TAG
+\end{code}
+
+Label generation.
+
+\begin{code}
+infoTableLabelFromCI :: ClosureInfo -> CLabel
+
+infoTableLabelFromCI (MkClosureInfo id lf_info rep)
+ = case lf_info of
+ LFCon con _ -> mkConInfoPtr con rep
+ LFTuple tup _ -> mkConInfoPtr tup rep
+
+ LFBlackHole -> mkBlackHoleInfoTableLabel
+
+ LFThunk _ _ upd_flag (VapThunk fun_id args _) -> mkVapInfoTableLabel fun_id upd_flag
+ -- Use the standard vap info table
+ -- for the function, rather than a one-off one
+ -- for this particular closure
+
+{- For now, we generate individual info table and entry code for selector thunks,
+ so their info table should be labelled in the standard way.
+ The only special thing about them is that the info table has a field which
+ tells the GC that it really is a selector.
+
+ Later, perhaps, we'll have some standard RTS code for selector-thunk info tables,
+ in which case this line will spring back to life.
+
+ LFThunk _ _ upd_flag (SelectorThunk _ _ offset) -> mkSelectorInfoTableLabel upd_flag offset
+ -- Ditto for selectors
+-}
+
+ other -> if isStaticRep rep
+ then mkStaticInfoTableLabel id
+ else mkInfoTableLabel id
+
+mkConInfoPtr :: Id -> SMRep -> CLabel
+mkConInfoPtr id rep =
+ case rep of
+ PhantomRep -> mkPhantomInfoTableLabel id
+ StaticRep _ _ -> mkStaticInfoTableLabel id
+ _ -> mkInfoTableLabel id
+
+mkConEntryPtr :: Id -> SMRep -> CLabel
+mkConEntryPtr id rep =
+ case rep of
+ StaticRep _ _ -> mkStaticConEntryLabel id
+ _ -> mkConEntryLabel id
+
+
+closureLabelFromCI (MkClosureInfo id _ _) = mkClosureLabel id
+
+entryLabelFromCI :: ClosureInfo -> CLabel
+entryLabelFromCI (MkClosureInfo id lf_info rep)
+ = case lf_info of
+ LFThunk _ _ upd_flag std_form_info -> thunkEntryLabel id std_form_info upd_flag
+ LFCon con _ -> mkConEntryPtr con rep
+ LFTuple tup _ -> mkConEntryPtr tup rep
+ other -> mkStdEntryLabel id
+
+-- thunkEntryLabel is a local help function, not exported. It's used from both
+-- entryLabelFromCI and getEntryConvention.
+-- I don't think it needs to deal with the SelectorThunk case
+-- Well, it's falling over now, so I've made it deal with it. (JSM)
+
+thunkEntryLabel thunk_id (VapThunk fun_id args _) is_updatable
+ = mkVapEntryLabel fun_id is_updatable
+thunkEntryLabel thunk_id _ is_updatable
+ = mkStdEntryLabel thunk_id
+
+fastLabelFromCI :: ClosureInfo -> CLabel
+fastLabelFromCI (MkClosureInfo id _ _) = mkFastEntryLabel id fun_arity
+ where
+ arity_maybe = arityMaybe (getIdArity id)
+ fun_arity = case arity_maybe of
+ Just x -> x
+ _ -> pprPanic "fastLabelFromCI:no arity:" (ppr PprShowAll id)
+\end{code}
+
+\begin{code}
+allocProfilingMsg :: ClosureInfo -> FAST_STRING
+
+allocProfilingMsg (MkClosureInfo _ lf_info _)
+ = case lf_info of
+ LFReEntrant _ _ _ -> SLIT("ALLOC_FUN")
+ LFCon _ _ -> SLIT("ALLOC_CON")
+ LFTuple _ _ -> SLIT("ALLOC_CON")
+ LFThunk _ _ _ _ -> SLIT("ALLOC_THK")
+ LFBlackHole -> SLIT("ALLOC_BH")
+ LFIndirection -> panic "ALLOC_IND"
+ LFImported -> panic "ALLOC_IMP"
+\end{code}
+
+We need a black-hole closure info to pass to @allocDynClosure@
+when we want to allocate the black hole on entry to a CAF.
+
+\begin{code}
+blackHoleClosureInfo (MkClosureInfo id _ _) = MkClosureInfo id LFBlackHole BlackHoleRep
+\end{code}
+
+The register liveness when returning from a constructor. For simplicity,
+we claim just [node] is live for all but PhantomRep's. In truth, this means
+that non-constructor info tables also claim node, but since their liveness
+information is never used, we don't care.
+
+\begin{code}
+
+dataConLiveness (MkClosureInfo con _ PhantomRep)
+ = case dataReturnConvAlg con of
+ ReturnInRegs regs -> mkLiveRegsBitMask regs
+ ReturnInHeap -> panic "dataConLiveness:PhantomRep in heap???"
+
+dataConLiveness _ = mkLiveRegsBitMask [node]
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[ClosureInfo-Profiling-funs]{Misc functions about profiling info.}
+%* *
+%************************************************************************
+
+Profiling requires three pieces of information to be determined for
+each closure's info table --- kind, description and type.
+
+The description is stored directly in the @CClosureInfoTable@ when the
+info table is built.
+
+The kind is determined from the @LambdaForm@ stored in the closure
+info using @closureKind@.
+
+The type is determined from the type information stored with the @Id@
+in the closure info using @closureTypeDescr@.
+
+\begin{code}
+closureKind :: ClosureInfo -> String
+
+closureKind (MkClosureInfo _ lf _)
+ = case lf of
+ LFReEntrant _ n _ -> if n > 0 then "FN_K" else "THK_K"
+ LFCon _ _ -> "CON_K"
+ LFTuple _ _ -> "CON_K"
+ LFThunk _ _ _ _ -> "THK_K"
+ LFBlackHole -> "THK_K" -- consider BHs as thunks for the moment... (ToDo?)
+ LFIndirection -> panic "IND_KIND"
+ LFImported -> panic "IMP_KIND"
+
+closureTypeDescr :: ClosureInfo -> String
+closureTypeDescr (MkClosureInfo id lf _)
+ = if (isDataCon id) then -- DataCon has function types
+ _UNPK_ (getOccurrenceName (getDataConTyCon id)) -- We want the TyCon not the ->
+ else
+ getUniTyDescription (getIdUniType id)
+\end{code}
+
diff --git a/ghc/compiler/codeGen/CodeGen.hi b/ghc/compiler/codeGen/CodeGen.hi
new file mode 100644
index 0000000000..28362e74a3
--- /dev/null
+++ b/ghc/compiler/codeGen/CodeGen.hi
@@ -0,0 +1,27 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface CodeGen where
+import AbsCSyn(AbstractC, CAddrMode, CStmtMacro, MagicId, RegRelative, ReturnInfo)
+import BasicLit(BasicLit)
+import CLabelInfo(CLabel)
+import ClosureInfo(ClosureInfo)
+import CmdLineOpts(GlobalSwitch, SwitchResult)
+import CostCentre(CostCentre)
+import FiniteMap(FiniteMap)
+import Id(Id, IdDetails)
+import IdInfo(IdInfo)
+import Maybes(Labda)
+import PreludePS(_PackedString)
+import PrimOps(PrimOp)
+import StgSyn(StgBinding, StgRhs)
+import TyCon(TyCon)
+import UniType(UniType)
+import UniqFM(UniqFM)
+import Unique(Unique)
+data AbstractC {-# GHC_PRAGMA AbsCNop | AbsCStmts AbstractC AbstractC | CAssign CAddrMode CAddrMode | CJump CAddrMode | CFallThrough CAddrMode | CReturn CAddrMode ReturnInfo | CSwitch CAddrMode [(BasicLit, AbstractC)] AbstractC | CCodeBlock CLabel AbstractC | CInitHdr ClosureInfo RegRelative CAddrMode Bool | COpStmt [CAddrMode] PrimOp [CAddrMode] Int [MagicId] | CSimultaneous AbstractC | CMacroStmt CStmtMacro [CAddrMode] | CCallProfCtrMacro _PackedString [CAddrMode] | CCallProfCCMacro _PackedString [CAddrMode] | CStaticClosure CLabel ClosureInfo CAddrMode [CAddrMode] | CClosureInfoAndCode ClosureInfo AbstractC (Labda AbstractC) CAddrMode [Char] | CRetVector CLabel [Labda CAddrMode] AbstractC | CRetUnVector CLabel CAddrMode | CFlatRetVector CLabel [CAddrMode] | CCostCentreDecl Bool CostCentre | CClosureUpdInfo AbstractC | CSplitMarker #-}
+data FiniteMap a b {-# GHC_PRAGMA EmptyFM | Branch a b Int# (FiniteMap a b) (FiniteMap a b) #-}
+data Id {-# GHC_PRAGMA Id Unique UniType IdInfo IdDetails #-}
+data StgBinding a b {-# GHC_PRAGMA StgNonRec a (StgRhs a b) | StgRec [(a, StgRhs a b)] #-}
+data UniqFM a {-# GHC_PRAGMA EmptyUFM | LeafUFM Int# a | NodeUFM Int# Int# (UniqFM a) (UniqFM a) #-}
+codeGen :: _PackedString -> ([CostCentre], [CostCentre]) -> [_PackedString] -> (GlobalSwitch -> SwitchResult) -> [TyCon] -> FiniteMap TyCon [[Labda UniType]] -> [StgBinding Id Id] -> AbstractC
+ {-# GHC_PRAGMA _A_ 7 _U_ 2112112 _N_ _S_ "LU(LL)LSLLL" _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/CodeGen.lhs b/ghc/compiler/codeGen/CodeGen.lhs
new file mode 100644
index 0000000000..a1aa854e7e
--- /dev/null
+++ b/ghc/compiler/codeGen/CodeGen.lhs
@@ -0,0 +1,177 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1995
+%
+\section[CodeGen]{@CodeGen@: main module of the code generator}
+
+This module says how things get going at the top level.
+
+@codeGen@ is the interface to the outside world. The \tr{cgTop*}
+functions drive the mangling of top-level bindings.
+
+%************************************************************************
+%* *
+\subsection[codeGen-outside-interface]{The code generator's offering to the world}
+%* *
+%************************************************************************
+
+\begin{code}
+#include "HsVersions.h"
+
+module CodeGen (
+ codeGen,
+
+ -- and to make the interface self-sufficient...
+ UniqFM, AbstractC, StgBinding, Id, FiniteMap
+ ) where
+
+
+import StgSyn
+import CgMonad
+import AbsCSyn
+
+import CLabelInfo ( modnameToC )
+import CgClosure ( cgTopRhsClosure )
+import CgCon ( cgTopRhsCon )
+import CgConTbls ( genStaticConBits, TCE(..), UniqFM )
+import ClosureInfo ( LambdaFormInfo, mkClosureLFInfo )
+import CmdLineOpts ( GlobalSwitch(..), switchIsOn, stringSwitchSet, SwitchResult )
+import FiniteMap ( FiniteMap )
+import Maybes ( Maybe(..) )
+import PrimKind ( getKindSize )
+import Util
+\end{code}
+
+\begin{code}
+codeGen :: FAST_STRING -- module name
+ -> ([CostCentre], -- local cost-centres needing declaring/registering
+ [CostCentre]) -- "extern" cost-centres needing declaring
+ -> [FAST_STRING] -- import names
+ -> (GlobalSwitch -> SwitchResult)
+ -- global switch lookup function
+ -> [TyCon] -- tycons with data constructors to convert
+ -> FiniteMap TyCon [[Maybe UniType]]
+ -- tycon specialisation info
+ -> PlainStgProgram -- bindings to convert
+ -> AbstractC -- output
+
+codeGen mod_name (local_CCs, extern_CCs) import_names sw_lookup_fn gen_tycons tycon_specs stg_pgm
+ = let
+ switch_is_on = switchIsOn sw_lookup_fn
+ doing_profiling = switch_is_on SccProfilingOn
+ compiling_prelude = switch_is_on CompilingPrelude
+ splitting = switch_is_on (EnsureSplittableC (panic "codeGen:esc"))
+ in
+ if not doing_profiling then
+ let
+ cinfo = MkCompInfo switch_is_on mod_name
+ in
+ mkAbstractCs [
+ genStaticConBits cinfo gen_tycons tycon_specs,
+ initC cinfo (cgTopBindings splitting stg_pgm) ]
+
+ else -- yes, cost-centre profiling:
+ -- Besides the usual stuff, we must produce:
+ --
+ -- * Declarations for the cost-centres defined in this module;
+ -- * Code to participate in "registering" all the cost-centres
+ -- in the program (done at startup time when the pgm is run).
+ --
+ -- (The local cost-centres involved in this are passed
+ -- into the code-generator, as are the imported-modules' names.)
+ --
+ -- Note: we don't register/etc if compiling Prelude bits.
+ let
+ cinfo = MkCompInfo switch_is_on mod_name
+ in
+ mkAbstractCs [
+ if compiling_prelude
+ then AbsCNop
+ else mkAbstractCs [mkAbstractCs (map (CCostCentreDecl True) local_CCs),
+ mkAbstractCs (map (CCostCentreDecl False) extern_CCs),
+ mkCcRegister local_CCs import_names],
+
+ genStaticConBits cinfo gen_tycons tycon_specs,
+ initC cinfo (cgTopBindings splitting stg_pgm) ]
+ where
+ -----------------
+ grp_name = case (stringSwitchSet sw_lookup_fn SccGroup) of
+ Just xx -> _PK_ xx
+ Nothing -> mod_name -- default: module name
+
+ -----------------
+ mkCcRegister ccs import_names
+ = let
+ register_ccs = mkAbstractCs (map mk_register ccs)
+ register_imports = mkAbstractCs (map mk_import_register import_names)
+ in
+ mkAbstractCs [
+ CCallProfCCMacro SLIT("START_REGISTER_CCS") [CLitLit (modnameToC (SLIT("_reg") _APPEND_ mod_name)) AddrKind],
+ register_ccs,
+ register_imports,
+ CCallProfCCMacro SLIT("END_REGISTER_CCS") []
+ ]
+ where
+ mk_register cc
+ = CCallProfCCMacro SLIT("REGISTER_CC") [mkCCostCentre cc]
+
+ mk_import_register import_name
+ = CCallProfCCMacro SLIT("REGISTER_IMPORT") [CLitLit (modnameToC (SLIT("_reg") _APPEND_ import_name)) AddrKind]
+\end{code}
+
+%************************************************************************
+%* *
+\subsection[codegen-top-bindings]{Converting top-level STG bindings}
+%* *
+%************************************************************************
+
+@cgTopBindings@ is only used for top-level bindings, since they need
+to be allocated statically (not in the heap) and need to be labelled.
+No unboxed bindings can happen at top level.
+
+In the code below, the static bindings are accumulated in the
+@MkCgState@, and transferred into the ``statics'' slot by @forkStatics@.
+This is so that we can write the top level processing in a compositional
+style, with the increasing static environment being plumbed as a state
+variable.
+
+\begin{code}
+cgTopBindings :: Bool -> PlainStgProgram -> Code
+
+cgTopBindings splitting bindings = mapCs (cgTopBinding splitting) bindings
+
+cgTopBinding :: Bool -> PlainStgBinding -> Code
+
+cgTopBinding splitting (StgNonRec name rhs)
+ = absC maybe_split `thenC`
+ cgTopRhs name rhs `thenFC` \ (name, info) ->
+ addBindC name info
+ where
+ maybe_split = if splitting then CSplitMarker else AbsCNop
+
+cgTopBinding splitting (StgRec pairs)
+ = absC maybe_split `thenC`
+ fixC (\ new_binds -> addBindsC new_binds `thenC`
+ mapFCs ( \ (b,e) -> cgTopRhs b e ) pairs
+ ) `thenFC` \ new_binds ->
+ addBindsC new_binds
+ where
+ maybe_split = if splitting then CSplitMarker else AbsCNop
+
+-- Urgh! I tried moving the forkStatics call from the rhss of cgTopRhs
+-- to enclose the listFCs in cgTopBinding, but that tickled the
+-- statics "error" call in initC. I DON'T UNDERSTAND WHY!
+
+cgTopRhs :: Id -> PlainStgRhs -> FCode (Id, CgIdInfo)
+ -- the Id is passed along for setting up a binding...
+
+cgTopRhs name (StgRhsCon cc con args)
+ = forkStatics (cgTopRhsCon name con args (all zero_size args))
+ where
+ zero_size atom = getKindSize (getAtomKind atom) == 0
+
+cgTopRhs name (StgRhsClosure cc bi fvs upd_flag args body)
+ = ASSERT(null fvs) -- There should be no free variables
+ forkStatics (cgTopRhsClosure name cc bi args body lf_info)
+ where
+ lf_info = mkClosureLFInfo True{-top level-} [{-no fvs-}] upd_flag args body
+\end{code}
diff --git a/ghc/compiler/codeGen/Jmakefile b/ghc/compiler/codeGen/Jmakefile
new file mode 100644
index 0000000000..03e6c14122
--- /dev/null
+++ b/ghc/compiler/codeGen/Jmakefile
@@ -0,0 +1,19 @@
+/* this is a standalone Jmakefile; NOT part of ghc "make world" */
+
+LitStuffNeededHere(docs depend)
+InfoStuffNeededHere(docs)
+HaskellSuffixRules()
+
+LitSuffixRule(.lit,/*none*/) /* no language really */
+LitSuffixRule(.lhs,.hs) /* Haskell */
+LitSuffixRule(.lhc,.hc) /* Haskell assembler (C) */
+LitSuffixRule(.lprl,.prl) /* Perl */
+LitSuffixRule(.lsh,.sh) /* Bourne shell */
+LitSuffixRule(.lc,.c) /* C */
+LitSuffixRule(.lh,.h)
+LitSuffixRule(.llex,.lex) /* Lex */
+LitSuffixRule(.lflex,.flex) /* Flex */
+
+LIT2LATEX_OPTS=-ttgrind
+
+LitDocRootTargetWithNamedOutput(codegen,lit,codegen-standalone)
diff --git a/ghc/compiler/codeGen/SMRep.hi b/ghc/compiler/codeGen/SMRep.hi
new file mode 100644
index 0000000000..bad95d40e3
--- /dev/null
+++ b/ghc/compiler/codeGen/SMRep.hi
@@ -0,0 +1,37 @@
+{-# GHC_PRAGMA INTERFACE VERSION 5 #-}
+interface SMRep where
+import Outputable(Outputable)
+data SMRep = StaticRep Int Int | SpecialisedRep SMSpecRepKind Int Int SMUpdateKind | GenericRep Int Int SMUpdateKind | BigTupleRep Int | DataRep Int | DynamicRep | BlackHoleRep | PhantomRep | MuTupleRep Int
+data SMSpecRepKind = SpecRep | ConstantRep | CharLikeRep | IntLikeRep
+data SMUpdateKind = SMNormalForm | SMSingleEntry | SMUpdatable
+getSMInfoStr :: SMRep -> [Char]
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _N_ _N_ #-}
+getSMInitHdrStr :: SMRep -> [Char]
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _N_ _N_ #-}
+getSMUpdInplaceHdrStr :: SMRep -> [Char]
+ {-# GHC_PRAGMA _A_ 1 _U_ 1 _N_ _S_ "S" _N_ _N_ #-}
+ltSMRepHdr :: SMRep -> SMRep -> Bool
+ {-# GHC_PRAGMA _A_ 2 _U_ 11 _N_ _S_ "SS" _N_ _N_ #-}
+instance Eq SMRep
+ {-# GHC_PRAGMA _M_ SMRep {-dfun-} _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 3 _!_ _TUP_2 [(SMRep -> SMRep -> Bool), (SMRep -> SMRep -> Bool)] [_CONSTM_ Eq (==) (SMRep), _CONSTM_ Eq (/=) (SMRep)] _N_
+ (==) = _A_ 2 _U_ 22 _N_ _S_ "SS" _N_ _N_,
+ (/=) = _A_ 2 _U_ 22 _N_ _N_ _N_ _N_ #-}
+instance Ord SMRep
+ {-# GHC_PRAGMA _M_ SMRep {-dfun-} _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 9 _!_ _TUP_8 [{{Eq SMRep}}, (SMRep -> SMRep -> Bool), (SMRep -> SMRep -> Bool), (SMRep -> SMRep -> Bool), (SMRep -> SMRep -> Bool), (SMRep -> SMRep -> SMRep), (SMRep -> SMRep -> SMRep), (SMRep -> SMRep -> _CMP_TAG)] [_DFUN_ Eq (SMRep), _CONSTM_ Ord (<) (SMRep), _CONSTM_ Ord (<=) (SMRep), _CONSTM_ Ord (>=) (SMRep), _CONSTM_ Ord (>) (SMRep), _CONSTM_ Ord max (SMRep), _CONSTM_ Ord min (SMRep), _CONSTM_ Ord _tagCmp (SMRep)] _N_
+ (<) = _A_ 2 _U_ 22 _N_ _S_ "SS" _N_ _N_,
+ (<=) = _A_ 2 _U_ 22 _N_ _S_ "SS" _N_ _N_,
+ (>=) = _A_ 2 _U_ 22 _N_ _S_ "SS" _F_ _IF_ARGS_ 0 2 XX 3 \ (u0 :: SMRep) (u1 :: SMRep) -> _APP_ _CONSTM_ Ord (<=) (SMRep) [ u1, u0 ] _N_,
+ (>) = _A_ 2 _U_ 22 _N_ _S_ "SS" _F_ _IF_ARGS_ 0 2 XX 3 \ (u0 :: SMRep) (u1 :: SMRep) -> _APP_ _CONSTM_ Ord (<) (SMRep) [ u1, u0 ] _N_,
+ max = _A_ 2 _U_ 22 _N_ _N_ _N_ _N_,
+ min = _A_ 2 _U_ 22 _N_ _N_ _N_ _N_,
+ _tagCmp = _A_ 2 _U_ 22 _N_ _N_ _N_ _N_ #-}
+instance Outputable SMRep
+ {-# GHC_PRAGMA _M_ SMRep {-dfun-} _A_ 2 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 1 _CONSTM_ Outputable ppr (SMRep) _N_
+ ppr = _A_ 2 _U_ 0220 _N_ _S_ "AL" {_A_ 1 _U_ 220 _N_ _N_ _N_ _N_} _N_ _N_ #-}
+instance Text SMRep
+ {-# GHC_PRAGMA _M_ SMRep {-dfun-} _A_ 0 _N_ _N_ _N_ _F_ _IF_ARGS_ 0 0 X 5 _!_ _TUP_4 [(Int -> [Char] -> [(SMRep, [Char])]), (Int -> SMRep -> [Char] -> [Char]), ([Char] -> [([SMRep], [Char])]), ([SMRep] -> [Char] -> [Char])] [_CONSTM_ Text readsPrec (SMRep), _CONSTM_ Text showsPrec (SMRep), _CONSTM_ Text readList (SMRep), _CONSTM_ Text showList (SMRep)] _N_
+ readsPrec = _A_ 2 _U_ 22 _N_ _S_ _!_ _F_ _IF_ARGS_ 0 2 XX 4 \ (u0 :: Int) (u1 :: [Char]) -> _APP_ _TYAPP_ patError# { (Int -> [Char] -> [(SMRep, [Char])]) } [ _NOREP_S_ "%DPreludeCore.Text.readsPrec\"", u0, u1 ] _N_,
+ showsPrec = _A_ 3 _U_ 012 _N_ _S_ "ASL" {_A_ 2 _U_ 12 _N_ _N_ _N_ _N_} _N_ _N_,
+ readList = _A_ 0 _U_ 2 _N_ _N_ _N_ _N_,
+ showList = _A_ 0 _U_ 12 _N_ _N_ _N_ _N_ #-}
+
diff --git a/ghc/compiler/codeGen/SMRep.lhs b/ghc/compiler/codeGen/SMRep.lhs
new file mode 100644
index 0000000000..fb5b113c2c
--- /dev/null
+++ b/ghc/compiler/codeGen/SMRep.lhs
@@ -0,0 +1,208 @@
+%
+% (c) The GRASP/AQUA Project, Glasgow University, 1992-1995
+%
+\section[SMRep]{Storage manager representations of closures}
+
+This is here, rather than in ClosureInfo, just to keep nhc happy.
+Other modules should access this info through ClosureInfo.
+
+\begin{code}
+#include "HsVersions.h"
+
+module SMRep (
+ SMRep(..), SMSpecRepKind(..), SMUpdateKind(..),
+ getSMInfoStr, getSMInitHdrStr, getSMUpdInplaceHdrStr,
+ ltSMRepHdr -- UNUSED, equivSMRepHdr
+ ) where
+
+import Outputable
+import Pretty
+import Util
+\end{code}
+
+%************************************************************************
+%* *
+\subsubsection[SMRep-datatype]{@SMRep@---storage manager representation}
+%* *
+%************************************************************************
+
+Ways in which a closure may be represented by the storage manager;
+this list slavishly follows the storage-manager interface document.
+
+\begin{code}
+data SMSpecRepKind
+ = SpecRep -- Normal Spec representation
+
+ | ConstantRep -- Common me up with single global copy
+ -- Used for nullary constructors
+
+ | CharLikeRep -- Common me up with entry from global table
+
+ | IntLikeRep -- Common me up with entry from global table,
+ -- if the intlike field is in range.
+
+data SMUpdateKind
+ = SMNormalForm -- Normal form, no update
+ | SMSingleEntry -- Single entry thunk, non-updatable
+ | SMUpdatable -- Shared thunk, updatable
+
+data SMRep
+ = StaticRep -- Don't move me, Oh garbage collector!
+ -- Used for all statically-allocated closures.
+ Int -- # ptr words (useful for interpreter, debugger, etc)
+ Int -- # non-ptr words
+
+ | SpecialisedRep -- GC routines know size etc
+ -- All have same _HS = SPEC_HS and no _VHS
+ SMSpecRepKind -- Which kind of specialised representation
+ Int -- # ptr words
+ Int -- # non-ptr words
+ SMUpdateKind -- Updatable?
+
+ | GenericRep -- GC routines consult sizes in info tbl
+ Int -- # ptr words
+ Int -- # non-ptr words
+ SMUpdateKind -- Updatable?
+
+ | BigTupleRep -- All ptrs, size in var-hdr field
+ -- Used for big tuples
+ Int -- # ptr words
+
+ | DataRep -- All non-ptrs, size in var-hdr field
+ -- Used for arbitrary-precision integers, strings
+ Int -- # non-ptr words
+
+ | DynamicRep -- Size and # ptrs in var-hdr field
+ -- Used by RTS for partial applications
+
+ | BlackHoleRep -- for black hole closures
+
+ | PhantomRep -- for "phantom" closures that only exist in registers
+
+ | MuTupleRep -- All ptrs, size in var-hdr field
+ -- Used for mutable tuples
+ Int -- # ptr words
+
+instance Eq SMRep where
+ (SpecialisedRep k1 a1 b1 _) == (SpecialisedRep k2 a2 b2 _) = (tagOf_SMSpecRepKind k1) _EQ_ (tagOf_SMSpecRepKind k2)
+ && a1 == a2 && b1 == b2
+ (GenericRep a1 b1 _) == (GenericRep a2 b2 _) = a1 == a2 && b1 == b2
+ (BigTupleRep a1) == (BigTupleRep a2) = a1 == a2
+ (MuTupleRep a1) == (MuTupleRep a2) = a1 == a2
+ (DataRep a1) == (DataRep a2) = a1 == a2
+ a == b = (tagOf_SMRep a) _EQ_ (tagOf_SMRep b)
+
+{- UNUSED:
+equivSMRepHdr :: SMRep -> SMRep -> Bool
+a `equivSMRepHdr` b = (tagOf_SMRep a) _EQ_ (tagOf_SMRep b)
+-}
+
+ltSMRepHdr :: SMRep -> SMRep -> Bool
+a `ltSMRepHdr` b = (tagOf_SMRep a) _LT_ (tagOf_SMRep b)
+
+instance Ord SMRep where
+ -- ToDo: cmp-ify? This instance seems a bit weird (WDP 94/10)
+ rep1 <= rep2 = rep1 < rep2 || rep1 == rep2
+ rep1 < rep2
+ = let tag1 = tagOf_SMRep rep1
+ tag2 = tagOf_SMRep rep2
+ in
+ if tag1 _LT_ tag2 then True
+ else if tag1 _GT_ tag2 then False
+ else {- tags equal -} rep1 `lt` rep2
+ where
+ (SpecialisedRep k1 a1 b1 _) `lt` (SpecialisedRep k2 a2 b2 _) =
+ t1 _LT_ t2 || (t1 _EQ_ t2 && (a1 < a2 || (a1 == a2 && b1 < b2)))
+ where t1 = tagOf_SMSpecRepKind k1
+ t2 = tagOf_SMSpecRepKind k2
+ (GenericRep a1 b1 _) `lt` (GenericRep a2 b2 _) = a1 < a2 || (a1 == a2 && b1 < b2)
+ (BigTupleRep a1) `lt` (BigTupleRep a2) = a1 < a2
+ (MuTupleRep a1) `lt` (MuTupleRep a2) = a1 < a2
+ (DataRep a1) `lt` (DataRep a2) = a1 < a2
+ a `lt` b = True
+
+tagOf_SMSpecRepKind SpecRep = (ILIT(1) :: FAST_INT)
+tagOf_SMSpecRepKind ConstantRep = ILIT(2)
+tagOf_SMSpecRepKind CharLikeRep = ILIT(3)
+tagOf_SMSpecRepKind IntLikeRep = ILIT(4)
+
+tagOf_SMRep (StaticRep _ _) = (ILIT(1) :: FAST_INT)
+tagOf_SMRep (SpecialisedRep k _ _ _) = ILIT(2)
+tagOf_SMRep (GenericRep _ _ _) = ILIT(3)
+tagOf_SMRep (BigTupleRep _) = ILIT(4)
+tagOf_SMRep (DataRep _) = ILIT(5)
+tagOf_SMRep DynamicRep = ILIT(6)
+tagOf_SMRep BlackHoleRep = ILIT(7)
+tagOf_SMRep PhantomRep = ILIT(8)
+tagOf_SMRep (MuTupleRep _) = ILIT(9)
+
+instance Text SMRep where
+ showsPrec d rep rest
+ = (case rep of
+ StaticRep _ _ -> "STATIC"
+ SpecialisedRep kind _ _ SMNormalForm -> "SPEC_N"
+ SpecialisedRep kind _ _ SMSingleEntry -> "SPEC_S"
+ SpecialisedRep kind _ _ SMUpdatable -> "SPEC_U"
+ GenericRep _ _ SMNormalForm -> "GEN_N"
+ GenericRep _ _ SMSingleEntry -> "GEN_S"
+ GenericRep _ _ SMUpdatable -> "GEN_U"
+ BigTupleRep _ -> "TUPLE"
+ DataRep _ -> "DATA"
+ DynamicRep -> "DYN"
+ BlackHoleRep -> "BH"
+ PhantomRep -> "INREGS"
+ MuTupleRep _ -> "MUTUPLE") ++ rest
+
+instance Outputable SMRep where
+ ppr sty rep = ppStr (show rep)
+
+getSMInfoStr :: SMRep -> String
+getSMInfoStr (StaticRep _ _) = "STATIC"
+getSMInfoStr (SpecialisedRep ConstantRep _ _ _) = "CONST"
+getSMInfoStr (SpecialisedRep CharLikeRep _ _ _) = "CHARLIKE"
+getSMInfoStr (SpecialisedRep IntLikeRep _ _ _) = "INTLIKE"
+getSMInfoStr (SpecialisedRep SpecRep _ _ SMNormalForm) = "SPEC_N"
+getSMInfoStr (SpecialisedRep SpecRep _ _ SMSingleEntry) = "SPEC_S"
+getSMInfoStr (SpecialisedRep SpecRep _ _ SMUpdatable) = "SPEC_U"
+getSMInfoStr (GenericRep _ _ SMNormalForm) = "GEN_N"
+getSMInfoStr (GenericRep _ _ SMSingleEntry) = "GEN_S"
+getSMInfoStr (GenericRep _ _ SMUpdatable) = "GEN_U"
+getSMInfoStr (BigTupleRep _) = "TUPLE"
+getSMInfoStr (DataRep _ ) = "DATA"
+getSMInfoStr DynamicRep = "DYN"
+getSMInfoStr BlackHoleRep = panic "getSMInfoStr.BlackHole"
+getSMInfoStr PhantomRep = "INREGS"
+getSMInfoStr (MuTupleRep _) = "MUTUPLE"
+
+getSMInitHdrStr :: SMRep -> String
+getSMInitHdrStr (SpecialisedRep IntLikeRep _ _ _) = "SET_INTLIKE"
+getSMInitHdrStr (SpecialisedRep SpecRep _ _ _) = "SET_SPEC"
+getSMInitHdrStr (GenericRep _ _ _) = "SET_GEN"
+getSMInitHdrStr (BigTupleRep _) = "SET_TUPLE"
+getSMInitHdrStr (DataRep _ ) = "SET_DATA"
+getSMInitHdrStr DynamicRep = "SET_DYN"
+getSMInitHdrStr BlackHoleRep = "SET_BH"
+#ifdef DEBUG
+getSMInitHdrStr (StaticRep _ _) = panic "getSMInitHdrStr.Static"
+getSMInitHdrStr PhantomRep = panic "getSMInitHdrStr.Phantom"
+getSMInitHdrStr (MuTupleRep _) = panic "getSMInitHdrStr.Mutuple"
+getSMInitHdrStr (SpecialisedRep ConstantRep _ _ _) = panic "getSMInitHdrStr.Constant"
+getSMInitHdrStr (SpecialisedRep CharLikeRep _ _ _) = panic "getSMInitHdrStr.CharLike"
+#endif
+
+getSMUpdInplaceHdrStr :: SMRep -> String
+getSMUpdInplaceHdrStr (SpecialisedRep ConstantRep _ _ _) = "INPLACE_UPD"
+getSMUpdInplaceHdrStr (SpecialisedRep CharLikeRep _ _ _) = "INPLACE_UPD"
+getSMUpdInplaceHdrStr (SpecialisedRep IntLikeRep _ _ _) = "INPLACE_UPD"
+getSMUpdInplaceHdrStr (SpecialisedRep SpecRep _ _ _) = "INPLACE_UPD"
+#ifdef DEBUG
+getSMUpdInplaceHdrStr (StaticRep _ _) = panic "getSMUpdInplaceHdrStr.Static"
+getSMUpdInplaceHdrStr (GenericRep _ _ _) = panic "getSMUpdInplaceHdrStr.Generic"
+getSMUpdInplaceHdrStr (BigTupleRep _ ) = panic "getSMUpdInplaceHdrStr.BigTuple"
+getSMUpdInplaceHdrStr (DataRep _ ) = panic "getSMUpdInplaceHdrStr.Data"
+getSMUpdInplaceHdrStr DynamicRep = panic "getSMUpdInplaceHdrStr.Dynamic"
+getSMUpdInplaceHdrStr BlackHoleRep = panic "getSMUpdInplaceHdrStr.BlackHole"
+getSMUpdInplaceHdrStr PhantomRep = panic "getSMUpdInplaceHdrStr.Phantom"
+getSMUpdInplaceHdrStr (MuTupleRep _ ) = panic "getSMUpdInplaceHdrStr.MuTuple"
+#endif
+\end{code}
diff --git a/ghc/compiler/codeGen/cgintro.lit b/ghc/compiler/codeGen/cgintro.lit
new file mode 100644
index 0000000000..4df253e4bc
--- /dev/null
+++ b/ghc/compiler/codeGen/cgintro.lit
@@ -0,0 +1,783 @@
+\section[codegen-intro]{Intro/background info for the code generator}
+
+\tr{NOTES.codeGen} LIVES!!!
+
+\begin{verbatim}
+=======================
+NEW! 10 Nov 93 Semi-tagging
+
+Rough idea
+
+ case x of -- NB just a variable scrutinised
+ [] -> ...
+ (p:ps) -> ...p... -- eg. ps not used
+
+generates
+
+ Node = a ptr to x
+ while TRUE do { switch TAG(Node) {
+
+ INDIRECTION_TAG : Node = Node[1]; break; -- Dereference indirection
+
+ OTHER_TAG : adjust stack; push return address; ENTER(Node)
+
+ 0 : adjust stack;
+ JUMP( Nil_case )
+
+ 1 : adjust stack;
+ R2 := Node[2] -- Get ps
+ JUMP( Cons_case )
+ }
+
+* The "return address" is a vector table, which contains pointers to
+ Nil_case and Cons_case.
+
+* The "adjust stack" in the case of OTHER_TAG is one word different to
+ that in the case of a constructor tag (0,1,...), because it needs to
+ take account of the return address. That's why the stack adjust
+ shows up in the branches, rather than before the switch.
+
+* In the case of *unvectored* returns, the "return address" will be
+ some code which switches on TagReg. Currently, the branches of the
+ case at the return address have the code for the alternatives
+ actually there:
+
+ switch TagReg {
+ 0 : code for nil case
+ 1 : code for cons case
+ }
+
+But with semi-tagging, we'll have to label each branch:
+
+ switch TagReg {
+ 0 : JUMP( Nil_case )
+ 1 : JUMP( Cons_case )
+ }
+
+So there's an extra jump. Boring. Boring. (But things are usually
+eval'd...in which case we save a jump.)
+
+* TAG is a macro which gets a "tag" from the info table. The tag
+ encodes whether the thing is (a) an indirection, (b) evaluated
+ constructor with tag N, or (c) something else. The "something else"
+ usually indicates something unevaluated, but it might also include
+ FETCH_MEs etc. Anything which must be entered.
+
+* Maybe we should get the info ptr out of Node, into a temporary
+ InfoPtrReg, so that TAG and ENTER share the info-ptr fetch.
+
+* We only load registers which are live in the alternatives. So at
+ the start of an alternative, either the unused fields *will* be in
+ regs (if we came via enter/return) or they *won't* (if we came via
+ the semi-tagging switch). If they aren't, GC had better not follow
+ them. So we can't arrange that all live ptrs are neatly lined up in
+ the first N regs any more. So GC has to take a liveness
+ bit-pattern, not just a "number of live regs" number.
+
+* We need to know which of the constructors fields are live in the
+ alternatives. Hence STG code has to be elaborated to keep live vars
+ for each alternative, or to tag each bound-var in the alternatives
+ with whether or not it is used.
+
+* The code generator needs to be able to construct unique labels for
+ the case alternatives. (Previously this was done by the AbsC
+ flattening pass.) Reason: we now have an explicit join point at the
+ start of each alternative.
+
+* There's some question about how tags are mapped. Is 0 the first
+ tag? (Good when switching on TagReg when there are only two
+ constructors.) What is OTHER_TAG and INDIRECTION_TAG?
+
+* This whole deal can be freely mixed with un-semi-tagged code.
+ There should be a compiler flag to control it.
+
+=======================
+Many of the details herein are moldy and dubious, but the general
+principles are still mostly sound.
+\end{verbatim}
+
+%************************************************************************
+%* *
+\subsection{LIST OF OPTIMISATIONS TO DO}
+%* *
+%************************************************************************
+
+\begin{itemize}
+\item
+Register return conventions.
+
+\item
+Optimisations for Enter when
+ \begin{itemize}
+ \item
+ know code ptr, so don't indirect via Node
+ \item
+ know how many args
+ \item
+ top level closures don't load Node
+ \end{itemize}
+\item
+Strings.
+
+\item
+Case of unboxed op with more than one alternative, should generate
+a switch or an if statement.
+\end{itemize}
+
+{\em Medium}
+
+\begin{itemize}
+\item
+Don't allocate constructors with no args.
+Instead have a single global one.
+
+\item
+Have global closures for all characters, and all small numbers.
+\end{itemize}
+
+
+{\em Small}
+
+\begin{itemize}
+\item
+When a closure is one of its own free variables, don't waste a field
+on it. Instead just use Node.
+\end{itemize}
+
+
+%************************************************************************
+%* *
+\subsection{ENTERING THE GARBAGE COLLECTOR}
+%* *
+%************************************************************************
+
+[WDP: OLD]
+
+There are the following ways to get into the garbage collector:
+
+\begin{verbatim}
+_HEAP_OVERFLOW_ReturnViaNode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Used for the GC trap at closure entry.
+
+ - Node is only live ptr
+ - After GC, enter Node
+
+_HEAP_OVERFLOW_ReturnDirect0, _HEAP_OVERFLOW_ReturnDirect1, ...
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Used: for fast entry of functions, and
+ case alternative where values are returned in regs
+
+ - PtrReg1..n are live ptrs
+ - ReturnReg points to start of code (before hp oflo check)
+ - After GC, jump to ReturnReg
+ - TagReg is preserved, in case this is an unvectored return
+
+
+_HEAP_OVERFLOW_CaseReturnViaNode
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *** GRIP ONLY ***
+
+Used for case alternatives which return node in heap
+
+ - Node is only live ptr
+ - RetVecReg points to return vector
+ - After GC, push RetVecReg and enter Node
+\end{verbatim}
+
+Exactly equivalent to @GC_ReturnViaNode@, preceded by pushing @ReturnVectorReg@.
+
+The only reason we re-enter Node is so that in a GRIP-ish world, the
+closure pointed to by Node is re-loaded into local store if necessary.
+
+%************************************************************************
+%* *
+\subsection{UPDATES}
+%* *
+%************************************************************************
+
+[New stuff 27 Nov 91]
+
+\subsubsection{Return conventions}
+%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+When executing the update continuation code for a constructor,
+@RetVecReg@ points to the {\em beginning of} the return vector. This is to
+enable the update code to find the normal continuation code.
+(@RetVecReg@ is set up by the code which jumps to the update continuation
+code.)
+
+\subsubsection{Stack arrangement}
+%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Each stack has a ``stack update ptr'', SuA and SuB, which point to the
+topmost word of the stack just after an update frame has been pushed.
+
+A standard update frame (on the B stack) looks like this
+(stack grows downward in this picture):
+
+\begin{verbatim}
+ | |
+ |---------------------------------------|
+ | Saved SuA |
+ |---------------------------------------|
+ | Saved SuB |
+ |---------------------------------------|
+ | Pointer to closure to be updated |
+ |---------------------------------------|
+ | Pointer to Update return vector |
+ |---------------------------------------|
+\end{verbatim}
+
+The SuB therefore points to the Update return vector component of the
+topmost update frame.
+
+A {\em constructor} update frame, which is pushed only by closures
+which know they will evaluate to a data object, looks just the
+same, but without the saved SuA pointer.
+
+\subsubsection{Pushing update frames}
+%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+An update is pushed right at the start of the code for an updatable
+closure. But {\em after} the stack overflow check. (The B-stack oflo
+check should thereby include allowance for the update frame itself.)
+
+\subsubsection{Return vectors}
+%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Every ``return address'' pushed on the stack by a boxed \tr{case} is a
+pointer to a vector of one or more pairs of code pointers:
+
+\begin{verbatim}
+ ------> -----------------
+ | Cont1 |
+ |---------------|
+ | Update1 |
+ -----------------
+ | Cont2 |
+ |---------------|
+ | Update2 |
+ -----------------
+ ...etc...
+\end{verbatim}
+
+Each pair consists of a {\em continuation} code pointer and an
+{\em update} code pointer.
+
+For data types with only one constructor, or too many constructors for
+vectoring, the return vector consists of a single pair.
+
+When the \tr{data} decl for each data type is compiled, as well as
+making info tables for each constructor, an update code sequence for
+each constructor (or a single one, if unvectored) is also created.
+
+ToDo: ** record naming convention for these code sequences somewhere **
+
+When the update code is entered, it uses the value stored in the
+return registers used by that constructor to update the thing pointed
+to by the update frame (all of which except for the return address is
+still on the B stack). If it can do an update in place (ie
+constructor takes 3 words or fewer) it does so.
+
+In the unvectored case, this code first has to do a switch on the tag,
+UNLESS the return is in the heap, in which case simply overwrite with
+an indirection to the thing Node points to.
+
+Tricky point: if the update code can't update in place it has to
+allocate a new object, by performing a heap-oflo check and jumping to
+the appropriate heap-overflow entry point depending on which RetPtr
+registers are live (just as when compiling a case alternative).
+
+When the update code is entered, a register @ReturnReg@ is assumed to
+contain the ``return address'' popped from the B stack. This is so
+that the update code can enter the normal continuation code when it is
+done.
+
+For standard update frames, the A and B stack update ptrs are restored
+from the saved versions before returning, too.
+
+\subsubsection{Update return vector}
+%~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Both standard and constructor update frames have as their topmost word
+a pointer to a static, fixed, update return vector.
+
+The ``continuation'' entry of each pair in this vector sets UpdReg to
+point to the thing to be updated (gotten from the update frame), pops
+the update frame, and returns to the ``update'' entry of the
+corresponding pair in the next return vector (now exposed on top of B
+stk).
+
+The ``update'' entry of each pair in this vector overwrites the thing
+to be updated with an indirection to the thing UpdReg points to, and
+then returns in the same way as the "continuation" entry above.
+
+There need to be enough pairs in the update return vector to cater for
+any constructor at all.
+
+
+*************************
+
+Things which need to be altered if you change the number of constructors
+which switches off vectored returns:
+\begin{verbatim}
+ Extra cases in update return vector (file xxx)
+ The value xxxx in yyyy.lhs
+ others?
+\end{verbatim}
+**************************
+
+%************************************************************************
+%* *
+\subsection{HEAP OBJECTS}
+%* *
+%************************************************************************
+
+The heap consists of {\em closures}.
+A closure can be either:
+\begin{itemize}
+\item
+a {\em suspension}, which is an unevaluated thunk.
+\item
+a {\em constructed object} (or just constructor); created by let(recs) and
+by updating.
+\item
+a {\em partial application} (only updating creates these).
+\end{itemize}
+
+Closures are laid out with the {\em info pointer} at the lowest
+address (but see notes on the Global Address field for parallel
+system). [We don't try to localise knowledge of this! It is a royal
+pain having to cope with closures laid out backwards.]
+
+Ptr fields occur first (before non-ptr ones).
+
+Non-normal-form closures are always at least 3 words in size (excl
+global address), so they can be updated with a list cell (should they
+evaluate to that).
+
+Normal form (constructor) closures are always at least 2 words in size
+(excl global address), so they have room enough for forwarding ptrs
+during GC, and FETCHME boxes after flushing.
+
+1-word closures for normal-form closures in static space. Explain
+more.
+
+Ideally, the info pointer of a closure would point to...
+\begin{verbatim}
+ |-------------|
+ | info table |
+ |-------------|
+info ptr ---> code
+\end{verbatim}
+
+But when C is the target code we can't guarantee the relative
+positions of code and data. So the info ptr points to
+\begin{verbatim}
+ |-------------|
+info ptr ---->| ------------------------> code
+ |-------------|
+ | info table |
+ |-------------|
+\end{verbatim}
+
+That is, there's an extra indirection involved; and the info table
+occurs AFTER the info pointer rather than before. The info table
+entries are ``reversed'' too, so that bigger negative offsets in the
+``usual'' case turn into bigger positive offsets.
+
+SUSPENSIONS
+
+The simplest form of suspension is
+\begin{verbatim}
+ info-ptr, ptr free vars, non-ptr free vars
+\end{verbatim}
+
+where the info table for info-ptr gives
+\begin{itemize}
+\item
+the total number of words of free vars
+\item
+the number of words of ptr free vars (== number of ptr free vars)
+in its extra-info part.
+\end{itemize}
+
+Optimised versions omit the size info from the info table, and instead
+use specialised GC routines.
+
+
+%************************************************************************
+%* *
+\subsection{NAMING CONVENTIONS for compiled code}
+%* *
+%************************************************************************
+
+
+Given a top-level closure called f defined in module M,
+
+\begin{verbatim}
+ _M_f_closure labels the closure itself
+ (only for top-level (ie static) closures)
+
+ _M_f_entry labels the slow entry point of the code
+ _M_f_fast labels the fast entry point of the code
+
+ _M_f_info labels the info pointer for the closure for f
+ (NB the info ptr of a closure isn't public
+ in the sense that these labels
+ are. It is private to a module, and
+ its name can be a secret.)
+\end{verbatim}
+
+These names are the REAL names that the linker sees. The initial underscores
+are attached by the C compiler.
+
+A non-top-level closure has the same names, but as well as the \tr{f}
+the labels have the unique number, so that different local closures
+which share a name don't get confused. The reason we need a naming
+convention at all is that with a little optimisation a tail call may
+jump direct to the fast entry of a locally-defined closure.
+
+\tr{f} may be a constructor, in the case of closures which are the curried
+versions of the constructor.
+
+For constructor closures, we have the following naming conventions, where
+the constructor is C defined in module M:
+
+\begin{verbatim}
+ _M_C_con_info is the info ptr for the constructor
+ _M_C_con_entry is the corresponding code entry point
+\end{verbatim}
+
+%************************************************************************
+%* *
+\subsection{ENTRY CONVENTIONS}
+%* *
+%************************************************************************
+
+\begin{description}
+\item[Constructor objects:]
+ On entry to the code for a constructor (\tr{_M_C_con_entry}), Node
+ points to the constructor object. [Even if the constructor has arity
+ zero...]
+
+\item[Non-top-level suspensions (both fast and slow entries):]
+ Node points to the closure.
+
+\item[Top-level suspensions, slow entry:]
+ ReturnReg points to the slow entry point itself
+
+\item[..ditto, fast entry:]
+ No entry convention
+\end{description}
+
+
+%************************************************************************
+%* *
+\subsection{CONSTRUCTOR RETURN CONVENTIONS}
+%* *
+%************************************************************************
+
+There is lots of excitement concerning the way in which constructors
+are returned to case expressions.
+
+{\em Simplest version}
+%=====================
+
+The return address on the stack points directly to some code. It
+expects:
+
+\begin{verbatim}
+Boxed objects:
+ PtrReg1 points to the constructed value (in the heap) (unless arity=0)
+ Tag contains its tag (unless # of constructors = 1)
+
+Unboxed Ints: IntReg contains the int
+ Float: FloatReg contains the returned value
+\end{verbatim}
+
+{\em Small improvement: vectoring}
+%=================================
+
+If there are fewer than (say) 8 constructors in the type, the return
+address points to a vector of return addresses. The constructor does
+a vectored return. No CSwitch.
+
+Complication: updates. Update frames are built before the type of the
+thing which will be returned is known. Hence their return address
+UPDATE has to be able to handle anything (vectored and nonvectored).
+
+Hence the vector table goes BACKWARD from ONE WORD BEFORE the word
+pointed to by the return address.
+
+{\em Big improvement: contents in registers}
+%===========================================
+
+Constructor with few enough components (eg 8ish) return their
+arguments in registers. [If there is only one constructor in the
+type, the tag register can be pressed into service for this purpose.]
+
+Complication: updates. Update frames are built before the type of the
+thing which will be returned is known. Hence their return address
+UPDATE has to be able to handle anything.
+
+So, a return address is a pointer to a PAIR of return addresses (or
+maybe a pointer to some code immediately preceded by a pointer to some
+code).
+
+The ``main'' return address is just as before.
+
+The ``update'' return address expects just the same regs to be in use
+as the ``main'' address, BUT AS WELL the magic loc UpdPtr points to a
+closure to be updated. It carries out the update, and continues with
+the main return address.
+
+The ``main'' code for UPDATE just loads UpdPtr with the thing to be
+updated, and returns to the "update" entry of the next thing on the
+stack.
+
+The ``update'' entry for UPDATE just overwrites the thing to be
+updated with an indirection to UpdPtr.
+
+These two improvements can be combined orthogonally.
+
+
+%************************************************************************
+%* *
+\subsection{REGISTERS}
+%* *
+%************************************************************************
+
+Separate registers for
+\begin{verbatim}
+ C stack (incl interrupt handling, if this is not done on
+ another stk) (if interrupts don't mangle the C stack,
+ we could save it for most of the time and reuse the
+ register)
+
+ Arg stack
+ Basic value and control stack
+ These two grow towards each other, so they are each
+ other's limits!
+
+ Heap pointer
+\end{verbatim}
+
+And probably also
+\begin{verbatim}
+ Heap limit
+\end{verbatim}
+
+
+%************************************************************************
+%* *
+\subsection{THE OFFSET SWAMP}
+%* *
+%************************************************************************
+
+There are THREE kinds of offset:
+\begin{description}
+\item[virtual offsets:]
+
+ start at 1 at base of frame, and increase towards top of stack.
+
+ don't change when you adjust sp/hp.
+
+ independent of stack direction.
+
+ only exist inside the code generator, pre Abstract C
+
+ for multi-word objects, the offset identifies the word of the
+ object with smallest offset
+
+\item[reg-relative offsets:]
+
+ start at 0 for elt to which sp points, and increase ``into the
+ interesting stuff.''
+
+ Specifically, towards
+ \begin{itemize}
+ \item
+ bottom of stack (for SpA, SpB)
+ \item
+ beginning of heap (for Hp)
+ \item
+ end of closure (for Node)
+ \end{itemize}
+
+ offset for a particular item changes when you adjust sp.
+
+ independent of stack direction.
+
+ exist in abstract C CVal and CAddr addressing modes
+
+ for multi-word objects, the offset identifies the word of the
+ object with smallest offset
+
+\item[real offsets:]
+
+ either the negation or identity of sp-relative offset.
+
+ start at 0 for elt to which sp points, and either increase or
+ decrease towards bottom of stk, depending on stk direction
+
+ exist in real C, usually as a macro call passing an sp-rel offset
+
+ for multi-word objects, the offset identifies the word of the
+ object with lowest address
+\end{description}
+
+%************************************************************************
+%* *
+\subsection{STACKS}
+%* *
+%************************************************************************
+
+There are two stacks, as in the STG paper.
+\begin{description}
+\item[A stack:]
+contains only closure pointers. Its stack ptr is SpA.
+
+\item[B stack:]
+contains basic values, return addresses, update frames.
+Its stack ptr is SpB.
+\end{description}
+
+SpA and SpB point to the topmost allocated word of stack (though they
+may not be up to date in the middle of a basic block).
+
+\subsubsection{STACK ALLOCATION}
+
+A stack and B stack grow towards each other, so they overflow when
+they collide.
+
+The A stack grows downward; the B stack grows upward. [We'll try to
+localise stuff which uses this info.]
+
+We can check for stack {\em overflow} not just at the start of a basic
+block, but at the start of an entire expression evaluation. The
+high-water marks of case-expression alternatives can be max'd.
+
+Within the code for a closure, the ``stack frame'' is deemed to start
+with the last argument taken by the closure (ie the one deepest in the
+stack). Stack slots can then be identified by ``virtual offsets''
+from the base of the frame; the bottom-most word of the frame has
+offset 1.
+
+For multi-word slots (B stack only) the offset identifies the word
+with the smallest virtual offset. [If B grows upward, this is the word
+with the lowest physical address too.]
+
+Since there are two stacks, a ``stack frame'' really consists of two
+stack frames, one on each stack.
+
+For each stack, we keep track of the following:
+
+\begin{verbatim}
+* virtSp virtual stack ptr offset of topmost occupied stack slot
+ (initialised to 0 if no args)
+
+* realSp real stack ptr offset of real stack ptr reg
+ (initialised to 0 if no args)
+
+* tailSp tail-call ptr offset of topmost slot to be retained
+ at next tail call, excluding the
+ argument to the tail call itself
+
+* hwSp high-water mark largest value taken by virtSp
+ in this closure body
+\end{verbatim}
+
+The real stack pointer is (for now) only adjusted at the tail call itself,
+at which point it is made to point to the topmost occupied word of the stack.
+
+We can't always adjust it at the beginning, because we don't
+necessarily know which tail call will be made (a conditional might
+intervene). So stuff is actually put on the stack ``above'' the stack
+pointer. This is ok because interrupts are serviced on a different
+stack.
+
+The code generator works entirely in terms of stack {\em virtual
+offsets}. The conversion to real addressing modes is done solely when
+we look up a binding. When we move a stack pointer, the offsets of
+variables currently bound to stack offsets in the environment will
+change. We provide operations in the @cgBindings@ type to perform
+this offset-change (to wit, @shiftStkOffsets@), leaving open whether
+it is done pronto, or kept separate and applied to lookups.
+
+Stack overflow checking takes place at the start of a closure body, using
+the high-water mark information gotten from the closure body.
+
+
+%************************************************************************
+%* *
+\subsection{HEAP ALLOCATION}
+%* *
+%************************************************************************
+
+Heap ptr reg (Hp) points to the last word of allocated space (and not
+to the first word of free space).
+
+The heap limit register (HpLim) points to the last word of available
+space.
+
+A basic block allocates a chunk of heap called a ``heap frame''.
+The word of the frame nearest to the previously-allocated stuff
+has virtual offset 1, and offsets increase from 1 to the size of the
+frame in words.
+
+Closures are allocated with their code pointers having the lowest virtual
+offset.
+
+NOTE: this means that closures are only laid out with code ptr at
+lowest PHYSICAL address if the heap grows upwards.
+
+Heap ptr reg is moved at the beginning of a basic block to account for
+the allocation of the whole frame. At this time a heap exhaustion
+check is made (has the heap ptr gone past the heap limit?). In the
+basic block, indexed accesses off the heap ptr fill in this newly
+allocated block. [Bias to RISC here: no cheap auto-inc mode, and free
+indexing.]
+
+We maintain the following information during code generation:
+
+\begin{verbatim}
+* virtHp virtual heap ptr offset of last word
+ of the frame allocated so far
+ Starts at 0 and increases.
+* realHp virtual offset of
+ the real Hp register
+\end{verbatim}
+
+Since virtHp only ever increases, it doubles as the heap high water mark.
+
+\subsubsection{BINDINGS}
+
+The code generator maintains info for each name about where it is.
+Each variable maps to:
+
+\begin{verbatim}
+ - its kind
+
+ - its volatile location:- a temporary variable
+ - a virtual heap offset n, meaning the
+ ADDRESS OF a word in the current
+ heap frame
+ - absent
+
+ - its stable location: - a virtual stack offset n, meaning the
+ CONTENTS OF an object in the
+ current stack frame
+ - absent
+\end{verbatim}
+
+\subsubsection{ENTERING AN OBJECT}
+
+When a closure is entered at the normal entry point, the magic locs
+\begin{verbatim}
+ Node points to the closure (unless it is a top-level closure)
+ ReturnReg points to the code being jumped to
+\end{verbatim}
+At the fast entry point, Node is still set up, but ReturnReg may not be.
+[Not sure about this.]