path: root/compiler/nativeGen
Diffstat (limited to 'compiler/nativeGen')
-rw-r--r--  compiler/nativeGen/AsmCodeGen.lhs                     |  22
-rw-r--r--  compiler/nativeGen/Instruction.hs                     |   5
-rw-r--r--  compiler/nativeGen/PIC.hs                             |  60
-rw-r--r--  compiler/nativeGen/PPC/CodeGen.hs                     |  62
-rw-r--r--  compiler/nativeGen/PPC/Instr.hs                       |  29
-rw-r--r--  compiler/nativeGen/PPC/Ppr.hs                         |  40
-rw-r--r--  compiler/nativeGen/PPC/Regs.hs                        |   7
-rw-r--r--  compiler/nativeGen/RegAlloc/Graph/Main.hs             |   2
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/Base.hs            |  10
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/FreeRegs.hs        |  28
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/JoinToTargets.hs   | 458
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/Main.hs            | 214
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/StackMap.hs        |   6
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/State.hs           | 139
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/X86/FreeRegs.hs    |  27
-rw-r--r--  compiler/nativeGen/RegAlloc/Linear/X86_64/FreeRegs.hs |  52
-rw-r--r--  compiler/nativeGen/RegAlloc/Liveness.hs               |  15
-rw-r--r--  compiler/nativeGen/SPARC/Base.hs                      |   6
-rw-r--r--  compiler/nativeGen/SPARC/CodeGen.hs                   |  31
-rw-r--r--  compiler/nativeGen/SPARC/CodeGen/Amode.hs             |   3
-rw-r--r--  compiler/nativeGen/SPARC/CodeGen/Base.hs              |   9
-rw-r--r--  compiler/nativeGen/SPARC/CodeGen/CondCode.hs          |   5
-rw-r--r--  compiler/nativeGen/SPARC/CodeGen/Gen32.hs             |  12
-rw-r--r--  compiler/nativeGen/SPARC/Instr.hs                     |  17
-rw-r--r--  compiler/nativeGen/SPARC/Ppr.hs                       |  40
-rw-r--r--  compiler/nativeGen/SPARC/Stack.hs                     |  15
-rw-r--r--  compiler/nativeGen/X86/CodeGen.hs                     | 191
-rw-r--r--  compiler/nativeGen/X86/Instr.hs                       |  53
-rw-r--r--  compiler/nativeGen/X86/Ppr.hs                         |  10
-rw-r--r--  compiler/nativeGen/X86/Regs.hs                        |  17
30 files changed, 852 insertions(+), 733 deletions(-)
diff --git a/compiler/nativeGen/AsmCodeGen.lhs b/compiler/nativeGen/AsmCodeGen.lhs
index 6b1e93f271..8c608f1bf1 100644
--- a/compiler/nativeGen/AsmCodeGen.lhs
+++ b/compiler/nativeGen/AsmCodeGen.lhs
@@ -139,7 +139,7 @@ data NcgImpl statics instr jumpDest = NcgImpl {
shortcutStatics :: (BlockId -> Maybe jumpDest) -> statics -> statics,
shortcutJump :: (BlockId -> Maybe jumpDest) -> instr -> instr,
pprNatCmmDecl :: NatCmmDecl statics instr -> SDoc,
- maxSpillSlots :: Int,
+ maxSpillSlots :: DynFlags -> Int,
allocatableRegs :: Platform -> [RealReg],
ncg_x86fp_kludge :: [NatCmmDecl statics instr] -> [NatCmmDecl statics instr],
ncgExpandTop :: [NatCmmDecl statics instr] -> [NatCmmDecl statics instr],
@@ -160,7 +160,7 @@ nativeCodeGen dflags h us cmms
,shortcutStatics = X86.Instr.shortcutStatics
,shortcutJump = X86.Instr.shortcutJump
,pprNatCmmDecl = X86.Ppr.pprNatCmmDecl
- ,maxSpillSlots = X86.Instr.maxSpillSlots (target32Bit platform)
+ ,maxSpillSlots = X86.Instr.maxSpillSlots
,allocatableRegs = X86.Regs.allocatableRegs
,ncg_x86fp_kludge = id
,ncgExpandTop = id
@@ -378,7 +378,7 @@ cmmNativeGen dflags ncgImpl us cmm count
-- rewrite assignments to global regs
let fixed_cmm =
{-# SCC "fixStgRegisters" #-}
- fixStgRegisters platform cmm
+ fixStgRegisters dflags cmm
-- cmm to cmm optimisations
let (opt_cmm, imports) =
@@ -428,7 +428,7 @@ cmmNativeGen dflags ncgImpl us cmm count
$ Color.regAlloc
dflags
alloc_regs
- (mkUniqSet [0 .. maxSpillSlots ncgImpl])
+ (mkUniqSet [0 .. maxSpillSlots ncgImpl dflags])
withLiveness
-- dump out what happened during register allocation
@@ -955,13 +955,13 @@ cmmExprConFold referenceKind expr = do
-- SDM: re-enabled for now, while cmmRewriteAssignments is turned off
let expr' = if False -- dopt Opt_TryNewCodeGen dflags
then expr
- else cmmExprCon (targetPlatform dflags) expr
+ else cmmExprCon dflags expr
cmmExprNative referenceKind expr'
-cmmExprCon :: Platform -> CmmExpr -> CmmExpr
-cmmExprCon platform (CmmLoad addr rep) = CmmLoad (cmmExprCon platform addr) rep
-cmmExprCon platform (CmmMachOp mop args)
- = cmmMachOpFold platform mop (map (cmmExprCon platform) args)
+cmmExprCon :: DynFlags -> CmmExpr -> CmmExpr
+cmmExprCon dflags (CmmLoad addr rep) = CmmLoad (cmmExprCon dflags addr) rep
+cmmExprCon dflags (CmmMachOp mop args)
+ = cmmMachOpFold dflags mop (map (cmmExprCon dflags) args)
cmmExprCon _ other = other
-- handles both PIC and non-PIC cases... a very strange mixture
@@ -993,9 +993,9 @@ cmmExprNative referenceKind expr = do
-> do
dynRef <- cmmMakeDynamicReference dflags addImportCmmOpt referenceKind lbl
-- need to optimize here, since it's late
- return $ cmmMachOpFold platform (MO_Add wordWidth) [
+ return $ cmmMachOpFold dflags (MO_Add (wordWidth dflags)) [
dynRef,
- (CmmLit $ CmmInt (fromIntegral off) wordWidth)
+ (CmmLit $ CmmInt (fromIntegral off) (wordWidth dflags))
]
-- On powerpc (non-PIC), it's easier to jump directly to a label than
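Aside: the recurring pattern in this patch is to thread DynFlags through functions that previously took a Platform or read global constants such as wordWidth, and to look the platform-dependent values up at each use site. A minimal, self-contained sketch of that shape (the types and names below are simplified stand-ins, not GHC's):

data Width = W32 | W64 deriving (Eq, Show)

data DynFlags = DynFlags { targetWordWidth :: Width }

data Expr
  = Lit Integer Width
  | Add Expr Expr
  deriving Show

-- Fold additions of literals, choosing the result width from DynFlags
-- instead of a hard-wired, compile-time word size.
constFold :: DynFlags -> Expr -> Expr
constFold dflags (Add a b) =
  case (constFold dflags a, constFold dflags b) of
    (Lit x _, Lit y _) -> Lit (x + y) (targetWordWidth dflags)
    (a', b')           -> Add a' b'
constFold _ e = e

main :: IO ()
main = print (constFold (DynFlags W64) (Add (Lit 1 W32) (Lit 2 W32)))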
diff --git a/compiler/nativeGen/Instruction.hs b/compiler/nativeGen/Instruction.hs
index 292cf82f6a..64ba32c6dc 100644
--- a/compiler/nativeGen/Instruction.hs
+++ b/compiler/nativeGen/Instruction.hs
@@ -13,6 +13,7 @@ where
import Reg
import BlockId
+import DynFlags
import OldCmm
import Platform
@@ -105,7 +106,7 @@ class Instruction instr where
-- | An instruction to spill a register into a spill slot.
mkSpillInstr
- :: Platform
+ :: DynFlags
-> Reg -- ^ the reg to spill
-> Int -- ^ the current stack delta
-> Int -- ^ spill slot to use
@@ -114,7 +115,7 @@ class Instruction instr where
-- | An instruction to reload a register from a spill slot.
mkLoadInstr
- :: Platform
+ :: DynFlags
-> Reg -- ^ the reg to reload.
-> Int -- ^ the current stack delta
-> Int -- ^ the spill slot to use
diff --git a/compiler/nativeGen/PIC.hs b/compiler/nativeGen/PIC.hs
index 67945669f5..af4bb9e9ed 100644
--- a/compiler/nativeGen/PIC.hs
+++ b/compiler/nativeGen/PIC.hs
@@ -65,6 +65,7 @@ import Reg
import NCGMonad
+import Hoopl
import OldCmm
import CLabel ( CLabel, ForeignLabelSource(..), pprCLabel,
mkDynamicLinkerLabel, DynamicLinkerLabelInfo(..),
@@ -74,7 +75,6 @@ import CLabel ( CLabel, ForeignLabelSource(..), pprCLabel,
import CLabel ( mkForeignLabel )
-import StaticFlags ( opt_Static )
import BasicTypes
import Outputable
@@ -133,7 +133,7 @@ cmmMakeDynamicReference' dflags addImport referenceKind lbl
AccessViaSymbolPtr -> do
let symbolPtr = mkDynamicLinkerLabel SymbolPtr lbl
addImport symbolPtr
- return $ CmmLoad (cmmMakePicReference dflags symbolPtr) bWord
+ return $ CmmLoad (cmmMakePicReference dflags symbolPtr) (bWord dflags)
AccessDirectly -> case referenceKind of
-- for data, we might have to make some calculations:
@@ -160,8 +160,8 @@ cmmMakePicReference dflags lbl
= CmmLit $ CmmLabel lbl
- | (dopt Opt_PIC dflags || not opt_Static) && absoluteLabel lbl
- = CmmMachOp (MO_Add wordWidth)
+ | (dopt Opt_PIC dflags || not (dopt Opt_Static dflags)) && absoluteLabel lbl
+ = CmmMachOp (MO_Add (wordWidth dflags))
[ CmmReg (CmmGlobal PicBaseReg)
, CmmLit $ picRelative
(platformArch $ targetPlatform dflags)
@@ -213,14 +213,14 @@ howToAccessLabel
-- To access the function at SYMBOL from our local module, we just need to
-- dereference the local __imp_SYMBOL.
--
--- If opt_Static is set then we assume that all our code will be linked
+-- If Opt_Static is set then we assume that all our code will be linked
-- into the same .exe file. In this case we always access symbols directly,
-- and never use __imp_SYMBOL.
--
howToAccessLabel dflags _ OSMinGW32 _ lbl
-- Assume all symbols will be in the same PE, so just access them directly.
- | opt_Static
+ | dopt Opt_Static dflags
= AccessDirectly
-- If the target symbol is in another PE we need to access it via the
@@ -306,7 +306,7 @@ howToAccessLabel dflags _ os _ _
-- if we don't dynamically link to Haskell code,
-- it actually manages to do so without messing thins up.
| osElfTarget os
- , not (dopt Opt_PIC dflags) && opt_Static
+ , not (dopt Opt_PIC dflags) && dopt Opt_Static dflags
= AccessDirectly
howToAccessLabel dflags arch os DataReference lbl
@@ -428,12 +428,12 @@ needImportedSymbols dflags arch os
-- PowerPC Linux: -fPIC or -dynamic
| osElfTarget os
, arch == ArchPPC
- = dopt Opt_PIC dflags || not opt_Static
+ = dopt Opt_PIC dflags || not (dopt Opt_Static dflags)
-- i386 (and others?): -dynamic but not -fPIC
| osElfTarget os
, arch /= ArchPPC_64
- = not opt_Static && not (dopt Opt_PIC dflags)
+ = not (dopt Opt_Static dflags) && not (dopt Opt_PIC dflags)
| otherwise
= False
@@ -622,7 +622,7 @@ pprImportedSymbol _ (Platform { platformOS = OSDarwin }) _
-- section.
-- The "official" GOT mechanism (label@got) isn't intended to be used
-- in position dependent code, so we have to create our own "fake GOT"
--- when not Opt_PIC && not opt_Static.
+-- when not Opt_PIC && not (dopt Opt_Static dflags).
--
-- 2) PowerPC Linux is just plain broken.
-- While it's theoretically possible to use GOT offsets larger
@@ -641,11 +641,11 @@ pprImportedSymbol _ platform@(Platform { platformArch = ArchPPC_64 }) _
| osElfTarget (platformOS platform)
= empty
-pprImportedSymbol _ platform importedLbl
+pprImportedSymbol dflags platform importedLbl
| osElfTarget (platformOS platform)
= case dynamicLinkerLabelInfo importedLbl of
Just (SymbolPtr, lbl)
- -> let symbolSize = case wordWidth of
+ -> let symbolSize = case wordWidth dflags of
W32 -> sLit "\t.long"
W64 -> sLit "\t.quad"
_ -> panic "Unknown wordRep in pprImportedSymbol"
@@ -703,8 +703,9 @@ initializePicBase_ppc ArchPPC os picReg
(CmmProc info lab (ListGraph blocks) : statics)
| osElfTarget os
= do
+ dflags <- getDynFlags
gotOffLabel <- getNewLabelNat
- tmp <- getNewRegNat $ intSize wordWidth
+ tmp <- getNewRegNat $ intSize (wordWidth dflags)
let
gotOffset = CmmData Text $ Statics gotOffLabel [
CmmStaticLit (CmmLabelDiffOff gotLabel
@@ -752,18 +753,37 @@ initializePicBase_x86
-> NatM [NatCmmDecl (Alignment, CmmStatics) X86.Instr]
initializePicBase_x86 ArchX86 os picReg
- (CmmProc info lab (ListGraph blocks) : statics)
+ (CmmProc info lab (ListGraph blocks) : statics)
| osElfTarget os
- = return (CmmProc info lab (ListGraph (b':tail blocks)) : statics)
- where BasicBlock bID insns = head blocks
- b' = BasicBlock bID (X86.FETCHGOT picReg : insns)
+ = return (CmmProc info lab (ListGraph blocks') : statics)
+ where blocks' = case blocks of
+ [] -> []
+ (b:bs) -> fetchGOT b : map maybeFetchGOT bs
+
+ -- we want to add a FETCHGOT instruction to the beginning of
+ -- every block that is an entry point, which corresponds to
+ -- the blocks that have entries in the info-table mapping.
+ maybeFetchGOT b@(BasicBlock bID _)
+ | bID `mapMember` info = fetchGOT b
+ | otherwise = b
+
+ fetchGOT (BasicBlock bID insns) =
+ BasicBlock bID (X86.FETCHGOT picReg : insns)
initializePicBase_x86 ArchX86 OSDarwin picReg
(CmmProc info lab (ListGraph blocks) : statics)
- = return (CmmProc info lab (ListGraph (b':tail blocks)) : statics)
+ = return (CmmProc info lab (ListGraph blocks') : statics)
- where BasicBlock bID insns = head blocks
- b' = BasicBlock bID (X86.FETCHPC picReg : insns)
+ where blocks' = case blocks of
+ [] -> []
+ (b:bs) -> fetchPC b : map maybeFetchPC bs
+
+ maybeFetchPC b@(BasicBlock bID _)
+ | bID `mapMember` info = fetchPC b
+ | otherwise = b
+
+ fetchPC (BasicBlock bID insns) =
+ BasicBlock bID (X86.FETCHPC picReg : insns)
initializePicBase_x86 _ _ _ _
= panic "initializePicBase_x86: not needed"
diff --git a/compiler/nativeGen/PPC/CodeGen.hs b/compiler/nativeGen/PPC/CodeGen.hs
index ce4a54ca9b..1f036aa43e 100644
--- a/compiler/nativeGen/PPC/CodeGen.hs
+++ b/compiler/nativeGen/PPC/CodeGen.hs
@@ -124,7 +124,7 @@ stmtToInstrs stmt = do
| target32Bit (targetPlatform dflags) &&
isWord64 ty -> assignReg_I64Code reg src
| otherwise -> assignReg_IntCode size reg src
- where ty = cmmRegType reg
+ where ty = cmmRegType dflags reg
size = cmmTypeSize ty
CmmStore addr src
@@ -132,7 +132,7 @@ stmtToInstrs stmt = do
| target32Bit (targetPlatform dflags) &&
isWord64 ty -> assignMem_I64Code addr src
| otherwise -> assignMem_IntCode size addr src
- where ty = cmmExprType src
+ where ty = cmmExprType dflags src
size = cmmTypeSize ty
CmmCall target result_regs args _
@@ -206,9 +206,9 @@ temporary, then do the other computation, and then use the temporary:
-- | Convert a BlockId to some CmmStatic data
-jumpTableEntry :: Maybe BlockId -> CmmStatic
-jumpTableEntry Nothing = CmmStaticLit (CmmInt 0 wordWidth)
-jumpTableEntry (Just blockid) = CmmStaticLit (CmmLabel blockLabel)
+jumpTableEntry :: DynFlags -> Maybe BlockId -> CmmStatic
+jumpTableEntry dflags Nothing = CmmStaticLit (CmmInt 0 (wordWidth dflags))
+jumpTableEntry _ (Just blockid) = CmmStaticLit (CmmLabel blockLabel)
where blockLabel = mkAsmTempLabel (getUnique blockid)
@@ -218,12 +218,12 @@ jumpTableEntry (Just blockid) = CmmStaticLit (CmmLabel blockLabel)
-- Expand CmmRegOff. ToDo: should we do it this way around, or convert
-- CmmExprs into CmmRegOff?
-mangleIndexTree :: CmmExpr -> CmmExpr
-mangleIndexTree (CmmRegOff reg off)
+mangleIndexTree :: DynFlags -> CmmExpr -> CmmExpr
+mangleIndexTree dflags (CmmRegOff reg off)
= CmmMachOp (MO_Add width) [CmmReg reg, CmmLit (CmmInt (fromIntegral off) width)]
- where width = typeWidth (cmmRegType reg)
+ where width = typeWidth (cmmRegType dflags reg)
-mangleIndexTree _
+mangleIndexTree _ _
= panic "PPC.CodeGen.mangleIndexTree: no match"
-- -----------------------------------------------------------------------------
@@ -370,11 +370,11 @@ getRegister' _ (CmmReg (CmmGlobal PicBaseReg))
return (Fixed archWordSize reg nilOL)
getRegister' dflags (CmmReg reg)
- = return (Fixed (cmmTypeSize (cmmRegType reg))
+ = return (Fixed (cmmTypeSize (cmmRegType dflags reg))
(getRegisterReg (targetPlatform dflags) reg) nilOL)
getRegister' dflags tree@(CmmRegOff _ _)
- = getRegister' dflags (mangleIndexTree tree)
+ = getRegister' dflags (mangleIndexTree dflags tree)
-- for 32-bit architectuers, support some 64 -> 32 bit conversions:
-- TO_W_(x), TO_W_(x >> 32)
@@ -561,8 +561,8 @@ getRegister' _ (CmmLit (CmmFloat f frep)) = do
`consOL` (addr_code `snocOL` LD size dst addr)
return (Any size code)
-getRegister' _ (CmmLit lit)
- = let rep = cmmLitType lit
+getRegister' dflags (CmmLit lit)
+ = let rep = cmmLitType dflags lit
imm = litToImm lit
code dst = toOL [
LIS dst (HA imm),
@@ -607,7 +607,8 @@ temporary, then do the other computation, and then use the temporary:
-}
getAmode :: CmmExpr -> NatM Amode
-getAmode tree@(CmmRegOff _ _) = getAmode (mangleIndexTree tree)
+getAmode tree@(CmmRegOff _ _) = do dflags <- getDynFlags
+ getAmode (mangleIndexTree dflags tree)
getAmode (CmmMachOp (MO_Sub W32) [x, CmmLit (CmmInt i _)])
| Just off <- makeImmediate W32 True (-i)
@@ -844,14 +845,14 @@ genCCall target dest_regs argsAndHints
= do dflags <- getDynFlags
let platform = targetPlatform dflags
case platformOS platform of
- OSLinux -> genCCall' platform GCPLinux target dest_regs argsAndHints
- OSDarwin -> genCCall' platform GCPDarwin target dest_regs argsAndHints
+ OSLinux -> genCCall' dflags GCPLinux target dest_regs argsAndHints
+ OSDarwin -> genCCall' dflags GCPDarwin target dest_regs argsAndHints
_ -> panic "PPC.CodeGen.genCCall: not defined for this os"
data GenCCallPlatform = GCPLinux | GCPDarwin
genCCall'
- :: Platform
+ :: DynFlags
-> GenCCallPlatform
-> CmmCallTarget -- function to call
-> [HintedCmmFormal] -- where to put the result
@@ -902,7 +903,7 @@ genCCall' _ _ (CmmPrim MO_WriteBarrier _) _ _
genCCall' _ _ (CmmPrim _ (Just stmts)) _ _
= stmtsToInstrs stmts
-genCCall' platform gcp target dest_regs argsAndHints
+genCCall' dflags gcp target dest_regs argsAndHints
= ASSERT (not $ any (`elem` [II16]) $ map cmmTypeSize argReps)
-- we rely on argument promotion in the codeGen
do
@@ -934,6 +935,8 @@ genCCall' platform gcp target dest_regs argsAndHints
`snocOL` BCTRL usedRegs
`appOL` codeAfter)
where
+ platform = targetPlatform dflags
+
initialStackOffset = case gcp of
GCPDarwin -> 24
GCPLinux -> 8
@@ -955,7 +958,7 @@ genCCall' platform gcp target dest_regs argsAndHints
= argsAndHints
args = map hintlessCmm argsAndHints'
- argReps = map cmmExprType args
+ argReps = map (cmmExprType dflags) args
roundTo a x | x `mod` a == 0 = x
| otherwise = x + a - (x `mod` a)
@@ -1060,23 +1063,23 @@ genCCall' platform gcp target dest_regs argsAndHints
GCPDarwin ->
case cmmTypeSize rep of
II8 -> (1, 0, 4, gprs)
+ II16 -> (1, 0, 4, gprs)
II32 -> (1, 0, 4, gprs)
-- The Darwin ABI requires that we skip a
-- corresponding number of GPRs when we use
-- the FPRs.
FF32 -> (1, 1, 4, fprs)
FF64 -> (2, 1, 8, fprs)
- II16 -> panic "genCCall' passArguments II16"
II64 -> panic "genCCall' passArguments II64"
FF80 -> panic "genCCall' passArguments FF80"
GCPLinux ->
case cmmTypeSize rep of
II8 -> (1, 0, 4, gprs)
+ II16 -> (1, 0, 4, gprs)
II32 -> (1, 0, 4, gprs)
-- ... the SysV ABI doesn't.
FF32 -> (0, 1, 4, fprs)
FF64 -> (0, 1, 8, fprs)
- II16 -> panic "genCCall' passArguments II16"
II64 -> panic "genCCall' passArguments II64"
FF80 -> panic "genCCall' passArguments FF80"
@@ -1089,7 +1092,7 @@ genCCall' platform gcp target dest_regs argsAndHints
| isWord64 rep -> toOL [MR (getHiVRegFromLo r_dest) r3,
MR r_dest r4]
| otherwise -> unitOL (MR r_dest r3)
- where rep = cmmRegType (CmmLocal dest)
+ where rep = cmmRegType dflags (CmmLocal dest)
r_dest = getRegisterReg platform (CmmLocal dest)
_ -> panic "genCCall' moveResult: Bad dest_regs"
@@ -1194,9 +1197,9 @@ generateJumpTableForInstr :: DynFlags -> Instr
generateJumpTableForInstr dflags (BCTR ids (Just lbl)) =
let jumpTable
| dopt Opt_PIC dflags = map jumpTableEntryRel ids
- | otherwise = map jumpTableEntry ids
+ | otherwise = map (jumpTableEntry dflags) ids
where jumpTableEntryRel Nothing
- = CmmStaticLit (CmmInt 0 wordWidth)
+ = CmmStaticLit (CmmInt 0 (wordWidth dflags))
jumpTableEntryRel (Just blockid)
= CmmStaticLit (CmmLabelDiffOff blockLabel lbl 0)
where blockLabel = mkAsmTempLabel (getUnique blockid)
@@ -1376,10 +1379,10 @@ coerceInt2FP fromRep toRep x = do
[CmmStaticLit (CmmInt 0x43300000 W32),
CmmStaticLit (CmmInt 0x80000000 W32)],
XORIS itmp src (ImmInt 0x8000),
- ST II32 itmp (spRel 3),
+ ST II32 itmp (spRel dflags 3),
LIS itmp (ImmInt 0x4330),
- ST II32 itmp (spRel 2),
- LD FF64 ftmp (spRel 2)
+ ST II32 itmp (spRel dflags 2),
+ LD FF64 ftmp (spRel dflags 2)
] `appOL` addr_code `appOL` toOL [
LD FF64 dst addr,
FSUB FF64 dst ftmp dst
@@ -1401,6 +1404,7 @@ coerceInt2FP fromRep toRep x = do
coerceFP2Int :: Width -> Width -> CmmExpr -> NatM Register
coerceFP2Int _ toRep x = do
+ dflags <- getDynFlags
-- the reps don't really matter: F*->FF64 and II32->I* are no-ops
(src, code) <- getSomeReg x
tmp <- getNewRegNat FF64
@@ -1409,7 +1413,7 @@ coerceFP2Int _ toRep x = do
-- convert to int in FP reg
FCTIWZ tmp src,
-- store value (64bit) from FP to stack
- ST FF64 tmp (spRel 2),
+ ST FF64 tmp (spRel dflags 2),
-- read low word of value (high word is undefined)
- LD II32 dst (spRel 3)]
+ LD II32 dst (spRel dflags 3)]
return (Any (intSize toRep) code')
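Aside: the passArguments change above makes II16 arguments behave like II32 on both ABIs, while keeping the existing difference that the Darwin ABI skips a matching number of GPRs whenever an FPR is used and the SysV (Linux) ABI does not. A toy model of that per-ABI cost table (illustrative names and sizes only, not the real genCCall'):

data ABI = Darwin | Linux deriving Show
data ArgRep = I8 | I16 | I32 | F32 | F64 deriving Show

-- For one argument: GPRs consumed or skipped, FPRs consumed, stack bytes used
-- if registers run out.
argCost :: ABI -> ArgRep -> (Int, Int, Int)
argCost abi rep = case rep of
    I8  -> (1, 0, 4)
    I16 -> (1, 0, 4)             -- after this patch, halfwords go like words
    I32 -> (1, 0, 4)
    F32 -> (skipUnit, 1, 4)
    F64 -> (2 * skipUnit, 1, 8)
  where
    skipUnit = case abi of
                 Darwin -> 1     -- Darwin skips GPRs alongside FPR use
                 Linux  -> 0     -- SysV does not

main :: IO ()
main = mapM_ print [ (abi, rep, argCost abi rep)
                   | abi <- [Darwin, Linux], rep <- [I16, F32, F64] ]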
diff --git a/compiler/nativeGen/PPC/Instr.hs b/compiler/nativeGen/PPC/Instr.hs
index 1af08a6076..464a88a08b 100644
--- a/compiler/nativeGen/PPC/Instr.hs
+++ b/compiler/nativeGen/PPC/Instr.hs
@@ -34,8 +34,8 @@ import RegClass
import Reg
import CodeGen.Platform
-import Constants (rESERVED_C_STACK_BYTES)
import BlockId
+import DynFlags
import OldCmm
import FastString
import CLabel
@@ -355,14 +355,15 @@ ppc_patchJumpInstr insn patchF
-- | An instruction to spill a register into a spill slot.
ppc_mkSpillInstr
- :: Platform
+ :: DynFlags
-> Reg -- register to spill
-> Int -- current stack delta
-> Int -- spill slot to use
-> Instr
-ppc_mkSpillInstr platform reg delta slot
- = let off = spillSlotToOffset slot
+ppc_mkSpillInstr dflags reg delta slot
+ = let platform = targetPlatform dflags
+ off = spillSlotToOffset dflags slot
in
let sz = case targetClassOfReg platform reg of
RcInteger -> II32
@@ -372,14 +373,15 @@ ppc_mkSpillInstr platform reg delta slot
ppc_mkLoadInstr
- :: Platform
+ :: DynFlags
-> Reg -- register to load
-> Int -- current stack delta
-> Int -- spill slot to use
-> Instr
-ppc_mkLoadInstr platform reg delta slot
- = let off = spillSlotToOffset slot
+ppc_mkLoadInstr dflags reg delta slot
+ = let platform = targetPlatform dflags
+ off = spillSlotToOffset dflags slot
in
let sz = case targetClassOfReg platform reg of
RcInteger -> II32
@@ -391,20 +393,21 @@ ppc_mkLoadInstr platform reg delta slot
spillSlotSize :: Int
spillSlotSize = 8
-maxSpillSlots :: Int
-maxSpillSlots = ((rESERVED_C_STACK_BYTES - 64) `div` spillSlotSize) - 1
+maxSpillSlots :: DynFlags -> Int
+maxSpillSlots dflags
+ = ((rESERVED_C_STACK_BYTES dflags - 64) `div` spillSlotSize) - 1
-- convert a spill slot number to a *byte* offset, with no sign:
-- decide on a per arch basis whether you are spilling above or below
-- the C stack pointer.
-spillSlotToOffset :: Int -> Int
-spillSlotToOffset slot
- | slot >= 0 && slot < maxSpillSlots
+spillSlotToOffset :: DynFlags -> Int -> Int
+spillSlotToOffset dflags slot
+ | slot >= 0 && slot < maxSpillSlots dflags
= 64 + spillSlotSize * slot
| otherwise
= pprPanic "spillSlotToOffset:"
( text "invalid spill location: " <> int slot
- $$ text "maxSpillSlots: " <> int maxSpillSlots)
+ $$ text "maxSpillSlots: " <> int (maxSpillSlots dflags))
--------------------------------------------------------------------------------
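Aside: a worked example of the spill-slot arithmetic above, with rESERVED_C_STACK_BYTES modelled as a per-DynFlags value (the concrete figure below is only an assumed example, not GHC's actual setting):

newtype DynFlags = DynFlags { reservedCStackBytes :: Int }

spillSlotSize :: Int
spillSlotSize = 8

maxSpillSlots :: DynFlags -> Int
maxSpillSlots dflags =
  ((reservedCStackBytes dflags - 64) `div` spillSlotSize) - 1

-- Slots live above a fixed 64-byte reserved area; each slot is 8 bytes.
spillSlotToOffset :: DynFlags -> Int -> Int
spillSlotToOffset dflags slot
  | slot >= 0 && slot < maxSpillSlots dflags = 64 + spillSlotSize * slot
  | otherwise = error ("invalid spill slot: " ++ show slot)

main :: IO ()
main = do
  let dflags = DynFlags 16384          -- assumed example reserved-stack size
  print (maxSpillSlots dflags)         -- 2039
  print (spillSlotToOffset dflags 0)   -- 64
  print (spillSlotToOffset dflags 10)  -- 144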
diff --git a/compiler/nativeGen/PPC/Ppr.hs b/compiler/nativeGen/PPC/Ppr.hs
index 55cc6d2a0d..576e19db1a 100644
--- a/compiler/nativeGen/PPC/Ppr.hs
+++ b/compiler/nativeGen/PPC/Ppr.hs
@@ -31,6 +31,7 @@ import RegClass
import TargetReg
import OldCmm
+import BlockId
import CLabel
@@ -50,7 +51,7 @@ pprNatCmmDecl :: NatCmmDecl CmmStatics Instr -> SDoc
pprNatCmmDecl (CmmData section dats) =
pprSectionHeader section $$ pprDatas dats
-pprNatCmmDecl proc@(CmmProc _ lbl (ListGraph blocks)) =
+pprNatCmmDecl proc@(CmmProc top_info lbl (ListGraph blocks)) =
case topInfoTable proc of
Nothing ->
case blocks of
@@ -59,19 +60,15 @@ pprNatCmmDecl proc@(CmmProc _ lbl (ListGraph blocks)) =
blocks -> -- special case for code without info table:
pprSectionHeader Text $$
pprLabel lbl $$ -- blocks guaranteed not null, so label needed
- vcat (map pprBasicBlock blocks)
+ vcat (map (pprBasicBlock top_info) blocks)
- Just (Statics info_lbl info) ->
+ Just (Statics info_lbl _) ->
sdocWithPlatform $ \platform ->
- pprSectionHeader Text $$
- (
- (if platformHasSubsectionsViaSymbols platform
- then ppr (mkDeadStripPreventer info_lbl) <> char ':'
- else empty) $$
- vcat (map pprData info) $$
- pprLabel info_lbl
- ) $$
- vcat (map pprBasicBlock blocks) $$
+ (if platformHasSubsectionsViaSymbols platform
+ then pprSectionHeader Text $$
+ ppr (mkDeadStripPreventer info_lbl) <> char ':'
+ else empty) $$
+ vcat (map (pprBasicBlock top_info) blocks) $$
-- above: Even the first block gets a label, because with branch-chain
-- elimination, it might be the target of a goto.
(if platformHasSubsectionsViaSymbols platform
@@ -89,10 +86,18 @@ pprNatCmmDecl proc@(CmmProc _ lbl (ListGraph blocks)) =
else empty)
-pprBasicBlock :: NatBasicBlock Instr -> SDoc
-pprBasicBlock (BasicBlock blockid instrs) =
- pprLabel (mkAsmTempLabel (getUnique blockid)) $$
- vcat (map pprInstr instrs)
+pprBasicBlock :: BlockEnv CmmStatics -> NatBasicBlock Instr -> SDoc
+pprBasicBlock info_env (BasicBlock blockid instrs)
+ = maybe_infotable $$
+ pprLabel (mkAsmTempLabel (getUnique blockid)) $$
+ vcat (map pprInstr instrs)
+ where
+ maybe_infotable = case mapLookup blockid info_env of
+ Nothing -> empty
+ Just (Statics info_lbl info) ->
+ pprSectionHeader Text $$
+ vcat (map pprData info) $$
+ pprLabel info_lbl
@@ -292,7 +297,8 @@ pprSectionHeader seg
pprDataItem :: CmmLit -> SDoc
pprDataItem lit
- = vcat (ppr_item (cmmTypeSize $ cmmLitType lit) lit)
+ = sdocWithDynFlags $ \dflags ->
+ vcat (ppr_item (cmmTypeSize $ cmmLitType dflags lit) lit)
where
imm = litToImm lit
diff --git a/compiler/nativeGen/PPC/Regs.hs b/compiler/nativeGen/PPC/Regs.hs
index 7dccb6040e..d4123aca84 100644
--- a/compiler/nativeGen/PPC/Regs.hs
+++ b/compiler/nativeGen/PPC/Regs.hs
@@ -55,8 +55,8 @@ import CLabel ( CLabel )
import Unique
import CodeGen.Platform
+import DynFlags
import Outputable
-import Constants
import FastBool
import FastTypes
import Platform
@@ -194,10 +194,11 @@ addrOffset addr off
-- temporaries and for excess call arguments. @fpRel@, where
-- applicable, is the same but for the frame pointer.
-spRel :: Int -- desired stack offset in words, positive or negative
+spRel :: DynFlags
+ -> Int -- desired stack offset in words, positive or negative
-> AddrMode
-spRel n = AddrRegImm sp (ImmInt (n * wORD_SIZE))
+spRel dflags n = AddrRegImm sp (ImmInt (n * wORD_SIZE dflags))
-- argRegs is the set of regs which are read for an n-argument call to C.
diff --git a/compiler/nativeGen/RegAlloc/Graph/Main.hs b/compiler/nativeGen/RegAlloc/Graph/Main.hs
index 32b5e41402..1611a710fb 100644
--- a/compiler/nativeGen/RegAlloc/Graph/Main.hs
+++ b/compiler/nativeGen/RegAlloc/Graph/Main.hs
@@ -174,7 +174,7 @@ regAlloc_spin dflags spinCount triv regsFree slotsFree debug_codeGraphs code
-- strip off liveness information,
-- and rewrite SPILL/RELOAD pseudos into real instructions along the way
- let code_final = map (stripLive platform) code_spillclean
+ let code_final = map (stripLive dflags) code_spillclean
-- record what happened in this stage for debugging
let stat =
diff --git a/compiler/nativeGen/RegAlloc/Linear/Base.hs b/compiler/nativeGen/RegAlloc/Linear/Base.hs
index 432acdf314..e58331347c 100644
--- a/compiler/nativeGen/RegAlloc/Linear/Base.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/Base.hs
@@ -13,7 +13,6 @@ module RegAlloc.Linear.Base (
-- the allocator monad
RA_State(..),
- RegM(..)
)
where
@@ -22,6 +21,7 @@ import RegAlloc.Linear.StackMap
import RegAlloc.Liveness
import Reg
+import DynFlags
import Outputable
import Unique
import UniqFM
@@ -126,11 +126,7 @@ data RA_State freeRegs
-- | Record why things were spilled, for -ddrop-asm-stats.
-- Just keep a list here instead of a map of regs -> reasons.
-- We don't want to slow down the allocator if we're not going to emit the stats.
- , ra_spills :: [SpillReason] }
-
-
--- | The register allocator monad type.
-newtype RegM freeRegs a
- = RegM { unReg :: RA_State freeRegs -> (# RA_State freeRegs, a #) }
+ , ra_spills :: [SpillReason]
+ , ra_DynFlags :: DynFlags }
diff --git a/compiler/nativeGen/RegAlloc/Linear/FreeRegs.hs b/compiler/nativeGen/RegAlloc/Linear/FreeRegs.hs
index 887af1758a..fffdef761b 100644
--- a/compiler/nativeGen/RegAlloc/Linear/FreeRegs.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/FreeRegs.hs
@@ -18,6 +18,7 @@ where
import Reg
import RegClass
+import DynFlags
import Panic
import Platform
@@ -33,9 +34,10 @@ import Platform
-- getFreeRegs cls f = filter ( (==cls) . regClass . RealReg ) f
-- allocateReg f r = filter (/= r) f
-import qualified RegAlloc.Linear.PPC.FreeRegs as PPC
-import qualified RegAlloc.Linear.SPARC.FreeRegs as SPARC
-import qualified RegAlloc.Linear.X86.FreeRegs as X86
+import qualified RegAlloc.Linear.PPC.FreeRegs as PPC
+import qualified RegAlloc.Linear.SPARC.FreeRegs as SPARC
+import qualified RegAlloc.Linear.X86.FreeRegs as X86
+import qualified RegAlloc.Linear.X86_64.FreeRegs as X86_64
import qualified PPC.Instr
import qualified SPARC.Instr
@@ -53,6 +55,12 @@ instance FR X86.FreeRegs where
frInitFreeRegs = X86.initFreeRegs
frReleaseReg = \_ -> X86.releaseReg
+instance FR X86_64.FreeRegs where
+ frAllocateReg = \_ -> X86_64.allocateReg
+ frGetFreeRegs = X86_64.getFreeRegs
+ frInitFreeRegs = X86_64.initFreeRegs
+ frReleaseReg = \_ -> X86_64.releaseReg
+
instance FR PPC.FreeRegs where
frAllocateReg = \_ -> PPC.allocateReg
frGetFreeRegs = \_ -> PPC.getFreeRegs
@@ -65,13 +73,13 @@ instance FR SPARC.FreeRegs where
frInitFreeRegs = SPARC.initFreeRegs
frReleaseReg = SPARC.releaseReg
-maxSpillSlots :: Platform -> Int
-maxSpillSlots platform
- = case platformArch platform of
- ArchX86 -> X86.Instr.maxSpillSlots True -- 32bit
- ArchX86_64 -> X86.Instr.maxSpillSlots False -- not 32bit
- ArchPPC -> PPC.Instr.maxSpillSlots
- ArchSPARC -> SPARC.Instr.maxSpillSlots
+maxSpillSlots :: DynFlags -> Int
+maxSpillSlots dflags
+ = case platformArch (targetPlatform dflags) of
+ ArchX86 -> X86.Instr.maxSpillSlots dflags
+ ArchX86_64 -> X86.Instr.maxSpillSlots dflags
+ ArchPPC -> PPC.Instr.maxSpillSlots dflags
+ ArchSPARC -> SPARC.Instr.maxSpillSlots dflags
ArchARM _ _ _ -> panic "maxSpillSlots ArchARM"
ArchPPC_64 -> panic "maxSpillSlots ArchPPC_64"
ArchUnknown -> panic "maxSpillSlots ArchUnknown"
diff --git a/compiler/nativeGen/RegAlloc/Linear/JoinToTargets.hs b/compiler/nativeGen/RegAlloc/Linear/JoinToTargets.hs
index ea415e2661..6294743c48 100644
--- a/compiler/nativeGen/RegAlloc/Linear/JoinToTargets.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/JoinToTargets.hs
@@ -1,24 +1,13 @@
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
-
-- | Handles joining of a jump instruction to its targets.
--- The first time we encounter a jump to a particular basic block, we
--- record the assignment of temporaries. The next time we encounter a
--- jump to the same block, we compare our current assignment to the
--- stored one. They might be different if spilling has occrred in one
--- branch; so some fixup code will be required to match up the assignments.
+-- The first time we encounter a jump to a particular basic block, we
+-- record the assignment of temporaries. The next time we encounter a
+-- jump to the same block, we compare our current assignment to the
+-- stored one. They might be different if spilling has occrred in one
+-- branch; so some fixup code will be required to match up the assignments.
--
-module RegAlloc.Linear.JoinToTargets (
- joinToTargets
-)
-
-where
+module RegAlloc.Linear.JoinToTargets (joinToTargets) where
import RegAlloc.Linear.State
import RegAlloc.Linear.Base
@@ -30,96 +19,94 @@ import Reg
import BlockId
import OldCmm hiding (RegSet)
import Digraph
+import DynFlags
import Outputable
-import Platform
import Unique
import UniqFM
import UniqSet
-- | For a jump instruction at the end of a block, generate fixup code so its
--- vregs are in the correct regs for its destination.
+-- vregs are in the correct regs for its destination.
--
joinToTargets
- :: (FR freeRegs, Instruction instr)
- => Platform
- -> BlockMap RegSet -- ^ maps the unique of the blockid to the set of vregs
- -- that are known to be live on the entry to each block.
+ :: (FR freeRegs, Instruction instr)
+ => BlockMap RegSet -- ^ maps the unique of the blockid to the set of vregs
+ -- that are known to be live on the entry to each block.
- -> BlockId -- ^ id of the current block
- -> instr -- ^ branch instr on the end of the source block.
+ -> BlockId -- ^ id of the current block
+ -> instr -- ^ branch instr on the end of the source block.
- -> RegM freeRegs ([NatBasicBlock instr] -- fresh blocks of fixup code.
- , instr) -- the original branch instruction, but maybe patched to jump
- -- to a fixup block first.
+ -> RegM freeRegs ([NatBasicBlock instr] -- fresh blocks of fixup code.
+ , instr) -- the original branch
+ -- instruction, but maybe
+ -- patched to jump
+ -- to a fixup block first.
-joinToTargets platform block_live id instr
+joinToTargets block_live id instr
- -- we only need to worry about jump instructions.
- | not $ isJumpishInstr instr
- = return ([], instr)
+ -- we only need to worry about jump instructions.
+ | not $ isJumpishInstr instr
+ = return ([], instr)
- | otherwise
- = joinToTargets' platform block_live [] id instr (jumpDestsOfInstr instr)
+ | otherwise
+ = joinToTargets' block_live [] id instr (jumpDestsOfInstr instr)
-----
joinToTargets'
- :: (FR freeRegs, Instruction instr)
- => Platform
- -> BlockMap RegSet -- ^ maps the unique of the blockid to the set of vregs
- -- that are known to be live on the entry to each block.
+ :: (FR freeRegs, Instruction instr)
+ => BlockMap RegSet -- ^ maps the unique of the blockid to the set of vregs
+ -- that are known to be live on the entry to each block.
- -> [NatBasicBlock instr] -- ^ acc blocks of fixup code.
+ -> [NatBasicBlock instr] -- ^ acc blocks of fixup code.
- -> BlockId -- ^ id of the current block
- -> instr -- ^ branch instr on the end of the source block.
+ -> BlockId -- ^ id of the current block
+ -> instr -- ^ branch instr on the end of the source block.
- -> [BlockId] -- ^ branch destinations still to consider.
+ -> [BlockId] -- ^ branch destinations still to consider.
- -> RegM freeRegs ( [NatBasicBlock instr]
- , instr)
+ -> RegM freeRegs ([NatBasicBlock instr], instr)
-- no more targets to consider. all done.
-joinToTargets' _ _ new_blocks _ instr []
- = return (new_blocks, instr)
+joinToTargets' _ new_blocks _ instr []
+ = return (new_blocks, instr)
-- handle a branch target.
-joinToTargets' platform block_live new_blocks block_id instr (dest:dests)
- = do
- -- get the map of where the vregs are stored on entry to each basic block.
- block_assig <- getBlockAssigR
-
- -- get the assignment on entry to the branch instruction.
- assig <- getAssigR
-
- -- adjust the current assignment to remove any vregs that are not live
- -- on entry to the destination block.
- let Just live_set = mapLookup dest block_live
- let still_live uniq _ = uniq `elemUniqSet_Directly` live_set
- let adjusted_assig = filterUFM_Directly still_live assig
-
- -- and free up those registers which are now free.
- let to_free =
- [ r | (reg, loc) <- ufmToList assig
- , not (elemUniqSet_Directly reg live_set)
- , r <- regsOfLoc loc ]
-
- case mapLookup dest block_assig of
- Nothing
- -> joinToTargets_first
- platform block_live new_blocks block_id instr dest dests
- block_assig adjusted_assig to_free
-
- Just (_, dest_assig)
- -> joinToTargets_again
- platform block_live new_blocks block_id instr dest dests
- adjusted_assig dest_assig
+joinToTargets' block_live new_blocks block_id instr (dest:dests)
+ = do
+ -- get the map of where the vregs are stored on entry to each basic block.
+ block_assig <- getBlockAssigR
+
+ -- get the assignment on entry to the branch instruction.
+ assig <- getAssigR
+
+ -- adjust the current assignment to remove any vregs that are not live
+ -- on entry to the destination block.
+ let Just live_set = mapLookup dest block_live
+ let still_live uniq _ = uniq `elemUniqSet_Directly` live_set
+ let adjusted_assig = filterUFM_Directly still_live assig
+
+ -- and free up those registers which are now free.
+ let to_free =
+ [ r | (reg, loc) <- ufmToList assig
+ , not (elemUniqSet_Directly reg live_set)
+ , r <- regsOfLoc loc ]
+
+ case mapLookup dest block_assig of
+ Nothing
+ -> joinToTargets_first
+ block_live new_blocks block_id instr dest dests
+ block_assig adjusted_assig to_free
+
+ Just (_, dest_assig)
+ -> joinToTargets_again
+ block_live new_blocks block_id instr dest dests
+ adjusted_assig dest_assig
-- this is the first time we jumped to this block.
joinToTargets_first :: (FR freeRegs, Instruction instr)
- => Platform
- -> BlockMap RegSet
+ => BlockMap RegSet
-> [NatBasicBlock instr]
-> BlockId
-> instr
@@ -129,24 +116,26 @@ joinToTargets_first :: (FR freeRegs, Instruction instr)
-> RegMap Loc
-> [RealReg]
-> RegM freeRegs ([NatBasicBlock instr], instr)
-joinToTargets_first platform block_live new_blocks block_id instr dest dests
- block_assig src_assig
- to_free
+joinToTargets_first block_live new_blocks block_id instr dest dests
+ block_assig src_assig
+ to_free
- = do -- free up the regs that are not live on entry to this block.
- freeregs <- getFreeRegsR
- let freeregs' = foldr (frReleaseReg platform) freeregs to_free
-
- -- remember the current assignment on entry to this block.
- setBlockAssigR (mapInsert dest (freeregs', src_assig) block_assig)
+ = do dflags <- getDynFlags
+ let platform = targetPlatform dflags
- joinToTargets' platform block_live new_blocks block_id instr dests
+ -- free up the regs that are not live on entry to this block.
+ freeregs <- getFreeRegsR
+ let freeregs' = foldr (frReleaseReg platform) freeregs to_free
+
+ -- remember the current assignment on entry to this block.
+ setBlockAssigR (mapInsert dest (freeregs', src_assig) block_assig)
+
+ joinToTargets' block_live new_blocks block_id instr dests
-- we've jumped to this block before
joinToTargets_again :: (Instruction instr, FR freeRegs)
- => Platform
- -> BlockMap RegSet
+ => BlockMap RegSet
-> [NatBasicBlock instr]
-> BlockId
-> instr
@@ -156,82 +145,82 @@ joinToTargets_again :: (Instruction instr, FR freeRegs)
-> UniqFM Loc
-> RegM freeRegs ([NatBasicBlock instr], instr)
joinToTargets_again
- platform block_live new_blocks block_id instr dest dests
+ block_live new_blocks block_id instr dest dests
src_assig dest_assig
- -- the assignments already match, no problem.
- | ufmToList dest_assig == ufmToList src_assig
- = joinToTargets' platform block_live new_blocks block_id instr dests
-
- -- assignments don't match, need fixup code
- | otherwise
- = do
-
- -- make a graph of what things need to be moved where.
- let graph = makeRegMovementGraph src_assig dest_assig
-
- -- look for cycles in the graph. This can happen if regs need to be swapped.
- -- Note that we depend on the fact that this function does a
- -- bottom up traversal of the tree-like portions of the graph.
- --
- -- eg, if we have
- -- R1 -> R2 -> R3
- --
- -- ie move value in R1 to R2 and value in R2 to R3.
- --
- -- We need to do the R2 -> R3 move before R1 -> R2.
- --
- let sccs = stronglyConnCompFromEdgedVerticesR graph
-
-{- -- debugging
- pprTrace
- ("joinToTargets: making fixup code")
- (vcat [ text " in block: " <> ppr block_id
- , text " jmp instruction: " <> ppr instr
- , text " src assignment: " <> ppr src_assig
- , text " dest assignment: " <> ppr dest_assig
- , text " movement graph: " <> ppr graph
- , text " sccs of graph: " <> ppr sccs
- , text ""])
- (return ())
+ -- the assignments already match, no problem.
+ | ufmToList dest_assig == ufmToList src_assig
+ = joinToTargets' block_live new_blocks block_id instr dests
+
+ -- assignments don't match, need fixup code
+ | otherwise
+ = do
+
+ -- make a graph of what things need to be moved where.
+ let graph = makeRegMovementGraph src_assig dest_assig
+
+ -- look for cycles in the graph. This can happen if regs need to be swapped.
+ -- Note that we depend on the fact that this function does a
+ -- bottom up traversal of the tree-like portions of the graph.
+ --
+ -- eg, if we have
+ -- R1 -> R2 -> R3
+ --
+ -- ie move value in R1 to R2 and value in R2 to R3.
+ --
+ -- We need to do the R2 -> R3 move before R1 -> R2.
+ --
+ let sccs = stronglyConnCompFromEdgedVerticesR graph
+
+{- -- debugging
+ pprTrace
+ ("joinToTargets: making fixup code")
+ (vcat [ text " in block: " <> ppr block_id
+ , text " jmp instruction: " <> ppr instr
+ , text " src assignment: " <> ppr src_assig
+ , text " dest assignment: " <> ppr dest_assig
+ , text " movement graph: " <> ppr graph
+ , text " sccs of graph: " <> ppr sccs
+ , text ""])
+ (return ())
-}
- delta <- getDeltaR
- fixUpInstrs_ <- mapM (handleComponent platform delta instr) sccs
- let fixUpInstrs = concat fixUpInstrs_
-
- -- make a new basic block containing the fixup code.
- -- A the end of the current block we will jump to the fixup one,
- -- then that will jump to our original destination.
- fixup_block_id <- getUniqueR
- let block = BasicBlock (mkBlockId fixup_block_id)
- $ fixUpInstrs ++ mkJumpInstr dest
-
-{- pprTrace
- ("joinToTargets: fixup code is:")
- (vcat [ ppr block
- , text ""])
- (return ())
+ delta <- getDeltaR
+ fixUpInstrs_ <- mapM (handleComponent delta instr) sccs
+ let fixUpInstrs = concat fixUpInstrs_
+
+ -- make a new basic block containing the fixup code.
+ -- A the end of the current block we will jump to the fixup one,
+ -- then that will jump to our original destination.
+ fixup_block_id <- getUniqueR
+ let block = BasicBlock (mkBlockId fixup_block_id)
+ $ fixUpInstrs ++ mkJumpInstr dest
+
+{- pprTrace
+ ("joinToTargets: fixup code is:")
+ (vcat [ ppr block
+ , text ""])
+ (return ())
-}
- -- if we didn't need any fixups, then don't include the block
- case fixUpInstrs of
- [] -> joinToTargets' platform block_live new_blocks block_id instr dests
+ -- if we didn't need any fixups, then don't include the block
+ case fixUpInstrs of
+ [] -> joinToTargets' block_live new_blocks block_id instr dests
- -- patch the original branch instruction so it goes to our
- -- fixup block instead.
- _ -> let instr' = patchJumpInstr instr
- (\bid -> if bid == dest
- then mkBlockId fixup_block_id
- else bid) -- no change!
-
- in joinToTargets' platform block_live (block : new_blocks) block_id instr' dests
+ -- patch the original branch instruction so it goes to our
+ -- fixup block instead.
+ _ -> let instr' = patchJumpInstr instr
+ (\bid -> if bid == dest
+ then mkBlockId fixup_block_id
+ else bid) -- no change!
+
+ in joinToTargets' block_live (block : new_blocks) block_id instr' dests
-- | Construct a graph of register\/spill movements.
--
--- Cyclic components seem to occur only very rarely.
+-- Cyclic components seem to occur only very rarely.
--
--- We cut some corners by not handling memory-to-memory moves.
--- This shouldn't happen because every temporary gets its own stack slot.
+-- We cut some corners by not handling memory-to-memory moves.
+-- This shouldn't happen because every temporary gets its own stack slot.
--
makeRegMovementGraph :: RegMap Loc -> RegMap Loc -> [(Unique, Loc, [Loc])]
makeRegMovementGraph adjusted_assig dest_assig
@@ -242,95 +231,96 @@ makeRegMovementGraph adjusted_assig dest_assig
-- | Expand out the destination, so InBoth destinations turn into
--- a combination of InReg and InMem.
+-- a combination of InReg and InMem.
--- The InBoth handling is a little tricky here. If the destination is
--- InBoth, then we must ensure that the value ends up in both locations.
--- An InBoth destination must conflict with an InReg or InMem source, so
--- we expand an InBoth destination as necessary.
+-- The InBoth handling is a little tricky here. If the destination is
+-- InBoth, then we must ensure that the value ends up in both locations.
+-- An InBoth destination must conflict with an InReg or InMem source, so
+-- we expand an InBoth destination as necessary.
--
--- An InBoth source is slightly different: we only care about the register
--- that the source value is in, so that we can move it to the destinations.
+-- An InBoth source is slightly different: we only care about the register
+-- that the source value is in, so that we can move it to the destinations.
--
-expandNode
- :: a
- -> Loc -- ^ source of move
- -> Loc -- ^ destination of move
- -> [(a, Loc, [Loc])]
+expandNode
+ :: a
+ -> Loc -- ^ source of move
+ -> Loc -- ^ destination of move
+ -> [(a, Loc, [Loc])]
expandNode vreg loc@(InReg src) (InBoth dst mem)
- | src == dst = [(vreg, loc, [InMem mem])]
- | otherwise = [(vreg, loc, [InReg dst, InMem mem])]
+ | src == dst = [(vreg, loc, [InMem mem])]
+ | otherwise = [(vreg, loc, [InReg dst, InMem mem])]
expandNode vreg loc@(InMem src) (InBoth dst mem)
- | src == mem = [(vreg, loc, [InReg dst])]
- | otherwise = [(vreg, loc, [InReg dst, InMem mem])]
+ | src == mem = [(vreg, loc, [InReg dst])]
+ | otherwise = [(vreg, loc, [InReg dst, InMem mem])]
expandNode _ (InBoth _ src) (InMem dst)
- | src == dst = [] -- guaranteed to be true
+ | src == dst = [] -- guaranteed to be true
expandNode _ (InBoth src _) (InReg dst)
- | src == dst = []
+ | src == dst = []
expandNode vreg (InBoth src _) dst
- = expandNode vreg (InReg src) dst
+ = expandNode vreg (InReg src) dst
expandNode vreg src dst
- | src == dst = []
- | otherwise = [(vreg, src, [dst])]
+ | src == dst = []
+ | otherwise = [(vreg, src, [dst])]
-- | Generate fixup code for a particular component in the move graph
--- This component tells us what values need to be moved to what
--- destinations. We have eliminated any possibility of single-node
--- cycles in expandNode above.
+-- This component tells us what values need to be moved to what
+-- destinations. We have eliminated any possibility of single-node
+-- cycles in expandNode above.
--
-handleComponent
- :: Instruction instr
- => Platform -> Int -> instr -> SCC (Unique, Loc, [Loc]) -> RegM freeRegs [instr]
+handleComponent
+ :: Instruction instr
+ => Int -> instr -> SCC (Unique, Loc, [Loc])
+ -> RegM freeRegs [instr]
-- If the graph is acyclic then we won't get the swapping problem below.
--- In this case we can just do the moves directly, and avoid having to
--- go via a spill slot.
+-- In this case we can just do the moves directly, and avoid having to
+-- go via a spill slot.
--
-handleComponent platform delta _ (AcyclicSCC (vreg, src, dsts))
- = mapM (makeMove platform delta vreg src) dsts
+handleComponent delta _ (AcyclicSCC (vreg, src, dsts))
+ = mapM (makeMove delta vreg src) dsts
-- Handle some cyclic moves.
--- This can happen if we have two regs that need to be swapped.
--- eg:
--- vreg source loc dest loc
--- (vreg1, InReg r1, [InReg r2])
--- (vreg2, InReg r2, [InReg r1])
+-- This can happen if we have two regs that need to be swapped.
+-- eg:
+-- vreg source loc dest loc
+-- (vreg1, InReg r1, [InReg r2])
+-- (vreg2, InReg r2, [InReg r1])
+--
+-- To avoid needing temp register, we just spill all the source regs, then
+-- reaload them into their destination regs.
--
--- To avoid needing temp register, we just spill all the source regs, then
--- reaload them into their destination regs.
---
--- Note that we can not have cycles that involve memory locations as
--- sources as single destination because memory locations (stack slots)
--- are allocated exclusively for a virtual register and therefore can not
--- require a fixup.
+-- Note that we can not have cycles that involve memory locations as
+-- sources as single destination because memory locations (stack slots)
+-- are allocated exclusively for a virtual register and therefore can not
+-- require a fixup.
--
-handleComponent platform delta instr
- (CyclicSCC ( (vreg, InReg sreg, (InReg dreg: _)) : rest))
+handleComponent delta instr
+ (CyclicSCC ((vreg, InReg sreg, (InReg dreg: _)) : rest))
-- dest list may have more than one element, if the reg is also InMem.
= do
- -- spill the source into its slot
- (instrSpill, slot)
- <- spillR platform (RegReal sreg) vreg
+ -- spill the source into its slot
+ (instrSpill, slot)
+ <- spillR (RegReal sreg) vreg
- -- reload into destination reg
- instrLoad <- loadR platform (RegReal dreg) slot
-
- remainingFixUps <- mapM (handleComponent platform delta instr)
- (stronglyConnCompFromEdgedVerticesR rest)
+ -- reload into destination reg
+ instrLoad <- loadR (RegReal dreg) slot
- -- make sure to do all the reloads after all the spills,
- -- so we don't end up clobbering the source values.
- return ([instrSpill] ++ concat remainingFixUps ++ [instrLoad])
+ remainingFixUps <- mapM (handleComponent delta instr)
+ (stronglyConnCompFromEdgedVerticesR rest)
-handleComponent _ _ _ (CyclicSCC _)
+ -- make sure to do all the reloads after all the spills,
+ -- so we don't end up clobbering the source values.
+ return ([instrSpill] ++ concat remainingFixUps ++ [instrLoad])
+
+handleComponent _ _ (CyclicSCC _)
= panic "Register Allocator: handleComponent cyclic"
@@ -338,29 +328,31 @@ handleComponent _ _ _ (CyclicSCC _)
--
makeMove
:: Instruction instr
- => Platform
- -> Int -- ^ current C stack delta.
+ => Int -- ^ current C stack delta.
-> Unique -- ^ unique of the vreg that we're moving.
-> Loc -- ^ source location.
-> Loc -- ^ destination location.
-> RegM freeRegs instr -- ^ move instruction.
-makeMove platform _ vreg (InReg src) (InReg dst)
- = do recordSpill (SpillJoinRR vreg)
- return $ mkRegRegMoveInstr platform (RegReal src) (RegReal dst)
-
-makeMove platform delta vreg (InMem src) (InReg dst)
- = do recordSpill (SpillJoinRM vreg)
- return $ mkLoadInstr platform (RegReal dst) delta src
-
-makeMove platform delta vreg (InReg src) (InMem dst)
- = do recordSpill (SpillJoinRM vreg)
- return $ mkSpillInstr platform (RegReal src) delta dst
-
--- we don't handle memory to memory moves.
--- they shouldn't happen because we don't share stack slots between vregs.
-makeMove _ _ vreg src dst
- = panic $ "makeMove " ++ show vreg ++ " (" ++ show src ++ ") ("
- ++ show dst ++ ")"
- ++ " we don't handle mem->mem moves."
+makeMove delta vreg src dst
+ = do dflags <- getDynFlags
+ let platform = targetPlatform dflags
+
+ case (src, dst) of
+ (InReg s, InReg d) ->
+ do recordSpill (SpillJoinRR vreg)
+ return $ mkRegRegMoveInstr platform (RegReal s) (RegReal d)
+ (InMem s, InReg d) ->
+ do recordSpill (SpillJoinRM vreg)
+ return $ mkLoadInstr dflags (RegReal d) delta s
+ (InReg s, InMem d) ->
+ do recordSpill (SpillJoinRM vreg)
+ return $ mkSpillInstr dflags (RegReal s) delta d
+ _ ->
+ -- we don't handle memory to memory moves.
+ -- they shouldn't happen because we don't share
+ -- stack slots between vregs.
+ panic ("makeMove " ++ show vreg ++ " (" ++ show src ++ ") ("
+ ++ show dst ++ ")"
+ ++ " we don't handle mem->mem moves.")
diff --git a/compiler/nativeGen/RegAlloc/Linear/Main.hs b/compiler/nativeGen/RegAlloc/Linear/Main.hs
index c2f89de641..3f92ed975b 100644
--- a/compiler/nativeGen/RegAlloc/Linear/Main.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/Main.hs
@@ -106,9 +106,10 @@ import RegAlloc.Linear.StackMap
import RegAlloc.Linear.FreeRegs
import RegAlloc.Linear.Stats
import RegAlloc.Linear.JoinToTargets
-import qualified RegAlloc.Linear.PPC.FreeRegs as PPC
-import qualified RegAlloc.Linear.SPARC.FreeRegs as SPARC
-import qualified RegAlloc.Linear.X86.FreeRegs as X86
+import qualified RegAlloc.Linear.PPC.FreeRegs as PPC
+import qualified RegAlloc.Linear.SPARC.FreeRegs as SPARC
+import qualified RegAlloc.Linear.X86.FreeRegs as X86
+import qualified RegAlloc.Linear.X86_64.FreeRegs as X86_64
import TargetReg
import RegAlloc.Liveness
import Instruction
@@ -188,52 +189,51 @@ linearRegAlloc
linearRegAlloc dflags first_id block_live sccs
= let platform = targetPlatform dflags
in case platformArch platform of
- ArchX86 -> linearRegAlloc' platform (frInitFreeRegs platform :: X86.FreeRegs) first_id block_live sccs
- ArchX86_64 -> linearRegAlloc' platform (frInitFreeRegs platform :: X86.FreeRegs) first_id block_live sccs
- ArchSPARC -> linearRegAlloc' platform (frInitFreeRegs platform :: SPARC.FreeRegs) first_id block_live sccs
- ArchPPC -> linearRegAlloc' platform (frInitFreeRegs platform :: PPC.FreeRegs) first_id block_live sccs
+ ArchX86 -> linearRegAlloc' dflags (frInitFreeRegs platform :: X86.FreeRegs) first_id block_live sccs
+ ArchX86_64 -> linearRegAlloc' dflags (frInitFreeRegs platform :: X86_64.FreeRegs) first_id block_live sccs
+ ArchSPARC -> linearRegAlloc' dflags (frInitFreeRegs platform :: SPARC.FreeRegs) first_id block_live sccs
+ ArchPPC -> linearRegAlloc' dflags (frInitFreeRegs platform :: PPC.FreeRegs) first_id block_live sccs
ArchARM _ _ _ -> panic "linearRegAlloc ArchARM"
ArchPPC_64 -> panic "linearRegAlloc ArchPPC_64"
ArchUnknown -> panic "linearRegAlloc ArchUnknown"
linearRegAlloc'
:: (FR freeRegs, Outputable instr, Instruction instr)
- => Platform
+ => DynFlags
-> freeRegs
-> BlockId -- ^ the first block
-> BlockMap RegSet -- ^ live regs on entry to each basic block
-> [SCC (LiveBasicBlock instr)] -- ^ instructions annotated with "deaths"
-> UniqSM ([NatBasicBlock instr], RegAllocStats)
-linearRegAlloc' platform initFreeRegs first_id block_live sccs
+linearRegAlloc' dflags initFreeRegs first_id block_live sccs
= do us <- getUs
let (_, _, stats, blocks) =
- runR emptyBlockMap initFreeRegs emptyRegMap (emptyStackMap platform) us
- $ linearRA_SCCs platform first_id block_live [] sccs
+ runR dflags emptyBlockMap initFreeRegs emptyRegMap (emptyStackMap dflags) us
+ $ linearRA_SCCs first_id block_live [] sccs
return (blocks, stats)
linearRA_SCCs :: (FR freeRegs, Instruction instr, Outputable instr)
- => Platform
- -> BlockId
+ => BlockId
-> BlockMap RegSet
-> [NatBasicBlock instr]
-> [SCC (LiveBasicBlock instr)]
-> RegM freeRegs [NatBasicBlock instr]
-linearRA_SCCs _ _ _ blocksAcc []
+linearRA_SCCs _ _ blocksAcc []
= return $ reverse blocksAcc
-linearRA_SCCs platform first_id block_live blocksAcc (AcyclicSCC block : sccs)
- = do blocks' <- processBlock platform block_live block
- linearRA_SCCs platform first_id block_live
+linearRA_SCCs first_id block_live blocksAcc (AcyclicSCC block : sccs)
+ = do blocks' <- processBlock block_live block
+ linearRA_SCCs first_id block_live
((reverse blocks') ++ blocksAcc)
sccs
-linearRA_SCCs platform first_id block_live blocksAcc (CyclicSCC blocks : sccs)
+linearRA_SCCs first_id block_live blocksAcc (CyclicSCC blocks : sccs)
= do
- blockss' <- process platform first_id block_live blocks [] (return []) False
- linearRA_SCCs platform first_id block_live
+ blockss' <- process first_id block_live blocks [] (return []) False
+ linearRA_SCCs first_id block_live
(reverse (concat blockss') ++ blocksAcc)
sccs
@@ -250,8 +250,7 @@ linearRA_SCCs platform first_id block_live blocksAcc (CyclicSCC blocks : sccs)
-}
process :: (FR freeRegs, Instruction instr, Outputable instr)
- => Platform
- -> BlockId
+ => BlockId
-> BlockMap RegSet
-> [GenBasicBlock (LiveInstr instr)]
-> [GenBasicBlock (LiveInstr instr)]
@@ -259,10 +258,10 @@ process :: (FR freeRegs, Instruction instr, Outputable instr)
-> Bool
-> RegM freeRegs [[NatBasicBlock instr]]
-process _ _ _ [] [] accum _
+process _ _ [] [] accum _
= return $ reverse accum
-process platform first_id block_live [] next_round accum madeProgress
+process first_id block_live [] next_round accum madeProgress
| not madeProgress
{- BUGS: There are so many unreachable blocks in the code the warnings are overwhelming.
@@ -272,10 +271,10 @@ process platform first_id block_live [] next_round accum madeProgress
= return $ reverse accum
| otherwise
- = process platform first_id block_live
+ = process first_id block_live
next_round [] accum False
-process platform first_id block_live (b@(BasicBlock id _) : blocks)
+process first_id block_live (b@(BasicBlock id _) : blocks)
next_round accum madeProgress
= do
block_assig <- getBlockAssigR
@@ -283,11 +282,11 @@ process platform first_id block_live (b@(BasicBlock id _) : blocks)
if isJust (mapLookup id block_assig)
|| id == first_id
then do
- b' <- processBlock platform block_live b
- process platform first_id block_live blocks
+ b' <- processBlock block_live b
+ process first_id block_live blocks
next_round (b' : accum) True
- else process platform first_id block_live blocks
+ else process first_id block_live blocks
(b : next_round) accum madeProgress
@@ -295,24 +294,25 @@ process platform first_id block_live (b@(BasicBlock id _) : blocks)
--
processBlock
:: (FR freeRegs, Outputable instr, Instruction instr)
- => Platform
- -> BlockMap RegSet -- ^ live regs on entry to each basic block
+ => BlockMap RegSet -- ^ live regs on entry to each basic block
-> LiveBasicBlock instr -- ^ block to do register allocation on
-> RegM freeRegs [NatBasicBlock instr] -- ^ block with registers allocated
-processBlock platform block_live (BasicBlock id instrs)
- = do initBlock platform id block_live
+processBlock block_live (BasicBlock id instrs)
+ = do initBlock id block_live
(instrs', fixups)
- <- linearRA platform block_live [] [] id instrs
+ <- linearRA block_live [] [] id instrs
return $ BasicBlock id instrs' : fixups
-- | Load the freeregs and current reg assignment into the RegM state
-- for the basic block with this BlockId.
initBlock :: FR freeRegs
- => Platform -> BlockId -> BlockMap RegSet -> RegM freeRegs ()
-initBlock platform id block_live
- = do block_assig <- getBlockAssigR
+ => BlockId -> BlockMap RegSet -> RegM freeRegs ()
+initBlock id block_live
+ = do dflags <- getDynFlags
+ let platform = targetPlatform dflags
+ block_assig <- getBlockAssigR
case mapLookup id block_assig of
-- no prior info about this block: we must consider
-- any fixed regs to be allocated, but we can ignore
@@ -337,8 +337,7 @@ initBlock platform id block_live
-- | Do allocation for a sequence of instructions.
linearRA
:: (FR freeRegs, Outputable instr, Instruction instr)
- => Platform
- -> BlockMap RegSet -- ^ map of what vregs are live on entry to each block.
+ => BlockMap RegSet -- ^ map of what vregs are live on entry to each block.
-> [instr] -- ^ accumulator for instructions already processed.
-> [NatBasicBlock instr] -- ^ accumulator for blocks of fixup code.
-> BlockId -- ^ id of the current block, for debugging.
@@ -349,25 +348,23 @@ linearRA
, [NatBasicBlock instr]) -- fresh blocks of fixup code.
-linearRA _ _ accInstr accFixup _ []
+linearRA _ accInstr accFixup _ []
= return
( reverse accInstr -- instrs need to be returned in the correct order.
, accFixup) -- it doesn't matter what order the fixup blocks are returned in.
-linearRA platform block_live accInstr accFixups id (instr:instrs)
+linearRA block_live accInstr accFixups id (instr:instrs)
= do
- (accInstr', new_fixups)
- <- raInsn platform block_live accInstr id instr
+ (accInstr', new_fixups) <- raInsn block_live accInstr id instr
- linearRA platform block_live accInstr' (new_fixups ++ accFixups) id instrs
+ linearRA block_live accInstr' (new_fixups ++ accFixups) id instrs
-- | Do allocation for a single instruction.
raInsn
:: (FR freeRegs, Outputable instr, Instruction instr)
- => Platform
- -> BlockMap RegSet -- ^ map of what vregs are love on entry to each block.
+        => BlockMap RegSet  -- ^ map of what vregs are live on entry to each block.
-> [instr] -- ^ accumulator for instructions already processed.
-> BlockId -- ^ the id of the current block, for debugging
-> LiveInstr instr -- ^ the instr to have its regs allocated, with liveness info.
@@ -375,17 +372,17 @@ raInsn
( [instr] -- new instructions
, [NatBasicBlock instr]) -- extra fixup blocks
-raInsn _ _ new_instrs _ (LiveInstr ii Nothing)
+raInsn _ new_instrs _ (LiveInstr ii Nothing)
| Just n <- takeDeltaInstr ii
= do setDeltaR n
return (new_instrs, [])
-raInsn _ _ new_instrs _ (LiveInstr ii Nothing)
+raInsn _ new_instrs _ (LiveInstr ii Nothing)
| isMetaInstr ii
= return (new_instrs, [])
-raInsn platform block_live new_instrs id (LiveInstr (Instr instr) (Just live))
+raInsn block_live new_instrs id (LiveInstr (Instr instr) (Just live))
= do
assig <- getAssigR
@@ -420,12 +417,12 @@ raInsn platform block_live new_instrs id (LiveInstr (Instr instr) (Just live))
-}
return (new_instrs, [])
- _ -> genRaInsn platform block_live new_instrs id instr
+ _ -> genRaInsn block_live new_instrs id instr
(uniqSetToList $ liveDieRead live)
(uniqSetToList $ liveDieWrite live)
-raInsn _ _ _ _ instr
+raInsn _ _ _ instr
= pprPanic "raInsn" (text "no match for:" <> ppr instr)
@@ -435,8 +432,7 @@ isInReg src assig | Just (InReg _) <- lookupUFM assig src = True
genRaInsn :: (FR freeRegs, Instruction instr, Outputable instr)
- => Platform
- -> BlockMap RegSet
+ => BlockMap RegSet
-> [instr]
-> BlockId
-> instr
@@ -444,8 +440,10 @@ genRaInsn :: (FR freeRegs, Instruction instr, Outputable instr)
-> [Reg]
-> RegM freeRegs ([instr], [NatBasicBlock instr])
-genRaInsn platform block_live new_instrs block_id instr r_dying w_dying =
- case regUsageOfInstr platform instr of { RU read written ->
+genRaInsn block_live new_instrs block_id instr r_dying w_dying = do
+ dflags <- getDynFlags
+ let platform = targetPlatform dflags
+ case regUsageOfInstr platform instr of { RU read written ->
do
let real_written = [ rr | (RegReal rr) <- written ]
let virt_written = [ vr | (RegVirtual vr) <- written ]
@@ -471,32 +469,32 @@ genRaInsn platform block_live new_instrs block_id instr r_dying w_dying =
-- (a), (b) allocate real regs for all regs read by this instruction.
(r_spills, r_allocd) <-
- allocateRegsAndSpill platform True{-reading-} virt_read [] [] virt_read
+ allocateRegsAndSpill True{-reading-} virt_read [] [] virt_read
-- (c) save any temporaries which will be clobbered by this instruction
- clobber_saves <- saveClobberedTemps platform real_written r_dying
+ clobber_saves <- saveClobberedTemps real_written r_dying
-- (d) Update block map for new destinations
-- NB. do this before removing dead regs from the assignment, because
-- these dead regs might in fact be live in the jump targets (they're
-- only dead in the code that follows in the current basic block).
(fixup_blocks, adjusted_instr)
- <- joinToTargets platform block_live block_id instr
+ <- joinToTargets block_live block_id instr
-- (e) Delete all register assignments for temps which are read
-- (only) and die here. Update the free register list.
- releaseRegs platform r_dying
+ releaseRegs r_dying
-- (f) Mark regs which are clobbered as unallocatable
- clobberRegs platform real_written
+ clobberRegs real_written
-- (g) Allocate registers for temporaries *written* (only)
(w_spills, w_allocd) <-
- allocateRegsAndSpill platform False{-writing-} virt_written [] [] virt_written
+ allocateRegsAndSpill False{-writing-} virt_written [] [] virt_written
-- (h) Release registers for temps which are written here and not
-- used again.
- releaseRegs platform w_dying
+ releaseRegs w_dying
let
-- (i) Patch the instruction
@@ -539,20 +537,23 @@ genRaInsn platform block_live new_instrs block_id instr r_dying w_dying =
-- -----------------------------------------------------------------------------
-- releaseRegs
-releaseRegs :: FR freeRegs => Platform -> [Reg] -> RegM freeRegs ()
-releaseRegs platform regs = do
+releaseRegs :: FR freeRegs => [Reg] -> RegM freeRegs ()
+releaseRegs regs = do
+ dflags <- getDynFlags
+ let platform = targetPlatform dflags
assig <- getAssigR
free <- getFreeRegsR
+ let loop _ free _ | free `seq` False = undefined
+ loop assig free [] = do setAssigR assig; setFreeRegsR free; return ()
+ loop assig free (RegReal rr : rs) = loop assig (frReleaseReg platform rr free) rs
+ loop assig free (r:rs) =
+ case lookupUFM assig r of
+ Just (InBoth real _) -> loop (delFromUFM assig r)
+ (frReleaseReg platform real free) rs
+ Just (InReg real) -> loop (delFromUFM assig r)
+ (frReleaseReg platform real free) rs
+ _ -> loop (delFromUFM assig r) free rs
loop assig free regs
- where
- loop _ free _ | free `seq` False = undefined
- loop assig free [] = do setAssigR assig; setFreeRegsR free; return ()
- loop assig free (RegReal rr : rs) = loop assig (frReleaseReg platform rr free) rs
- loop assig free (r:rs) =
- case lookupUFM assig r of
- Just (InBoth real _) -> loop (delFromUFM assig r) (frReleaseReg platform real free) rs
- Just (InReg real) -> loop (delFromUFM assig r) (frReleaseReg platform real free) rs
- _other -> loop (delFromUFM assig r) free rs
-- -----------------------------------------------------------------------------
@@ -571,16 +572,15 @@ releaseRegs platform regs = do
saveClobberedTemps
:: (Outputable instr, Instruction instr, FR freeRegs)
- => Platform
- -> [RealReg] -- real registers clobbered by this instruction
+ => [RealReg] -- real registers clobbered by this instruction
-> [Reg] -- registers which are no longer live after this insn
-> RegM freeRegs [instr] -- return: instructions to spill any temps that will
-- be clobbered.
-saveClobberedTemps _ [] _
+saveClobberedTemps [] _
= return []
-saveClobberedTemps platform clobbered dying
+saveClobberedTemps clobbered dying
= do
assig <- getAssigR
let to_spill
@@ -598,7 +598,9 @@ saveClobberedTemps platform clobbered dying
= return (instrs, assig)
clobber assig instrs ((temp, reg) : rest)
- = do
+ = do dflags <- getDynFlags
+ let platform = targetPlatform dflags
+
freeRegs <- getFreeRegsR
let regclass = targetClassOfRealReg platform reg
freeRegs_thisClass = frGetFreeRegs platform regclass freeRegs
@@ -619,7 +621,7 @@ saveClobberedTemps platform clobbered dying
-- (2) no free registers: spill the value
[] -> do
- (spill, slot) <- spillR platform (RegReal reg) temp
+ (spill, slot) <- spillR (RegReal reg) temp
-- record why this reg was spilled for profiling
recordSpill (SpillClobber temp)
@@ -633,12 +635,14 @@ saveClobberedTemps platform clobbered dying
-- | Mark all these real regs as allocated,
-- and kick out their vreg assignments.
--
-clobberRegs :: FR freeRegs => Platform -> [RealReg] -> RegM freeRegs ()
-clobberRegs _ []
+clobberRegs :: FR freeRegs => [RealReg] -> RegM freeRegs ()
+clobberRegs []
= return ()
-clobberRegs platform clobbered
- = do
+clobberRegs clobbered
+ = do dflags <- getDynFlags
+ let platform = targetPlatform dflags
+
freeregs <- getFreeRegsR
setFreeRegsR $! foldr (frAllocateReg platform) freeregs clobbered
@@ -684,24 +688,23 @@ data SpillLoc = ReadMem StackSlot -- reading from register only in memory
allocateRegsAndSpill
:: (FR freeRegs, Outputable instr, Instruction instr)
- => Platform
- -> Bool -- True <=> reading (load up spilled regs)
+ => Bool -- True <=> reading (load up spilled regs)
-> [VirtualReg] -- don't push these out
-> [instr] -- spill insns
-> [RealReg] -- real registers allocated (accum.)
-> [VirtualReg] -- temps to allocate
-> RegM freeRegs ( [instr] , [RealReg])
-allocateRegsAndSpill _ _ _ spills alloc []
+allocateRegsAndSpill _ _ spills alloc []
= return (spills, reverse alloc)
-allocateRegsAndSpill platform reading keep spills alloc (r:rs)
+allocateRegsAndSpill reading keep spills alloc (r:rs)
= do assig <- getAssigR
- let doSpill = allocRegsAndSpill_spill platform reading keep spills alloc r rs assig
+ let doSpill = allocRegsAndSpill_spill reading keep spills alloc r rs assig
case lookupUFM assig r of
-- case (1a): already in a register
Just (InReg my_reg) ->
- allocateRegsAndSpill platform reading keep spills (my_reg:alloc) rs
+ allocateRegsAndSpill reading keep spills (my_reg:alloc) rs
-- case (1b): already in a register (and memory)
-- NB1. if we're writing this register, update its assignment to be
@@ -710,7 +713,7 @@ allocateRegsAndSpill platform reading keep spills alloc (r:rs)
-- are also read by the same instruction.
Just (InBoth my_reg _)
-> do when (not reading) (setAssigR (addToUFM assig r (InReg my_reg)))
- allocateRegsAndSpill platform reading keep spills (my_reg:alloc) rs
+ allocateRegsAndSpill reading keep spills (my_reg:alloc) rs
-- Not already in a register, so we need to find a free one...
Just (InMem slot) | reading -> doSpill (ReadMem slot)
@@ -729,8 +732,7 @@ allocateRegsAndSpill platform reading keep spills alloc (r:rs)
-- reading is redundant with reason, but we keep it around because it's
-- convenient and it maintains the recursive structure of the allocator. -- EZY
allocRegsAndSpill_spill :: (FR freeRegs, Instruction instr, Outputable instr)
- => Platform
- -> Bool
+ => Bool
-> [VirtualReg]
-> [instr]
-> [RealReg]
@@ -739,8 +741,9 @@ allocRegsAndSpill_spill :: (FR freeRegs, Instruction instr, Outputable instr)
-> UniqFM Loc
-> SpillLoc
-> RegM freeRegs ([instr], [RealReg])
-allocRegsAndSpill_spill platform reading keep spills alloc r rs assig spill_loc
- = do
+allocRegsAndSpill_spill reading keep spills alloc r rs assig spill_loc
+ = do dflags <- getDynFlags
+ let platform = targetPlatform dflags
freeRegs <- getFreeRegsR
let freeRegs_thisClass = frGetFreeRegs platform (classOfVirtualReg r) freeRegs
@@ -748,12 +751,12 @@ allocRegsAndSpill_spill platform reading keep spills alloc r rs assig spill_loc
-- case (2): we have a free register
(my_reg : _) ->
- do spills' <- loadTemp platform r spill_loc my_reg spills
+ do spills' <- loadTemp r spill_loc my_reg spills
setAssigR (addToUFM assig r $! newLocation spill_loc my_reg)
setFreeRegsR $ frAllocateReg platform my_reg freeRegs
- allocateRegsAndSpill platform reading keep spills' (my_reg : alloc) rs
+ allocateRegsAndSpill reading keep spills' (my_reg : alloc) rs
-- case (3): we need to push something out to free up a register
@@ -780,19 +783,19 @@ allocRegsAndSpill_spill platform reading keep spills alloc r rs assig spill_loc
-- we have a temporary that is in both register and mem,
-- just free up its register for use.
| (temp, my_reg, slot) : _ <- candidates_inBoth
- = do spills' <- loadTemp platform r spill_loc my_reg spills
+ = do spills' <- loadTemp r spill_loc my_reg spills
let assig1 = addToUFM assig temp (InMem slot)
let assig2 = addToUFM assig1 r $! newLocation spill_loc my_reg
setAssigR assig2
- allocateRegsAndSpill platform reading keep spills' (my_reg:alloc) rs
+ allocateRegsAndSpill reading keep spills' (my_reg:alloc) rs
-- otherwise, we need to spill a temporary that currently
-- resides in a register.
| (temp_to_push_out, (my_reg :: RealReg)) : _
<- candidates_inReg
= do
- (spill_insn, slot) <- spillR platform (RegReal my_reg) temp_to_push_out
+ (spill_insn, slot) <- spillR (RegReal my_reg) temp_to_push_out
let spill_store = (if reading then id else reverse)
[ -- COMMENT (fsLit "spill alloc")
spill_insn ]
@@ -806,9 +809,9 @@ allocRegsAndSpill_spill platform reading keep spills alloc r rs assig spill_loc
setAssigR assig2
-- if need be, load up a spilled temp into the reg we've just freed up.
- spills' <- loadTemp platform r spill_loc my_reg spills
+ spills' <- loadTemp r spill_loc my_reg spills
- allocateRegsAndSpill platform reading keep
+ allocateRegsAndSpill reading keep
(spill_store ++ spills')
(my_reg:alloc) rs
@@ -835,19 +838,18 @@ newLocation _ my_reg = InReg my_reg
-- | Load up a spilled temporary if we need to (read from memory).
loadTemp
:: (Outputable instr, Instruction instr)
- => Platform
- -> VirtualReg -- the temp being loaded
+ => VirtualReg -- the temp being loaded
-> SpillLoc -- the current location of this temp
-> RealReg -- the hreg to load the temp into
-> [instr]
-> RegM freeRegs [instr]
-loadTemp platform vreg (ReadMem slot) hreg spills
+loadTemp vreg (ReadMem slot) hreg spills
= do
- insn <- loadR platform (RegReal hreg) slot
+ insn <- loadR (RegReal hreg) slot
recordSpill (SpillLoad $ getUnique vreg)
return $ {- COMMENT (fsLit "spill load") : -} insn : spills
-loadTemp _ _ _ _ spills =
+loadTemp _ _ _ spills =
return spills
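
The pattern applied throughout Main.hs above can be summarised with a minimal
sketch (not part of the patch; someAllocatorStep is a made-up name, and the
imports of RegAlloc.Linear.Main are assumed): allocator steps no longer take a
Platform argument but recover it from the DynFlags carried in the RegM state.

    -- Hypothetical helper, mirroring the shape of clobberRegs above.
    someAllocatorStep :: FR freeRegs => [RealReg] -> RegM freeRegs ()
    someAllocatorStep clobbered
     = do dflags <- getDynFlags
          let platform = targetPlatform dflags
          freeregs <- getFreeRegsR
          setFreeRegsR $! foldr (frAllocateReg platform) freeregs clobbered
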
diff --git a/compiler/nativeGen/RegAlloc/Linear/StackMap.hs b/compiler/nativeGen/RegAlloc/Linear/StackMap.hs
index ea05cf0d0f..b1fc3c169e 100644
--- a/compiler/nativeGen/RegAlloc/Linear/StackMap.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/StackMap.hs
@@ -28,8 +28,8 @@ where
import RegAlloc.Linear.FreeRegs
+import DynFlags
import Outputable
-import Platform
import UniqFM
import Unique
@@ -47,8 +47,8 @@ data StackMap
-- | An empty stack map, with all slots available.
-emptyStackMap :: Platform -> StackMap
-emptyStackMap platform = StackMap [0 .. maxSpillSlots platform] emptyUFM
+emptyStackMap :: DynFlags -> StackMap
+emptyStackMap dflags = StackMap [0 .. maxSpillSlots dflags] emptyUFM
-- | If this vreg unique already has a stack assignment then return the slot number,
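
For callers the change is small: stack slots are still handed out by
getStackSlotFor, only the construction of the map now needs DynFlags so it can
size itself from maxSpillSlots. A hypothetical helper, for illustration only
(freshSlot is not in the patch; the getStackSlotFor signature is assumed from
its use in spillR below):

    freshSlot :: DynFlags -> Unique -> (StackMap, Int)
    freshSlot dflags temp = getStackSlotFor (emptyStackMap dflags) temp
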
diff --git a/compiler/nativeGen/RegAlloc/Linear/State.hs b/compiler/nativeGen/RegAlloc/Linear/State.hs
index ca2ecd3883..a608a947e7 100644
--- a/compiler/nativeGen/RegAlloc/Linear/State.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/State.hs
@@ -1,39 +1,31 @@
-- | State monad for the linear register allocator.
--- Here we keep all the state that the register allocator keeps track
--- of as it walks the instructions in a basic block.
-
-{-# OPTIONS_GHC -fno-warn-orphans #-}
-{-# OPTIONS -fno-warn-tabs #-}
--- The above warning supression flag is a temporary kludge.
--- While working on this module you are encouraged to remove it and
--- detab the module (please do the detabbing in a separate patch). See
--- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces
--- for details
+-- Here we keep all the state that the register allocator keeps track
+-- of as it walks the instructions in a basic block.
module RegAlloc.Linear.State (
- RA_State(..),
- RegM,
- runR,
-
- spillR,
- loadR,
-
- getFreeRegsR,
- setFreeRegsR,
-
- getAssigR,
- setAssigR,
-
- getBlockAssigR,
- setBlockAssigR,
-
- setDeltaR,
- getDeltaR,
-
- getUniqueR,
-
- recordSpill
+ RA_State(..),
+ RegM,
+ runR,
+
+ spillR,
+ loadR,
+
+ getFreeRegsR,
+ setFreeRegsR,
+
+ getAssigR,
+ setAssigR,
+
+ getBlockAssigR,
+ setBlockAssigR,
+
+ setDeltaR,
+ getDeltaR,
+
+ getUniqueR,
+
+ recordSpill
)
where
@@ -44,67 +36,79 @@ import RegAlloc.Liveness
import Instruction
import Reg
-import Platform
+import DynFlags
import Unique
import UniqSupply
+-- | The register allocator monad type.
+newtype RegM freeRegs a
+ = RegM { unReg :: RA_State freeRegs -> (# RA_State freeRegs, a #) }
+
+
-- | The RegM Monad
instance Monad (RegM freeRegs) where
m >>= k = RegM $ \s -> case unReg m s of { (# s, a #) -> unReg (k a) s }
return a = RegM $ \s -> (# s, a #)
+instance HasDynFlags (RegM a) where
+ getDynFlags = RegM $ \s -> (# s, ra_DynFlags s #)
+
-- | Run a computation in the RegM register allocator monad.
-runR :: BlockAssignment freeRegs
- -> freeRegs
- -> RegMap Loc
- -> StackMap
- -> UniqSupply
- -> RegM freeRegs a
- -> (BlockAssignment freeRegs, StackMap, RegAllocStats, a)
-
-runR block_assig freeregs assig stack us thing =
- case unReg thing
- (RA_State
- { ra_blockassig = block_assig
- , ra_freeregs = freeregs
- , ra_assig = assig
- , ra_delta = 0{-???-}
- , ra_stack = stack
- , ra_us = us
- , ra_spills = [] })
+runR :: DynFlags
+ -> BlockAssignment freeRegs
+ -> freeRegs
+ -> RegMap Loc
+ -> StackMap
+ -> UniqSupply
+ -> RegM freeRegs a
+ -> (BlockAssignment freeRegs, StackMap, RegAllocStats, a)
+
+runR dflags block_assig freeregs assig stack us thing =
+ case unReg thing
+ (RA_State
+ { ra_blockassig = block_assig
+ , ra_freeregs = freeregs
+ , ra_assig = assig
+ , ra_delta = 0{-???-}
+ , ra_stack = stack
+ , ra_us = us
+ , ra_spills = []
+ , ra_DynFlags = dflags })
of
- (# state'@RA_State
- { ra_blockassig = block_assig
- , ra_stack = stack' }
- , returned_thing #)
-
- -> (block_assig, stack', makeRAStats state', returned_thing)
+ (# state'@RA_State
+ { ra_blockassig = block_assig
+ , ra_stack = stack' }
+ , returned_thing #)
+
+ -> (block_assig, stack', makeRAStats state', returned_thing)
-- | Make register allocator stats from its final state.
makeRAStats :: RA_State freeRegs -> RegAllocStats
makeRAStats state
- = RegAllocStats
- { ra_spillInstrs = binSpillReasons (ra_spills state) }
+ = RegAllocStats
+ { ra_spillInstrs = binSpillReasons (ra_spills state) }
spillR :: Instruction instr
- => Platform -> Reg -> Unique -> RegM freeRegs (instr, Int)
+ => Reg -> Unique -> RegM freeRegs (instr, Int)
-spillR platform reg temp = RegM $ \ s@RA_State{ra_delta=delta, ra_stack=stack} ->
- let (stack',slot) = getStackSlotFor stack temp
- instr = mkSpillInstr platform reg delta slot
+spillR reg temp = RegM $ \ s@RA_State{ra_delta=delta, ra_stack=stack} ->
+ let dflags = ra_DynFlags s
+ (stack',slot) = getStackSlotFor stack temp
+ instr = mkSpillInstr dflags reg delta slot
in
(# s{ra_stack=stack'}, (instr,slot) #)
loadR :: Instruction instr
- => Platform -> Reg -> Int -> RegM freeRegs instr
+ => Reg -> Int -> RegM freeRegs instr
-loadR platform reg slot = RegM $ \ s@RA_State{ra_delta=delta} ->
- (# s, mkLoadInstr platform reg delta slot #)
+loadR reg slot = RegM $ \ s@RA_State{ra_delta=delta} ->
+ let dflags = ra_DynFlags s
+ in (# s, mkLoadInstr dflags reg delta slot #)
getFreeRegsR :: RegM freeRegs freeRegs
getFreeRegsR = RegM $ \ s@RA_State{ra_freeregs = freeregs} ->
@@ -146,4 +150,5 @@ getUniqueR = RegM $ \s ->
-- | Record that a spill instruction was inserted, for profiling.
recordSpill :: SpillReason -> RegM freeRegs ()
recordSpill spill
- = RegM $ \s -> (# s { ra_spills = spill : ra_spills s}, () #)
+ = RegM $ \s -> (# s { ra_spills = spill : ra_spills s}, () #)
+
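
A minimal sketch of what the new HasDynFlags instance buys (illustrative only;
platformInRegM is a made-up name and assumes Platform and targetPlatform are
in scope): any RegM action can now recover the target platform locally instead
of receiving it as an explicit argument, which is what lets the Platform
parameters disappear from the allocator above.

    platformInRegM :: RegM freeRegs Platform
    platformInRegM = do dflags <- getDynFlags
                        return (targetPlatform dflags)
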
diff --git a/compiler/nativeGen/RegAlloc/Linear/X86/FreeRegs.hs b/compiler/nativeGen/RegAlloc/Linear/X86/FreeRegs.hs
index 6309b24b45..0fcd658120 100644
--- a/compiler/nativeGen/RegAlloc/Linear/X86/FreeRegs.hs
+++ b/compiler/nativeGen/RegAlloc/Linear/X86/FreeRegs.hs
@@ -1,5 +1,5 @@
--- | Free regs map for i386 and x86_64
+-- | Free regs map for i386
module RegAlloc.Linear.X86.FreeRegs
where
@@ -12,29 +12,25 @@ import Platform
import Data.Word
import Data.Bits
-type FreeRegs
-#ifdef i386_TARGET_ARCH
- = Word32
-#else
- = Word64
-#endif
+newtype FreeRegs = FreeRegs Word32
+ deriving Show
noFreeRegs :: FreeRegs
-noFreeRegs = 0
+noFreeRegs = FreeRegs 0
releaseReg :: RealReg -> FreeRegs -> FreeRegs
-releaseReg (RealRegSingle n) f
- = f .|. (1 `shiftL` n)
+releaseReg (RealRegSingle n) (FreeRegs f)
+ = FreeRegs (f .|. (1 `shiftL` n))
releaseReg _ _
- = panic "RegAlloc.Linear.X86.FreeRegs.realeaseReg: no reg"
+ = panic "RegAlloc.Linear.X86.FreeRegs.releaseReg: no reg"
initFreeRegs :: Platform -> FreeRegs
initFreeRegs platform
= foldr releaseReg noFreeRegs (allocatableRegs platform)
-getFreeRegs :: Platform -> RegClass -> FreeRegs -> [RealReg] -- lazilly
-getFreeRegs platform cls f = go f 0
+getFreeRegs :: Platform -> RegClass -> FreeRegs -> [RealReg] -- lazily
+getFreeRegs platform cls (FreeRegs f) = go f 0
where go 0 _ = []
go n m
@@ -47,10 +43,9 @@ getFreeRegs platform cls f = go f 0
-- in order to find a floating-point one.
allocateReg :: RealReg -> FreeRegs -> FreeRegs
-allocateReg (RealRegSingle r) f
- = f .&. complement (1 `shiftL` r)
+allocateReg (RealRegSingle r) (FreeRegs f)
+ = FreeRegs (f .&. complement (1 `shiftL` r))
allocateReg _ _
= panic "RegAlloc.Linear.X86.FreeRegs.allocateReg: no reg"
-
diff --git a/compiler/nativeGen/RegAlloc/Linear/X86_64/FreeRegs.hs b/compiler/nativeGen/RegAlloc/Linear/X86_64/FreeRegs.hs
new file mode 100644
index 0000000000..c04fce9645
--- /dev/null
+++ b/compiler/nativeGen/RegAlloc/Linear/X86_64/FreeRegs.hs
@@ -0,0 +1,52 @@
+
+-- | Free regs map for x86_64
+module RegAlloc.Linear.X86_64.FreeRegs
+where
+
+import X86.Regs
+import RegClass
+import Reg
+import Panic
+import Platform
+
+import Data.Word
+import Data.Bits
+
+newtype FreeRegs = FreeRegs Word64
+ deriving Show
+
+noFreeRegs :: FreeRegs
+noFreeRegs = FreeRegs 0
+
+releaseReg :: RealReg -> FreeRegs -> FreeRegs
+releaseReg (RealRegSingle n) (FreeRegs f)
+ = FreeRegs (f .|. (1 `shiftL` n))
+
+releaseReg _ _
+ = panic "RegAlloc.Linear.X86_64.FreeRegs.releaseReg: no reg"
+
+initFreeRegs :: Platform -> FreeRegs
+initFreeRegs platform
+ = foldr releaseReg noFreeRegs (allocatableRegs platform)
+
+getFreeRegs :: Platform -> RegClass -> FreeRegs -> [RealReg] -- lazily
+getFreeRegs platform cls (FreeRegs f) = go f 0
+
+ where go 0 _ = []
+ go n m
+ | n .&. 1 /= 0 && classOfRealReg platform (RealRegSingle m) == cls
+ = RealRegSingle m : (go (n `shiftR` 1) $! (m+1))
+
+ | otherwise
+ = go (n `shiftR` 1) $! (m+1)
+ -- ToDo: there's no point looking through all the integer registers
+ -- in order to find a floating-point one.
+
+allocateReg :: RealReg -> FreeRegs -> FreeRegs
+allocateReg (RealRegSingle r) (FreeRegs f)
+ = FreeRegs (f .&. complement (1 `shiftL` r))
+
+allocateReg _ _
+ = panic "RegAlloc.Linear.X86_64.FreeRegs.allocateReg: no reg"
+
+
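
An illustrative property of the new bitmap representation (roundTrips is not
part of the module; FreeRegs only derives Show, so the comparison goes via
show): allocateReg clears a register's bit and releaseReg sets it again, so an
allocate-then-release round trip on any register drawn from
allocatableRegs platform restores the original free set.

    roundTrips :: Platform -> RealReg -> Bool
    roundTrips platform r
      = show (releaseReg r (allocateReg r (initFreeRegs platform)))
          == show (initFreeRegs platform)
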
diff --git a/compiler/nativeGen/RegAlloc/Liveness.hs b/compiler/nativeGen/RegAlloc/Liveness.hs
index 2483e12213..ac58944f1c 100644
--- a/compiler/nativeGen/RegAlloc/Liveness.hs
+++ b/compiler/nativeGen/RegAlloc/Liveness.hs
@@ -39,6 +39,7 @@ import OldCmm hiding (RegSet)
import OldPprCmm()
import Digraph
+import DynFlags
import Outputable
import Platform
import Unique
@@ -461,11 +462,11 @@ slurpReloadCoalesce live
-- | Strip away liveness information, yielding NatCmmDecl
stripLive
:: (Outputable statics, Outputable instr, Instruction instr)
- => Platform
+ => DynFlags
-> LiveCmmDecl statics instr
-> NatCmmDecl statics instr
-stripLive platform live
+stripLive dflags live
= stripCmm live
where stripCmm :: (Outputable statics, Outputable instr, Instruction instr)
@@ -481,7 +482,7 @@ stripLive platform live
= partition ((== first_id) . blockId) final_blocks
in CmmProc info label
- (ListGraph $ map (stripLiveBlock platform) $ first' : rest')
+ (ListGraph $ map (stripLiveBlock dflags) $ first' : rest')
-- procs used for stg_split_markers don't contain any blocks, and have no first_id.
stripCmm (CmmProc (LiveInfo info Nothing _ _) label [])
@@ -496,11 +497,11 @@ stripLive platform live
stripLiveBlock
:: Instruction instr
- => Platform
+ => DynFlags
-> LiveBasicBlock instr
-> NatBasicBlock instr
-stripLiveBlock platform (BasicBlock i lis)
+stripLiveBlock dflags (BasicBlock i lis)
= BasicBlock i instrs'
where (instrs', _)
@@ -511,11 +512,11 @@ stripLiveBlock platform (BasicBlock i lis)
spillNat acc (LiveInstr (SPILL reg slot) _ : instrs)
= do delta <- get
- spillNat (mkSpillInstr platform reg delta slot : acc) instrs
+ spillNat (mkSpillInstr dflags reg delta slot : acc) instrs
spillNat acc (LiveInstr (RELOAD slot reg) _ : instrs)
= do delta <- get
- spillNat (mkLoadInstr platform reg delta slot : acc) instrs
+ spillNat (mkLoadInstr dflags reg delta slot : acc) instrs
spillNat acc (LiveInstr (Instr instr) _ : instrs)
| Just i <- takeDeltaInstr instr
diff --git a/compiler/nativeGen/SPARC/Base.hs b/compiler/nativeGen/SPARC/Base.hs
index de11b9f77c..aa7b057e69 100644
--- a/compiler/nativeGen/SPARC/Base.hs
+++ b/compiler/nativeGen/SPARC/Base.hs
@@ -25,7 +25,7 @@ module SPARC.Base (
where
-import qualified Constants
+import DynFlags
import Panic
import Data.Int
@@ -40,9 +40,9 @@ wordLengthInBits
= wordLength * 8
-- Size of the available spill area
-spillAreaLength :: Int
+spillAreaLength :: DynFlags -> Int
spillAreaLength
- = Constants.rESERVED_C_STACK_BYTES
+ = rESERVED_C_STACK_BYTES
-- | We need 8 bytes because our largest registers are 64 bit.
spillSlotSize :: Int
diff --git a/compiler/nativeGen/SPARC/CodeGen.hs b/compiler/nativeGen/SPARC/CodeGen.hs
index a3409dd28b..9d6aeaafc9 100644
--- a/compiler/nativeGen/SPARC/CodeGen.hs
+++ b/compiler/nativeGen/SPARC/CodeGen.hs
@@ -111,7 +111,9 @@ stmtsToInstrs stmts
stmtToInstrs :: CmmStmt -> NatM InstrBlock
-stmtToInstrs stmt = case stmt of
+stmtToInstrs stmt = do
+ dflags <- getDynFlags
+ case stmt of
CmmNop -> return nilOL
CmmComment s -> return (unitOL (COMMENT s))
@@ -119,14 +121,14 @@ stmtToInstrs stmt = case stmt of
| isFloatType ty -> assignReg_FltCode size reg src
| isWord64 ty -> assignReg_I64Code reg src
| otherwise -> assignReg_IntCode size reg src
- where ty = cmmRegType reg
+ where ty = cmmRegType dflags reg
size = cmmTypeSize ty
CmmStore addr src
| isFloatType ty -> assignMem_FltCode size addr src
| isWord64 ty -> assignMem_I64Code addr src
| otherwise -> assignMem_IntCode size addr src
- where ty = cmmExprType src
+ where ty = cmmExprType dflags src
size = cmmTypeSize ty
CmmCall target result_regs args _
@@ -163,9 +165,9 @@ temporary, then do the other computation, and then use the temporary:
-- | Convert a BlockId to some CmmStatic data
-jumpTableEntry :: Maybe BlockId -> CmmStatic
-jumpTableEntry Nothing = CmmStaticLit (CmmInt 0 wordWidth)
-jumpTableEntry (Just blockid) = CmmStaticLit (CmmLabel blockLabel)
+jumpTableEntry :: DynFlags -> Maybe BlockId -> CmmStatic
+jumpTableEntry dflags Nothing = CmmStaticLit (CmmInt 0 (wordWidth dflags))
+jumpTableEntry _ (Just blockid) = CmmStaticLit (CmmLabel blockLabel)
where blockLabel = mkAsmTempLabel (getUnique blockid)
@@ -203,11 +205,12 @@ assignReg_IntCode _ reg src = do
-- Floating point assignment to memory
assignMem_FltCode :: Size -> CmmExpr -> CmmExpr -> NatM InstrBlock
assignMem_FltCode pk addr src = do
+ dflags <- getDynFlags
Amode dst__2 code1 <- getAmode addr
(src__2, code2) <- getSomeReg src
tmp1 <- getNewRegNat pk
let
- pk__2 = cmmExprType src
+ pk__2 = cmmExprType dflags src
code__2 = code1 `appOL` code2 `appOL`
if sizeToWidth pk == typeWidth pk__2
then unitOL (ST pk src__2 dst__2)
@@ -321,8 +324,8 @@ genSwitch dflags expr ids
generateJumpTableForInstr :: DynFlags -> Instr
-> Maybe (NatCmmDecl CmmStatics Instr)
-generateJumpTableForInstr _ (JMP_TBL _ ids label) =
- let jumpTable = map jumpTableEntry ids
+generateJumpTableForInstr dflags (JMP_TBL _ ids label) =
+ let jumpTable = map (jumpTableEntry dflags) ids
in Just (CmmData ReadOnlyData (Statics label jumpTable))
generateJumpTableForInstr _ _ = Nothing
@@ -458,17 +461,21 @@ genCCall target dest_regs argsAndHints
-- | Generate code to calculate an argument, and move it into one
-- or two integer vregs.
arg_to_int_vregs :: CmmExpr -> NatM (OrdList Instr, [Reg])
-arg_to_int_vregs arg
+arg_to_int_vregs arg = do dflags <- getDynFlags
+ arg_to_int_vregs' dflags arg
+
+arg_to_int_vregs' :: DynFlags -> CmmExpr -> NatM (OrdList Instr, [Reg])
+arg_to_int_vregs' dflags arg
-- If the expr produces a 64 bit int, then we can just use iselExpr64
- | isWord64 (cmmExprType arg)
+ | isWord64 (cmmExprType dflags arg)
= do (ChildCode64 code r_lo) <- iselExpr64 arg
let r_hi = getHiVRegFromLo r_lo
return (code, [r_hi, r_lo])
| otherwise
= do (src, code) <- getSomeReg arg
- let pk = cmmExprType arg
+ let pk = cmmExprType dflags arg
case cmmTypeSize pk of
diff --git a/compiler/nativeGen/SPARC/CodeGen/Amode.hs b/compiler/nativeGen/SPARC/CodeGen/Amode.hs
index 92e70eb4dc..139064ccbd 100644
--- a/compiler/nativeGen/SPARC/CodeGen/Amode.hs
+++ b/compiler/nativeGen/SPARC/CodeGen/Amode.hs
@@ -33,7 +33,8 @@ getAmode
-> NatM Amode
getAmode tree@(CmmRegOff _ _)
- = getAmode (mangleIndexTree tree)
+ = do dflags <- getDynFlags
+ getAmode (mangleIndexTree dflags tree)
getAmode (CmmMachOp (MO_Sub _) [x, CmmLit (CmmInt i _)])
| fits13Bits (-i)
diff --git a/compiler/nativeGen/SPARC/CodeGen/Base.hs b/compiler/nativeGen/SPARC/CodeGen/Base.hs
index 469361139b..367d9230ba 100644
--- a/compiler/nativeGen/SPARC/CodeGen/Base.hs
+++ b/compiler/nativeGen/SPARC/CodeGen/Base.hs
@@ -29,6 +29,7 @@ import Size
import Reg
import CodeGen.Platform
+import DynFlags
import OldCmm
import OldPprCmm ()
import Platform
@@ -114,13 +115,13 @@ getRegisterReg platform (CmmGlobal mid)
-- Expand CmmRegOff. ToDo: should we do it this way around, or convert
-- CmmExprs into CmmRegOff?
-mangleIndexTree :: CmmExpr -> CmmExpr
+mangleIndexTree :: DynFlags -> CmmExpr -> CmmExpr
-mangleIndexTree (CmmRegOff reg off)
+mangleIndexTree dflags (CmmRegOff reg off)
= CmmMachOp (MO_Add width) [CmmReg reg, CmmLit (CmmInt (fromIntegral off) width)]
- where width = typeWidth (cmmRegType reg)
+ where width = typeWidth (cmmRegType dflags reg)
-mangleIndexTree _
+mangleIndexTree _ _
= panic "SPARC.CodeGen.Base.mangleIndexTree: no match"
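
For concreteness, what mangleIndexTree does after this change (an illustrative
equation, not code from the patch): a register-plus-offset node is expanded
into an explicit addition at the register's width, which now comes from
DynFlags.

    -- mangleIndexTree dflags (CmmRegOff reg 8)
    --   == CmmMachOp (MO_Add w) [CmmReg reg, CmmLit (CmmInt 8 w)]
    --   where w = typeWidth (cmmRegType dflags reg)
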
diff --git a/compiler/nativeGen/SPARC/CodeGen/CondCode.hs b/compiler/nativeGen/SPARC/CodeGen/CondCode.hs
index 74f20196df..d459d98212 100644
--- a/compiler/nativeGen/SPARC/CodeGen/CondCode.hs
+++ b/compiler/nativeGen/SPARC/CodeGen/CondCode.hs
@@ -93,14 +93,15 @@ condIntCode cond x y = do
condFltCode :: Cond -> CmmExpr -> CmmExpr -> NatM CondCode
condFltCode cond x y = do
+ dflags <- getDynFlags
(src1, code1) <- getSomeReg x
(src2, code2) <- getSomeReg y
tmp <- getNewRegNat FF64
let
promote x = FxTOy FF32 FF64 x tmp
- pk1 = cmmExprType x
- pk2 = cmmExprType y
+ pk1 = cmmExprType dflags x
+ pk2 = cmmExprType dflags y
code__2 =
if pk1 `cmmEqType` pk2 then
diff --git a/compiler/nativeGen/SPARC/CodeGen/Gen32.hs b/compiler/nativeGen/SPARC/CodeGen/Gen32.hs
index c2c47e99aa..f7c7419e15 100644
--- a/compiler/nativeGen/SPARC/CodeGen/Gen32.hs
+++ b/compiler/nativeGen/SPARC/CodeGen/Gen32.hs
@@ -57,11 +57,12 @@ getRegister :: CmmExpr -> NatM Register
getRegister (CmmReg reg)
= do dflags <- getDynFlags
let platform = targetPlatform dflags
- return (Fixed (cmmTypeSize (cmmRegType reg))
- (getRegisterReg platform reg) nilOL)
+ return (Fixed (cmmTypeSize (cmmRegType dflags reg))
+ (getRegisterReg platform reg) nilOL)
getRegister tree@(CmmRegOff _ _)
- = getRegister (mangleIndexTree tree)
+ = do dflags <- getDynFlags
+ getRegister (mangleIndexTree dflags tree)
getRegister (CmmMachOp (MO_UU_Conv W64 W32)
[CmmMachOp (MO_U_Shr W64) [x,CmmLit (CmmInt 32 _)]]) = do
@@ -490,14 +491,15 @@ trivialFCode
-> NatM Register
trivialFCode pk instr x y = do
+ dflags <- getDynFlags
(src1, code1) <- getSomeReg x
(src2, code2) <- getSomeReg y
tmp <- getNewRegNat FF64
let
promote x = FxTOy FF32 FF64 x tmp
- pk1 = cmmExprType x
- pk2 = cmmExprType y
+ pk1 = cmmExprType dflags x
+ pk2 = cmmExprType dflags y
code__2 dst =
if pk1 `cmmEqType` pk2 then
diff --git a/compiler/nativeGen/SPARC/Instr.hs b/compiler/nativeGen/SPARC/Instr.hs
index 021b2fb772..9404badea6 100644
--- a/compiler/nativeGen/SPARC/Instr.hs
+++ b/compiler/nativeGen/SPARC/Instr.hs
@@ -46,6 +46,7 @@ import Size
import CLabel
import CodeGen.Platform
import BlockId
+import DynFlags
import OldCmm
import FastString
import FastBool
@@ -372,15 +373,16 @@ sparc_patchJumpInstr insn patchF
-- | Make a spill instruction.
-- On SPARC we spill below frame pointer leaving 2 words/spill
sparc_mkSpillInstr
- :: Platform
+ :: DynFlags
-> Reg -- ^ register to spill
-> Int -- ^ current stack delta
-> Int -- ^ spill slot to use
-> Instr
-sparc_mkSpillInstr platform reg _ slot
- = let off = spillSlotToOffset slot
- off_w = 1 + (off `div` 4)
+sparc_mkSpillInstr dflags reg _ slot
+ = let platform = targetPlatform dflags
+ off = spillSlotToOffset dflags slot
+ off_w = 1 + (off `div` 4)
sz = case targetClassOfReg platform reg of
RcInteger -> II32
RcFloat -> FF32
@@ -392,14 +394,15 @@ sparc_mkSpillInstr platform reg _ slot
-- | Make a spill reload instruction.
sparc_mkLoadInstr
- :: Platform
+ :: DynFlags
-> Reg -- ^ register to load into
-> Int -- ^ current stack delta
-> Int -- ^ spill slot to use
-> Instr
-sparc_mkLoadInstr platform reg _ slot
- = let off = spillSlotToOffset slot
+sparc_mkLoadInstr dflags reg _ slot
+ = let platform = targetPlatform dflags
+ off = spillSlotToOffset dflags slot
off_w = 1 + (off `div` 4)
sz = case targetClassOfReg platform reg of
RcInteger -> II32
diff --git a/compiler/nativeGen/SPARC/Ppr.hs b/compiler/nativeGen/SPARC/Ppr.hs
index e57e5e2725..55afac0ee2 100644
--- a/compiler/nativeGen/SPARC/Ppr.hs
+++ b/compiler/nativeGen/SPARC/Ppr.hs
@@ -38,6 +38,7 @@ import PprBase
import OldCmm
import OldPprCmm()
import CLabel
+import BlockId
import Unique ( Uniquable(..), pprUnique )
import Outputable
@@ -52,7 +53,7 @@ pprNatCmmDecl :: NatCmmDecl CmmStatics Instr -> SDoc
pprNatCmmDecl (CmmData section dats) =
pprSectionHeader section $$ pprDatas dats
-pprNatCmmDecl proc@(CmmProc _ lbl (ListGraph blocks)) =
+pprNatCmmDecl proc@(CmmProc top_info lbl (ListGraph blocks)) =
case topInfoTable proc of
Nothing ->
case blocks of
@@ -61,19 +62,15 @@ pprNatCmmDecl proc@(CmmProc _ lbl (ListGraph blocks)) =
blocks -> -- special case for code without info table:
pprSectionHeader Text $$
pprLabel lbl $$ -- blocks guaranteed not null, so label needed
- vcat (map pprBasicBlock blocks)
+ vcat (map (pprBasicBlock top_info) blocks)
- Just (Statics info_lbl info) ->
+ Just (Statics info_lbl _) ->
sdocWithPlatform $ \platform ->
- pprSectionHeader Text $$
- (
- (if platformHasSubsectionsViaSymbols platform
- then ppr (mkDeadStripPreventer info_lbl) <> char ':'
- else empty) $$
- vcat (map pprData info) $$
- pprLabel info_lbl
- ) $$
- vcat (map pprBasicBlock blocks) $$
+ (if platformHasSubsectionsViaSymbols platform
+ then pprSectionHeader Text $$
+ ppr (mkDeadStripPreventer info_lbl) <> char ':'
+ else empty) $$
+ vcat (map (pprBasicBlock top_info) blocks) $$
-- above: Even the first block gets a label, because with branch-chain
-- elimination, it might be the target of a goto.
(if platformHasSubsectionsViaSymbols platform
@@ -91,10 +88,18 @@ pprNatCmmDecl proc@(CmmProc _ lbl (ListGraph blocks)) =
else empty)
-pprBasicBlock :: NatBasicBlock Instr -> SDoc
-pprBasicBlock (BasicBlock blockid instrs) =
- pprLabel (mkAsmTempLabel (getUnique blockid)) $$
- vcat (map pprInstr instrs)
+pprBasicBlock :: BlockEnv CmmStatics -> NatBasicBlock Instr -> SDoc
+pprBasicBlock info_env (BasicBlock blockid instrs)
+ = maybe_infotable $$
+ pprLabel (mkAsmTempLabel (getUnique blockid)) $$
+ vcat (map pprInstr instrs)
+ where
+ maybe_infotable = case mapLookup blockid info_env of
+ Nothing -> empty
+ Just (Statics info_lbl info) ->
+ pprSectionHeader Text $$
+ vcat (map pprData info) $$
+ pprLabel info_lbl
pprDatas :: CmmStatics -> SDoc
@@ -333,7 +338,8 @@ pprSectionHeader seg
-- | Pretty print a data item.
pprDataItem :: CmmLit -> SDoc
pprDataItem lit
- = vcat (ppr_item (cmmTypeSize $ cmmLitType lit) lit)
+ = sdocWithDynFlags $ \dflags ->
+ vcat (ppr_item (cmmTypeSize $ cmmLitType dflags lit) lit)
where
imm = litToImm lit
diff --git a/compiler/nativeGen/SPARC/Stack.hs b/compiler/nativeGen/SPARC/Stack.hs
index 7f75693889..65dfef0e25 100644
--- a/compiler/nativeGen/SPARC/Stack.hs
+++ b/compiler/nativeGen/SPARC/Stack.hs
@@ -20,6 +20,7 @@ import SPARC.Regs
import SPARC.Base
import SPARC.Imm
+import DynFlags
import Outputable
-- | Get an AddrMode relative to the address in sp.
@@ -42,15 +43,15 @@ fpRel n
-- | Convert a spill slot number to a *byte* offset, with no sign.
--
-spillSlotToOffset :: Int -> Int
-spillSlotToOffset slot
- | slot >= 0 && slot < maxSpillSlots
+spillSlotToOffset :: DynFlags -> Int -> Int
+spillSlotToOffset dflags slot
+ | slot >= 0 && slot < maxSpillSlots dflags
= 64 + spillSlotSize * slot
| otherwise
= pprPanic "spillSlotToOffset:"
( text "invalid spill location: " <> int slot
- $$ text "maxSpillSlots: " <> int maxSpillSlots)
+ $$ text "maxSpillSlots: " <> int (maxSpillSlots dflags))
-- | The maximum number of spill slots available on the C stack.
@@ -59,7 +60,7 @@ spillSlotToOffset slot
-- Why do we reserve 64 bytes, instead of using the whole thing??
-- -- BL 2009/02/15
--
-maxSpillSlots :: Int
-maxSpillSlots
- = ((spillAreaLength - 64) `div` spillSlotSize) - 1
+maxSpillSlots :: DynFlags -> Int
+maxSpillSlots dflags
+ = ((spillAreaLength dflags - 64) `div` spillSlotSize) - 1
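
A worked example of the arithmetic above, with an assumed reserved C stack of
16384 bytes (the real value now comes from DynFlags) and spillSlotSize = 8:

    -- spillSlotToOffset dflags 0   == 64
    -- spillSlotToOffset dflags 10  == 64 + 8*10 == 144
    -- maxSpillSlots dflags         == ((16384 - 64) `div` 8) - 1 == 2039
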
diff --git a/compiler/nativeGen/X86/CodeGen.hs b/compiler/nativeGen/X86/CodeGen.hs
index e8f2eccd6b..b83ede89aa 100644
--- a/compiler/nativeGen/X86/CodeGen.hs
+++ b/compiler/nativeGen/X86/CodeGen.hs
@@ -52,7 +52,6 @@ import Outputable
import Unique
import FastString
import FastBool ( isFastTrue )
-import Constants ( wORD_SIZE )
import DynFlags
import Util
@@ -141,6 +140,7 @@ stmtsToInstrs stmts
stmtToInstrs :: CmmStmt -> NatM InstrBlock
stmtToInstrs stmt = do
+ dflags <- getDynFlags
is32Bit <- is32BitPlatform
case stmt of
CmmNop -> return nilOL
@@ -150,14 +150,14 @@ stmtToInstrs stmt = do
| isFloatType ty -> assignReg_FltCode size reg src
| is32Bit && isWord64 ty -> assignReg_I64Code reg src
| otherwise -> assignReg_IntCode size reg src
- where ty = cmmRegType reg
+ where ty = cmmRegType dflags reg
size = cmmTypeSize ty
CmmStore addr src
| isFloatType ty -> assignMem_FltCode size addr src
| is32Bit && isWord64 ty -> assignMem_I64Code addr src
| otherwise -> assignMem_IntCode size addr src
- where ty = cmmExprType src
+ where ty = cmmExprType dflags src
size = cmmTypeSize ty
CmmCall target result_regs args _
@@ -168,15 +168,15 @@ stmtToInstrs stmt = do
CmmSwitch arg ids -> do dflags <- getDynFlags
genSwitch dflags arg ids
CmmJump arg gregs -> do dflags <- getDynFlags
- let platform = targetPlatform dflags
- genJump arg (jumpRegs platform gregs)
+ genJump arg (jumpRegs dflags gregs)
CmmReturn ->
panic "stmtToInstrs: return statement should have been cps'd away"
-jumpRegs :: Platform -> Maybe [GlobalReg] -> [Reg]
-jumpRegs platform Nothing = allHaskellArgRegs platform
-jumpRegs platform (Just gregs) = [ RegReal r | Just r <- map (globalRegMaybe platform) gregs ]
+jumpRegs :: DynFlags -> Maybe [GlobalReg] -> [Reg]
+jumpRegs dflags Nothing = allHaskellArgRegs dflags
+jumpRegs dflags (Just gregs) = [ RegReal r | Just r <- map (globalRegMaybe platform) gregs ]
+ where platform = targetPlatform dflags
--------------------------------------------------------------------------------
-- | 'InstrBlock's are the insn sequences generated by the insn selectors.
@@ -274,9 +274,9 @@ is32BitInteger i = i64 <= 0x7fffffff && i64 >= -0x80000000
-- | Convert a BlockId to some CmmStatic data
-jumpTableEntry :: Maybe BlockId -> CmmStatic
-jumpTableEntry Nothing = CmmStaticLit (CmmInt 0 wordWidth)
-jumpTableEntry (Just blockid) = CmmStaticLit (CmmLabel blockLabel)
+jumpTableEntry :: DynFlags -> Maybe BlockId -> CmmStatic
+jumpTableEntry dflags Nothing = CmmStaticLit (CmmInt 0 (wordWidth dflags))
+jumpTableEntry _ (Just blockid) = CmmStaticLit (CmmLabel blockLabel)
where blockLabel = mkAsmTempLabel (getUnique blockid)
@@ -285,10 +285,10 @@ jumpTableEntry (Just blockid) = CmmStaticLit (CmmLabel blockLabel)
-- Expand CmmRegOff. ToDo: should we do it this way around, or convert
-- CmmExprs into CmmRegOff?
-mangleIndexTree :: CmmReg -> Int -> CmmExpr
-mangleIndexTree reg off
+mangleIndexTree :: DynFlags -> CmmReg -> Int -> CmmExpr
+mangleIndexTree dflags reg off
= CmmMachOp (MO_Add width) [CmmReg reg, CmmLit (CmmInt (fromIntegral off) width)]
- where width = typeWidth (cmmRegType reg)
+ where width = typeWidth (cmmRegType dflags reg)
-- | The dual to getAnyReg: compute an expression into a register, but
-- we don't mind which one it is.
@@ -406,12 +406,13 @@ iselExpr64 expr
--------------------------------------------------------------------------------
getRegister :: CmmExpr -> NatM Register
-getRegister e = do is32Bit <- is32BitPlatform
- getRegister' is32Bit e
+getRegister e = do dflags <- getDynFlags
+ is32Bit <- is32BitPlatform
+ getRegister' dflags is32Bit e
-getRegister' :: Bool -> CmmExpr -> NatM Register
+getRegister' :: DynFlags -> Bool -> CmmExpr -> NatM Register
-getRegister' is32Bit (CmmReg reg)
+getRegister' dflags is32Bit (CmmReg reg)
= case reg of
CmmGlobal PicBaseReg
| is32Bit ->
@@ -423,44 +424,43 @@ getRegister' is32Bit (CmmReg reg)
_ ->
do use_sse2 <- sse2Enabled
let
- sz = cmmTypeSize (cmmRegType reg)
+ sz = cmmTypeSize (cmmRegType dflags reg)
size | not use_sse2 && isFloatSize sz = FF80
| otherwise = sz
--
- dflags <- getDynFlags
let platform = targetPlatform dflags
return (Fixed size (getRegisterReg platform use_sse2 reg) nilOL)
-getRegister' is32Bit (CmmRegOff r n)
- = getRegister' is32Bit $ mangleIndexTree r n
+getRegister' dflags is32Bit (CmmRegOff r n)
+ = getRegister' dflags is32Bit $ mangleIndexTree dflags r n
-- for 32-bit architectuers, support some 64 -> 32 bit conversions:
-- TO_W_(x), TO_W_(x >> 32)
-getRegister' is32Bit (CmmMachOp (MO_UU_Conv W64 W32)
+getRegister' _ is32Bit (CmmMachOp (MO_UU_Conv W64 W32)
[CmmMachOp (MO_U_Shr W64) [x,CmmLit (CmmInt 32 _)]])
| is32Bit = do
ChildCode64 code rlo <- iselExpr64 x
return $ Fixed II32 (getHiVRegFromLo rlo) code
-getRegister' is32Bit (CmmMachOp (MO_SS_Conv W64 W32)
+getRegister' _ is32Bit (CmmMachOp (MO_SS_Conv W64 W32)
[CmmMachOp (MO_U_Shr W64) [x,CmmLit (CmmInt 32 _)]])
| is32Bit = do
ChildCode64 code rlo <- iselExpr64 x
return $ Fixed II32 (getHiVRegFromLo rlo) code
-getRegister' is32Bit (CmmMachOp (MO_UU_Conv W64 W32) [x])
+getRegister' _ is32Bit (CmmMachOp (MO_UU_Conv W64 W32) [x])
| is32Bit = do
ChildCode64 code rlo <- iselExpr64 x
return $ Fixed II32 rlo code
-getRegister' is32Bit (CmmMachOp (MO_SS_Conv W64 W32) [x])
+getRegister' _ is32Bit (CmmMachOp (MO_SS_Conv W64 W32) [x])
| is32Bit = do
ChildCode64 code rlo <- iselExpr64 x
return $ Fixed II32 rlo code
-getRegister' _ (CmmLit lit@(CmmFloat f w)) =
+getRegister' _ _ (CmmLit lit@(CmmFloat f w)) =
if_sse2 float_const_sse2 float_const_x87
where
float_const_sse2
@@ -491,60 +491,60 @@ getRegister' _ (CmmLit lit@(CmmFloat f w)) =
loadFloatAmode False w addr code
-- catch simple cases of zero- or sign-extended load
-getRegister' _ (CmmMachOp (MO_UU_Conv W8 W32) [CmmLoad addr _]) = do
+getRegister' _ _ (CmmMachOp (MO_UU_Conv W8 W32) [CmmLoad addr _]) = do
code <- intLoadCode (MOVZxL II8) addr
return (Any II32 code)
-getRegister' _ (CmmMachOp (MO_SS_Conv W8 W32) [CmmLoad addr _]) = do
+getRegister' _ _ (CmmMachOp (MO_SS_Conv W8 W32) [CmmLoad addr _]) = do
code <- intLoadCode (MOVSxL II8) addr
return (Any II32 code)
-getRegister' _ (CmmMachOp (MO_UU_Conv W16 W32) [CmmLoad addr _]) = do
+getRegister' _ _ (CmmMachOp (MO_UU_Conv W16 W32) [CmmLoad addr _]) = do
code <- intLoadCode (MOVZxL II16) addr
return (Any II32 code)
-getRegister' _ (CmmMachOp (MO_SS_Conv W16 W32) [CmmLoad addr _]) = do
+getRegister' _ _ (CmmMachOp (MO_SS_Conv W16 W32) [CmmLoad addr _]) = do
code <- intLoadCode (MOVSxL II16) addr
return (Any II32 code)
-- catch simple cases of zero- or sign-extended load
-getRegister' is32Bit (CmmMachOp (MO_UU_Conv W8 W64) [CmmLoad addr _])
+getRegister' _ is32Bit (CmmMachOp (MO_UU_Conv W8 W64) [CmmLoad addr _])
| not is32Bit = do
code <- intLoadCode (MOVZxL II8) addr
return (Any II64 code)
-getRegister' is32Bit (CmmMachOp (MO_SS_Conv W8 W64) [CmmLoad addr _])
+getRegister' _ is32Bit (CmmMachOp (MO_SS_Conv W8 W64) [CmmLoad addr _])
| not is32Bit = do
code <- intLoadCode (MOVSxL II8) addr
return (Any II64 code)
-getRegister' is32Bit (CmmMachOp (MO_UU_Conv W16 W64) [CmmLoad addr _])
+getRegister' _ is32Bit (CmmMachOp (MO_UU_Conv W16 W64) [CmmLoad addr _])
| not is32Bit = do
code <- intLoadCode (MOVZxL II16) addr
return (Any II64 code)
-getRegister' is32Bit (CmmMachOp (MO_SS_Conv W16 W64) [CmmLoad addr _])
+getRegister' _ is32Bit (CmmMachOp (MO_SS_Conv W16 W64) [CmmLoad addr _])
| not is32Bit = do
code <- intLoadCode (MOVSxL II16) addr
return (Any II64 code)
-getRegister' is32Bit (CmmMachOp (MO_UU_Conv W32 W64) [CmmLoad addr _])
+getRegister' _ is32Bit (CmmMachOp (MO_UU_Conv W32 W64) [CmmLoad addr _])
| not is32Bit = do
code <- intLoadCode (MOV II32) addr -- 32-bit loads zero-extend
return (Any II64 code)
-getRegister' is32Bit (CmmMachOp (MO_SS_Conv W32 W64) [CmmLoad addr _])
+getRegister' _ is32Bit (CmmMachOp (MO_SS_Conv W32 W64) [CmmLoad addr _])
| not is32Bit = do
code <- intLoadCode (MOVSxL II32) addr
return (Any II64 code)
-getRegister' is32Bit (CmmMachOp (MO_Add W64) [CmmReg (CmmGlobal PicBaseReg),
+getRegister' _ is32Bit (CmmMachOp (MO_Add W64) [CmmReg (CmmGlobal PicBaseReg),
CmmLit displacement])
| not is32Bit = do
return $ Any II64 (\dst -> unitOL $
LEA II64 (OpAddr (ripRel (litToImm displacement))) (OpReg dst))
-getRegister' is32Bit (CmmMachOp mop [x]) = do -- unary MachOps
+getRegister' dflags is32Bit (CmmMachOp mop [x]) = do -- unary MachOps
sse2 <- sse2Enabled
case mop of
MO_F_Neg w
@@ -634,11 +634,11 @@ getRegister' is32Bit (CmmMachOp mop [x]) = do -- unary MachOps
conversionNop :: Size -> CmmExpr -> NatM Register
conversionNop new_size expr
- = do e_code <- getRegister' is32Bit expr
+ = do e_code <- getRegister' dflags is32Bit expr
return (swizzleRegisterRep e_code new_size)
-getRegister' is32Bit (CmmMachOp mop [x, y]) = do -- dyadic MachOps
+getRegister' _ is32Bit (CmmMachOp mop [x, y]) = do -- dyadic MachOps
sse2 <- sse2Enabled
case mop of
MO_F_Eq _ -> condFltReg is32Bit EQQ x y
@@ -812,14 +812,14 @@ getRegister' is32Bit (CmmMachOp mop [x, y]) = do -- dyadic MachOps
return (Fixed size result code)
-getRegister' _ (CmmLoad mem pk)
+getRegister' _ _ (CmmLoad mem pk)
| isFloatType pk
= do
Amode addr mem_code <- getAmode mem
use_sse2 <- sse2Enabled
loadFloatAmode use_sse2 (typeWidth pk) addr mem_code
-getRegister' is32Bit (CmmLoad mem pk)
+getRegister' _ is32Bit (CmmLoad mem pk)
| is32Bit && not (isWord64 pk)
= do
code <- intLoadCode instr mem
@@ -837,14 +837,14 @@ getRegister' is32Bit (CmmLoad mem pk)
-- simpler we do our 8-bit arithmetic with full 32-bit registers.
-- Simpler memory load code on x86_64
-getRegister' is32Bit (CmmLoad mem pk)
+getRegister' _ is32Bit (CmmLoad mem pk)
| not is32Bit
= do
code <- intLoadCode (MOV size) mem
return (Any size code)
where size = intSize $ typeWidth pk
-getRegister' is32Bit (CmmLit (CmmInt 0 width))
+getRegister' _ is32Bit (CmmLit (CmmInt 0 width))
= let
size = intSize width
@@ -861,8 +861,8 @@ getRegister' is32Bit (CmmLit (CmmInt 0 width))
-- optimisation for loading small literals on x86_64: take advantage
-- of the automatic zero-extension from 32 to 64 bits, because the 32-bit
-- instruction forms are shorter.
-getRegister' is32Bit (CmmLit lit)
- | not is32Bit, isWord64 (cmmLitType lit), not (isBigLit lit)
+getRegister' dflags is32Bit (CmmLit lit)
+ | not is32Bit, isWord64 (cmmLitType dflags lit), not (isBigLit lit)
= let
imm = litToImm lit
code dst = unitOL (MOV II32 (OpImm imm) (OpReg dst))
@@ -877,15 +877,13 @@ getRegister' is32Bit (CmmLit lit)
-- note2: all labels are small, because we're assuming the
-- small memory model (see gcc docs, -mcmodel=small).
-getRegister' _ (CmmLit lit)
- = let
- size = cmmTypeSize (cmmLitType lit)
- imm = litToImm lit
- code dst = unitOL (MOV size (OpImm imm) (OpReg dst))
- in
- return (Any size code)
+getRegister' dflags _ (CmmLit lit)
+ = do let size = cmmTypeSize (cmmLitType dflags lit)
+ imm = litToImm lit
+ code dst = unitOL (MOV size (OpImm imm) (OpReg dst))
+ return (Any size code)
-getRegister' _ other = pprPanic "getRegister(x86)" (ppr other)
+getRegister' _ _ other = pprPanic "getRegister(x86)" (ppr other)
intLoadCode :: (Operand -> Operand -> Instr) -> CmmExpr
@@ -958,7 +956,8 @@ getAmode e = do is32Bit <- is32BitPlatform
getAmode' is32Bit e
getAmode' :: Bool -> CmmExpr -> NatM Amode
-getAmode' _ (CmmRegOff r n) = getAmode $ mangleIndexTree r n
+getAmode' _ (CmmRegOff r n) = do dflags <- getDynFlags
+ getAmode $ mangleIndexTree dflags r n
getAmode' is32Bit (CmmMachOp (MO_Add W64) [CmmReg (CmmGlobal PicBaseReg),
CmmLit displacement])
@@ -1047,7 +1046,8 @@ getNonClobberedOperand (CmmLit lit) = do
else do
is32Bit <- is32BitPlatform
- if is32BitLit is32Bit lit && not (isFloatType (cmmLitType lit))
+ dflags <- getDynFlags
+ if is32BitLit is32Bit lit && not (isFloatType (cmmLitType dflags lit))
then return (OpImm (litToImm lit), nilOL)
else getNonClobberedOperand_generic (CmmLit lit)
@@ -1100,7 +1100,8 @@ getOperand (CmmLit lit) = do
else do
is32Bit <- is32BitPlatform
- if is32BitLit is32Bit lit && not (isFloatType (cmmLitType lit))
+ dflags <- getDynFlags
+ if is32BitLit is32Bit lit && not (isFloatType (cmmLitType dflags lit))
then return (OpImm (litToImm lit), nilOL)
else getOperand_generic (CmmLit lit)
@@ -1276,21 +1277,23 @@ condIntCode' _ cond x (CmmLit (CmmInt 0 pk)) = do
-- anything vs operand
condIntCode' is32Bit cond x y | isOperand is32Bit y = do
+ dflags <- getDynFlags
(x_reg, x_code) <- getNonClobberedReg x
(y_op, y_code) <- getOperand y
let
code = x_code `appOL` y_code `snocOL`
- CMP (cmmTypeSize (cmmExprType x)) y_op (OpReg x_reg)
+ CMP (cmmTypeSize (cmmExprType dflags x)) y_op (OpReg x_reg)
return (CondCode False cond code)
-- anything vs anything
condIntCode' _ cond x y = do
+ dflags <- getDynFlags
(y_reg, y_code) <- getNonClobberedReg y
(x_op, x_code) <- getRegOrMem x
let
code = y_code `appOL`
x_code `snocOL`
- CMP (cmmTypeSize (cmmExprType x)) (OpReg y_reg) x_op
+ CMP (cmmTypeSize (cmmExprType dflags x)) (OpReg y_reg) x_op
return (CondCode False cond code)
@@ -1317,12 +1320,13 @@ condFltCode cond x y
-- an operand, but the right must be a reg. We can probably do better
-- than this general case...
condFltCode_sse2 = do
+ dflags <- getDynFlags
(x_reg, x_code) <- getNonClobberedReg x
(y_op, y_code) <- getOperand y
let
code = x_code `appOL`
y_code `snocOL`
- CMP (floatSize $ cmmExprWidth x) y_op (OpReg x_reg)
+ CMP (floatSize $ cmmExprWidth dflags x) y_op (OpReg x_reg)
-- NB(1): we need to use the unsigned comparison operators on the
-- result of this comparison.
return (CondCode True (condToUnsigned cond) code)
@@ -1713,7 +1717,7 @@ genCCall32 target dest_regs args = do
(CmmPrim _ (Just stmts), _) ->
stmtsToInstrs stmts
- _ -> genCCall32' target dest_regs args
+ _ -> genCCall32' dflags target dest_regs args
where divOp1 platform signed width results [CmmHinted arg_x _, CmmHinted arg_y _]
= divOp platform signed width results Nothing arg_x arg_y
@@ -1750,19 +1754,20 @@ genCCall32 target dest_regs args = do
divOp _ _ _ _ _ _ _
= panic "genCCall32: Wrong number of results for divOp"
-genCCall32' :: CmmCallTarget -- function to call
+genCCall32' :: DynFlags
+ -> CmmCallTarget -- function to call
-> [HintedCmmFormal] -- where to put the result
-> [HintedCmmActual] -- arguments (of mixed type)
-> NatM InstrBlock
-genCCall32' target dest_regs args = do
+genCCall32' dflags target dest_regs args = do
let
-- Align stack to 16n for calls, assuming a starting stack
-- alignment of 16n - word_size on procedure entry. Which we
-- maintiain. See Note [rts/StgCRun.c : Stack Alignment on X86]
- sizes = map (arg_size . cmmExprType . hintlessCmm) (reverse args)
- raw_arg_size = sum sizes + wORD_SIZE
+ sizes = map (arg_size . cmmExprType dflags . hintlessCmm) (reverse args)
+ raw_arg_size = sum sizes + wORD_SIZE dflags
arg_pad_size = (roundTo 16 $ raw_arg_size) - raw_arg_size
- tot_arg_size = raw_arg_size + arg_pad_size - wORD_SIZE
+ tot_arg_size = raw_arg_size + arg_pad_size - wORD_SIZE dflags
delta0 <- getDeltaNat
setDeltaNat (delta0 - arg_pad_size)
@@ -1780,7 +1785,7 @@ genCCall32' target dest_regs args = do
where fn_imm = ImmCLbl lbl
CmmCallee expr conv
-> do { (dyn_r, dyn_c) <- getSomeReg expr
- ; ASSERT( isWord32 (cmmExprType expr) )
+ ; ASSERT( isWord32 (cmmExprType dflags expr) )
return (dyn_c `snocOL` CALL (Right dyn_r) [], conv) }
CmmPrim _ _
-> panic $ "genCCall: Can't handle CmmPrim call type here, error "
@@ -1896,7 +1901,7 @@ genCCall32' target dest_regs args = do
DELTA (delta-size))
where
- arg_ty = cmmExprType arg
+ arg_ty = cmmExprType dflags arg
size = arg_size arg_ty -- Byte size
genCCall64 :: CmmCallTarget -- function to call
@@ -1953,8 +1958,7 @@ genCCall64 target dest_regs args = do
_ ->
do dflags <- getDynFlags
- let platform = targetPlatform dflags
- genCCall64' platform target dest_regs args
+ genCCall64' dflags target dest_regs args
where divOp1 platform signed width results [CmmHinted arg_x _, CmmHinted arg_y _]
= divOp platform signed width results Nothing arg_x arg_y
@@ -1989,12 +1993,12 @@ genCCall64 target dest_regs args = do
divOp _ _ _ _ _ _ _
= panic "genCCall64: Wrong number of results for divOp"
-genCCall64' :: Platform
+genCCall64' :: DynFlags
-> CmmCallTarget -- function to call
-> [HintedCmmFormal] -- where to put the result
-> [HintedCmmActual] -- arguments (of mixed type)
-> NatM InstrBlock
-genCCall64' platform target dest_regs args = do
+genCCall64' dflags target dest_regs args = do
-- load up the register arguments
(stack_args, int_regs_used, fp_regs_used, load_args_code)
<-
@@ -2021,14 +2025,14 @@ genCCall64' platform target dest_regs args = do
-- alignment of 16n - word_size on procedure entry. Which we
-- maintiain. See Note [rts/StgCRun.c : Stack Alignment on X86]
(real_size, adjust_rsp) <-
- if (tot_arg_size + wORD_SIZE) `rem` 16 == 0
+ if (tot_arg_size + wORD_SIZE dflags) `rem` 16 == 0
then return (tot_arg_size, nilOL)
else do -- we need to adjust...
delta <- getDeltaNat
- setDeltaNat (delta - wORD_SIZE)
- return (tot_arg_size + wORD_SIZE, toOL [
- SUB II64 (OpImm (ImmInt wORD_SIZE)) (OpReg rsp),
- DELTA (delta - wORD_SIZE) ])
+ setDeltaNat (delta - wORD_SIZE dflags)
+ return (tot_arg_size + wORD_SIZE dflags, toOL [
+ SUB II64 (OpImm (ImmInt (wORD_SIZE dflags))) (OpReg rsp),
+ DELTA (delta - wORD_SIZE dflags) ])
-- push the stack args, right to left
push_code <- push_args (reverse stack_args) nilOL
@@ -2070,7 +2074,7 @@ genCCall64' platform target dest_regs args = do
-- stdcall has callee do it, but is not supported on
-- x86_64 target (see #3336)
(if real_size==0 then [] else
- [ADD (intSize wordWidth) (OpImm (ImmInt real_size)) (OpReg esp)])
+ [ADD (intSize (wordWidth dflags)) (OpImm (ImmInt real_size)) (OpReg esp)])
++
[DELTA (delta + real_size)]
)
@@ -2097,7 +2101,8 @@ genCCall64' platform target dest_regs args = do
call `appOL`
assign_code dest_regs)
- where arg_size = 8 -- always, at the mo
+ where platform = targetPlatform dflags
+ arg_size = 8 -- always, at the mo
load_args :: [CmmHinted CmmExpr]
-> [Reg] -- int regs avail for args
@@ -2122,7 +2127,7 @@ genCCall64' platform target dest_regs args = do
arg_code <- getAnyReg arg
load_args rest rs fregs (code `appOL` arg_code r)
where
- arg_rep = cmmExprType arg
+ arg_rep = cmmExprType dflags arg
push_this_arg = do
(args',ars,frs,code') <- load_args rest aregs fregs code
@@ -2156,7 +2161,7 @@ genCCall64' platform target dest_regs args = do
load_args_win rest (ireg : usedInt) usedFP regs
(code `appOL` arg_code ireg)
where
- arg_rep = cmmExprType arg
+ arg_rep = cmmExprType dflags arg
push_args [] code = return code
push_args ((CmmHinted arg _):rest) code
@@ -2165,9 +2170,9 @@ genCCall64' platform target dest_regs args = do
delta <- getDeltaNat
setDeltaNat (delta-arg_size)
let code' = code `appOL` arg_code `appOL` toOL [
- SUB (intSize wordWidth) (OpImm (ImmInt arg_size)) (OpReg rsp) ,
+ SUB (intSize (wordWidth dflags)) (OpImm (ImmInt arg_size)) (OpReg rsp) ,
DELTA (delta-arg_size),
- MOV (floatSize width) (OpReg arg_reg) (OpAddr (spRel platform 0))]
+ MOV (floatSize width) (OpReg arg_reg) (OpAddr (spRel dflags 0))]
push_args rest code'
| otherwise = do
@@ -2183,14 +2188,14 @@ genCCall64' platform target dest_regs args = do
DELTA (delta-arg_size)]
push_args rest code'
where
- arg_rep = cmmExprType arg
+ arg_rep = cmmExprType dflags arg
width = typeWidth arg_rep
leaveStackSpace n = do
delta <- getDeltaNat
setDeltaNat (delta - n * arg_size)
return $ toOL [
- SUB II64 (OpImm (ImmInt (n * wORD_SIZE))) (OpReg rsp),
+ SUB II64 (OpImm (ImmInt (n * wORD_SIZE dflags))) (OpReg rsp),
DELTA (delta - n * arg_size)]
-- | We're willing to inline and unroll memcpy/memset calls that touch
@@ -2282,11 +2287,11 @@ genSwitch dflags expr ids
dynRef <- cmmMakeDynamicReference dflags addImportNat DataReference lbl
(tableReg,t_code) <- getSomeReg $ dynRef
let op = OpAddr (AddrBaseIndex (EABaseReg tableReg)
- (EAIndex reg wORD_SIZE) (ImmInt 0))
+ (EAIndex reg (wORD_SIZE dflags)) (ImmInt 0))
return $ if target32Bit (targetPlatform dflags)
then e_code `appOL` t_code `appOL` toOL [
- ADD (intSize wordWidth) op (OpReg tableReg),
+ ADD (intSize (wordWidth dflags)) op (OpReg tableReg),
JMP_TBL (OpReg tableReg) ids ReadOnlyData lbl
]
else case platformOS (targetPlatform dflags) of
@@ -2299,7 +2304,7 @@ genSwitch dflags expr ids
-- if L0 is not preceded by a non-anonymous
-- label in its section.
e_code `appOL` t_code `appOL` toOL [
- ADD (intSize wordWidth) op (OpReg tableReg),
+ ADD (intSize (wordWidth dflags)) op (OpReg tableReg),
JMP_TBL (OpReg tableReg) ids Text lbl
]
_ ->
@@ -2313,14 +2318,14 @@ genSwitch dflags expr ids
-- once binutils 2.17 is standard.
e_code `appOL` t_code `appOL` toOL [
MOVSxL II32 op (OpReg reg),
- ADD (intSize wordWidth) (OpReg reg) (OpReg tableReg),
+ ADD (intSize (wordWidth dflags)) (OpReg reg) (OpReg tableReg),
JMP_TBL (OpReg tableReg) ids ReadOnlyData lbl
]
| otherwise
= do
(reg,e_code) <- getSomeReg expr
lbl <- getNewLabelNat
- let op = OpAddr (AddrBaseIndex EABaseNone (EAIndex reg wORD_SIZE) (ImmCLbl lbl))
+ let op = OpAddr (AddrBaseIndex EABaseNone (EAIndex reg (wORD_SIZE dflags)) (ImmCLbl lbl))
code = e_code `appOL` toOL [
JMP_TBL op ids ReadOnlyData lbl
]
@@ -2337,12 +2342,12 @@ createJumpTable dflags ids section lbl
= let jumpTable
| dopt Opt_PIC dflags =
let jumpTableEntryRel Nothing
- = CmmStaticLit (CmmInt 0 wordWidth)
+ = CmmStaticLit (CmmInt 0 (wordWidth dflags))
jumpTableEntryRel (Just blockid)
= CmmStaticLit (CmmLabelDiffOff blockLabel lbl 0)
where blockLabel = mkAsmTempLabel (getUnique blockid)
in map jumpTableEntryRel ids
- | otherwise = map jumpTableEntry ids
+ | otherwise = map (jumpTableEntry dflags) ids
in CmmData section (1, Statics lbl jumpTable)
-- -----------------------------------------------------------------------------
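The createJumpTable hunk keeps the existing PIC/non-PIC split while the entry width now follows wordWidth dflags: with -fPIC each entry is a label difference against the table's own label, otherwise it is the block's address, and a missing branch becomes a zero word either way. A rough standalone sketch of that selection logic (Entry and the String labels are stand-ins for GHC's CmmStaticLit and CLabel types):

    data Entry
      = AbsoluteLabel String        -- non-PIC: store the block's address itself
      | RelativeOff String String   -- PIC: store blockLabel minus the table label
      | ZeroWord                    -- switch branch with no target
      deriving Show

    jumpTableEntries :: Bool -> String -> [Maybe String] -> [Entry]
    jumpTableEntries pic tableLbl = map entry
      where
        entry Nothing = ZeroWord
        entry (Just blockLbl)
          | pic       = RelativeOff blockLbl tableLbl
          | otherwise = AbsoluteLabel blockLbl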
diff --git a/compiler/nativeGen/X86/Instr.hs b/compiler/nativeGen/X86/Instr.hs
index a2263b3116..7f0e48e769 100644
--- a/compiler/nativeGen/X86/Instr.hs
+++ b/compiler/nativeGen/X86/Instr.hs
@@ -30,10 +30,10 @@ import FastString
import FastBool
import Outputable
import Platform
-import Constants (rESERVED_C_STACK_BYTES)
import BasicTypes (Alignment)
import CLabel
+import DynFlags
import UniqSet
import Unique
@@ -613,62 +613,65 @@ x86_patchJumpInstr insn patchF
-- -----------------------------------------------------------------------------
-- | Make a spill instruction.
x86_mkSpillInstr
- :: Platform
+ :: DynFlags
-> Reg -- register to spill
-> Int -- current stack delta
-> Int -- spill slot to use
-> Instr
-x86_mkSpillInstr platform reg delta slot
- = let off = spillSlotToOffset is32Bit slot
+x86_mkSpillInstr dflags reg delta slot
+ = let off = spillSlotToOffset dflags slot
in
let off_w = (off - delta) `div` (if is32Bit then 4 else 8)
in case targetClassOfReg platform reg of
RcInteger -> MOV (archWordSize is32Bit)
- (OpReg reg) (OpAddr (spRel platform off_w))
- RcDouble -> GST FF80 reg (spRel platform off_w) {- RcFloat/RcDouble -}
- RcDoubleSSE -> MOV FF64 (OpReg reg) (OpAddr (spRel platform off_w))
+ (OpReg reg) (OpAddr (spRel dflags off_w))
+ RcDouble -> GST FF80 reg (spRel dflags off_w) {- RcFloat/RcDouble -}
+ RcDoubleSSE -> MOV FF64 (OpReg reg) (OpAddr (spRel dflags off_w))
_ -> panic "X86.mkSpillInstr: no match"
- where is32Bit = target32Bit platform
+ where platform = targetPlatform dflags
+ is32Bit = target32Bit platform
-- | Make a spill reload instruction.
x86_mkLoadInstr
- :: Platform
+ :: DynFlags
-> Reg -- register to load
-> Int -- current stack delta
-> Int -- spill slot to use
-> Instr
-x86_mkLoadInstr platform reg delta slot
- = let off = spillSlotToOffset is32Bit slot
+x86_mkLoadInstr dflags reg delta slot
+ = let off = spillSlotToOffset dflags slot
in
let off_w = (off-delta) `div` (if is32Bit then 4 else 8)
in case targetClassOfReg platform reg of
RcInteger -> MOV (archWordSize is32Bit)
- (OpAddr (spRel platform off_w)) (OpReg reg)
- RcDouble -> GLD FF80 (spRel platform off_w) reg {- RcFloat/RcDouble -}
- RcDoubleSSE -> MOV FF64 (OpAddr (spRel platform off_w)) (OpReg reg)
+ (OpAddr (spRel dflags off_w)) (OpReg reg)
+ RcDouble -> GLD FF80 (spRel dflags off_w) reg {- RcFloat/RcDouble -}
+ RcDoubleSSE -> MOV FF64 (OpAddr (spRel dflags off_w)) (OpReg reg)
_ -> panic "X86.x86_mkLoadInstr"
- where is32Bit = target32Bit platform
+ where platform = targetPlatform dflags
+ is32Bit = target32Bit platform
-spillSlotSize :: Bool -> Int
-spillSlotSize is32Bit = if is32Bit then 12 else 8
+spillSlotSize :: DynFlags -> Int
+spillSlotSize dflags = if is32Bit then 12 else 8
+ where is32Bit = target32Bit (targetPlatform dflags)
-maxSpillSlots :: Bool -> Int
-maxSpillSlots is32Bit
- = ((rESERVED_C_STACK_BYTES - 64) `div` spillSlotSize is32Bit) - 1
+maxSpillSlots :: DynFlags -> Int
+maxSpillSlots dflags
+ = ((rESERVED_C_STACK_BYTES dflags - 64) `div` spillSlotSize dflags) - 1
-- convert a spill slot number to a *byte* offset, with no sign:
-- decide on a per arch basis whether you are spilling above or below
-- the C stack pointer.
-spillSlotToOffset :: Bool -> Int -> Int
-spillSlotToOffset is32Bit slot
- | slot >= 0 && slot < maxSpillSlots is32Bit
- = 64 + spillSlotSize is32Bit * slot
+spillSlotToOffset :: DynFlags -> Int -> Int
+spillSlotToOffset dflags slot
+ | slot >= 0 && slot < maxSpillSlots dflags
+ = 64 + spillSlotSize dflags * slot
| otherwise
= pprPanic "spillSlotToOffset:"
( text "invalid spill location: " <> int slot
- $$ text "maxSpillSlots: " <> int (maxSpillSlots is32Bit))
+ $$ text "maxSpillSlots: " <> int (maxSpillSlots dflags))
--------------------------------------------------------------------------------
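After this change the spill-slot helpers derive everything from DynFlags: the slot size from the target (12 bytes on 32-bit, 8 bytes on 64-bit, per spillSlotSize), and the slot count from the reserved C stack area, which is now queried as rESERVED_C_STACK_BYTES dflags rather than imported from Constants. A standalone sketch with the figures spelled out, assuming a 16 KiB reservation purely for illustration:

    -- Standalone sketch; the real functions read these figures from DynFlags.
    reservedCStackBytes :: Int
    reservedCStackBytes = 16384   -- assumption: illustrative reservation size

    spillSlotSizeSketch :: Bool -> Int
    spillSlotSizeSketch is32Bit = if is32Bit then 12 else 8

    maxSpillSlotsSketch :: Bool -> Int
    maxSpillSlotsSketch is32Bit =
      ((reservedCStackBytes - 64) `div` spillSlotSizeSketch is32Bit) - 1

    -- Slots sit above a 64-byte scratch area, so slot n lives at byte offset
    -- 64 + n * slotSize within the reserved region.
    spillSlotToOffsetSketch :: Bool -> Int -> Int
    spillSlotToOffsetSketch is32Bit slot
      | slot >= 0 && slot < maxSpillSlotsSketch is32Bit
      = 64 + spillSlotSizeSketch is32Bit * slot
      | otherwise
      = error ("invalid spill slot: " ++ show slot)

    -- e.g. on x86_64: maxSpillSlotsSketch False == 2039,
    --      spillSlotToOffsetSketch False 3 == 88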
diff --git a/compiler/nativeGen/X86/Ppr.hs b/compiler/nativeGen/X86/Ppr.hs
index 6411fb94b1..420da7cc3d 100644
--- a/compiler/nativeGen/X86/Ppr.hs
+++ b/compiler/nativeGen/X86/Ppr.hs
@@ -34,6 +34,7 @@ import PprBase
import BlockId
import BasicTypes (Alignment)
+import DynFlags
import OldCmm
import CLabel
import Unique ( pprUnique, Uniquable(..) )
@@ -419,12 +420,13 @@ pprSectionHeader seg
pprDataItem :: CmmLit -> SDoc
-pprDataItem lit = sdocWithPlatform $ \platform -> pprDataItem' platform lit
+pprDataItem lit = sdocWithDynFlags $ \dflags -> pprDataItem' dflags lit
-pprDataItem' :: Platform -> CmmLit -> SDoc
-pprDataItem' platform lit
- = vcat (ppr_item (cmmTypeSize $ cmmLitType lit) lit)
+pprDataItem' :: DynFlags -> CmmLit -> SDoc
+pprDataItem' dflags lit
+ = vcat (ppr_item (cmmTypeSize $ cmmLitType dflags lit) lit)
where
+ platform = targetPlatform dflags
imm = litToImm lit
-- These seem to be common:
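The pprDataItem change swaps sdocWithPlatform for sdocWithDynFlags because cmmLitType now takes DynFlags; both combinators are reader-style accessors that pull the compiler context out of the SDoc environment before rendering. A toy model of that pattern, with SDoc' standing in for GHC's SDoc and an Int word size standing in for the full context:

    -- Toy model only: GHC's real SDoc carries a richer context than this.
    newtype SDoc' ctx = SDoc' { runSDoc' :: ctx -> String }

    -- The sdocWith* combinators let a document inspect its own context
    -- before deciding how to render.
    sdocWithCtx :: (ctx -> SDoc' ctx) -> SDoc' ctx
    sdocWithCtx f = SDoc' (\ctx -> runSDoc' (f ctx) ctx)

    text' :: String -> SDoc' ctx
    text' s = SDoc' (const s)

    -- Usage: choose a directive based on the (stand-in) word size in the context.
    example :: SDoc' Int
    example = sdocWithCtx $ \wordSize ->
      text' (if wordSize == 8 then ".quad 0" else ".long 0")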
diff --git a/compiler/nativeGen/X86/Regs.hs b/compiler/nativeGen/X86/Regs.hs
index 16938a8f15..4eec96f5e1 100644
--- a/compiler/nativeGen/X86/Regs.hs
+++ b/compiler/nativeGen/X86/Regs.hs
@@ -54,11 +54,11 @@ import RegClass
import OldCmm
import CmmCallConv
import CLabel ( CLabel )
+import DynFlags
import Outputable
import Platform
import FastTypes
import FastBool
-import Constants
-- | regSqueeze_class reg
@@ -195,14 +195,14 @@ addrModeRegs _ = []
-- applicable, is the same but for the frame pointer.
-spRel :: Platform
+spRel :: DynFlags
-> Int -- ^ desired stack offset in words, positive or negative
-> AddrMode
-spRel platform n
- | target32Bit platform
- = AddrBaseIndex (EABaseReg esp) EAIndexNone (ImmInt (n * wORD_SIZE))
+spRel dflags n
+ | target32Bit (targetPlatform dflags)
+ = AddrBaseIndex (EABaseReg esp) EAIndexNone (ImmInt (n * wORD_SIZE dflags))
| otherwise
- = AddrBaseIndex (EABaseReg rsp) EAIndexNone (ImmInt (n * wORD_SIZE))
+ = AddrBaseIndex (EABaseReg rsp) EAIndexNone (ImmInt (n * wORD_SIZE dflags))
-- The register numbers must fit into 32 bits on x86, so that we can
-- use a Word32 to represent the set of free registers in the register
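The spRel hunk above is what most of the earlier CodeGen and Instr changes lean on: a stack-pointer-relative address whose byte offset is the requested word offset scaled by the word size now read from DynFlags, based on esp for 32-bit targets and rsp otherwise. A standalone sketch with illustrative types (Addr here is not GHC's AddrMode):

    data Addr = Addr
      { addrBase    :: String  -- "esp" or "rsp"
      , addrByteOff :: Int     -- offset in bytes from the stack pointer
      } deriving Show

    spRelSketch :: Bool  -- ^ 32-bit target?
                -> Int   -- ^ word size in bytes (from DynFlags in the real code)
                -> Int   -- ^ desired offset in words, positive or negative
                -> Addr
    spRelSketch is32Bit wordSize n
      | is32Bit   = Addr "esp" (n * wordSize)
      | otherwise = Addr "rsp" (n * wordSize)

    -- e.g. spRelSketch False 8 3 == Addr {addrBase = "rsp", addrByteOff = 24}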
@@ -440,8 +440,9 @@ instrClobberedRegs platform
--
-- All machine registers that are used for argument-passing to Haskell functions
-allHaskellArgRegs :: Platform -> [Reg]
-allHaskellArgRegs platform = [ RegReal r | Just r <- map (globalRegMaybe platform) globalArgRegs ]
+allHaskellArgRegs :: DynFlags -> [Reg]
+allHaskellArgRegs dflags = [ RegReal r | Just r <- map (globalRegMaybe platform) (globalArgRegs dflags) ]
+ where platform = targetPlatform dflags
-- allocatableRegs is allMachRegNos with the fixed-use regs removed.
-- i.e., these are the regs for which we are prepared to allow the