Diffstat (limited to 'compiler/nativeGen/X86/CodeGen.hs')
-rw-r--r--   compiler/nativeGen/X86/CodeGen.hs | 168
1 file changed, 87 insertions, 81 deletions
diff --git a/compiler/nativeGen/X86/CodeGen.hs b/compiler/nativeGen/X86/CodeGen.hs
index e8f2eccd6b..66ebf75629 100644
--- a/compiler/nativeGen/X86/CodeGen.hs
+++ b/compiler/nativeGen/X86/CodeGen.hs
@@ -141,6 +141,7 @@ stmtsToInstrs stmts

 stmtToInstrs :: CmmStmt -> NatM InstrBlock
 stmtToInstrs stmt = do
+  dflags <- getDynFlags
   is32Bit <- is32BitPlatform
   case stmt of
     CmmNop         -> return nilOL
@@ -150,14 +151,14 @@ stmtToInstrs stmt = do
       | isFloatType ty         -> assignReg_FltCode size reg src
       | is32Bit && isWord64 ty -> assignReg_I64Code      reg src
       | otherwise              -> assignReg_IntCode size reg src
-        where ty = cmmRegType reg
+        where ty = cmmRegType dflags reg
              size = cmmTypeSize ty

     CmmStore addr src
       | isFloatType ty         -> assignMem_FltCode size addr src
       | is32Bit && isWord64 ty -> assignMem_I64Code      addr src
       | otherwise              -> assignMem_IntCode size addr src
-        where ty = cmmExprType src
+        where ty = cmmExprType dflags src
              size = cmmTypeSize ty

     CmmCall target result_regs args _
@@ -168,15 +169,15 @@ stmtToInstrs stmt = do
     CmmSwitch arg ids     -> do dflags <- getDynFlags
                                 genSwitch dflags arg ids
     CmmJump arg gregs     -> do dflags <- getDynFlags
-                                let platform = targetPlatform dflags
-                                genJump arg (jumpRegs platform gregs)
+                                genJump arg (jumpRegs dflags gregs)
     CmmReturn             ->
       panic "stmtToInstrs: return statement should have been cps'd away"

-jumpRegs :: Platform -> Maybe [GlobalReg] -> [Reg]
-jumpRegs platform Nothing      = allHaskellArgRegs platform
-jumpRegs platform (Just gregs) = [ RegReal r | Just r <- map (globalRegMaybe platform) gregs ]
+jumpRegs :: DynFlags -> Maybe [GlobalReg] -> [Reg]
+jumpRegs dflags Nothing      = allHaskellArgRegs dflags
+jumpRegs dflags (Just gregs) = [ RegReal r | Just r <- map (globalRegMaybe platform) gregs ]
+    where platform = targetPlatform dflags

 --------------------------------------------------------------------------------
 -- | 'InstrBlock's are the insn sequences generated by the insn selectors.
@@ -274,9 +275,9 @@ is32BitInteger i = i64 <= 0x7fffffff && i64 >= -0x80000000

 -- | Convert a BlockId to some CmmStatic data

-jumpTableEntry :: Maybe BlockId -> CmmStatic
-jumpTableEntry Nothing = CmmStaticLit (CmmInt 0 wordWidth)
-jumpTableEntry (Just blockid) = CmmStaticLit (CmmLabel blockLabel)
+jumpTableEntry :: DynFlags -> Maybe BlockId -> CmmStatic
+jumpTableEntry dflags Nothing = CmmStaticLit (CmmInt 0 (wordWidth dflags))
+jumpTableEntry _ (Just blockid) = CmmStaticLit (CmmLabel blockLabel)
     where blockLabel = mkAsmTempLabel (getUnique blockid)

@@ -285,10 +286,10 @@ jumpTableEntry (Just blockid) = CmmStaticLit (CmmLabel blockLabel)
 -- Expand CmmRegOff.  ToDo: should we do it this way around, or convert
 -- CmmExprs into CmmRegOff?

-mangleIndexTree :: CmmReg -> Int -> CmmExpr
-mangleIndexTree reg off
+mangleIndexTree :: DynFlags -> CmmReg -> Int -> CmmExpr
+mangleIndexTree dflags reg off
   = CmmMachOp (MO_Add width) [CmmReg reg, CmmLit (CmmInt (fromIntegral off) width)]
-  where width = typeWidth (cmmRegType reg)
+  where width = typeWidth (cmmRegType dflags reg)

 -- | The dual to getAnyReg: compute an expression into a register, but
 -- we don't mind which one it is.
@@ -406,12 +407,13 @@ iselExpr64 expr

 --------------------------------------------------------------------------------
 getRegister :: CmmExpr -> NatM Register
-getRegister e = do is32Bit <- is32BitPlatform
-                   getRegister' is32Bit e
+getRegister e = do dflags <- getDynFlags
+                   is32Bit <- is32BitPlatform
+                   getRegister' dflags is32Bit e

-getRegister' :: Bool -> CmmExpr -> NatM Register
+getRegister' :: DynFlags -> Bool -> CmmExpr -> NatM Register

-getRegister' is32Bit (CmmReg reg)
+getRegister' dflags is32Bit (CmmReg reg)
   = case reg of
       CmmGlobal PicBaseReg
        | is32Bit ->
@@ -423,44 +425,43 @@ getRegister' is32Bit (CmmReg reg)
       _ ->
         do
           use_sse2 <- sse2Enabled
           let
-            sz = cmmTypeSize (cmmRegType reg)
+            sz = cmmTypeSize (cmmRegType dflags reg)
            size | not use_sse2 && isFloatSize sz = FF80
                 | otherwise                      = sz
           --
-          dflags <- getDynFlags
          let platform = targetPlatform dflags
          return (Fixed size (getRegisterReg platform use_sse2 reg) nilOL)

-getRegister' is32Bit (CmmRegOff r n)
-  = getRegister' is32Bit $ mangleIndexTree r n
+getRegister' dflags is32Bit (CmmRegOff r n)
+  = getRegister' dflags is32Bit $ mangleIndexTree dflags r n

 -- for 32-bit architectuers, support some 64 -> 32 bit conversions:
 -- TO_W_(x), TO_W_(x >> 32)

-getRegister' is32Bit (CmmMachOp (MO_UU_Conv W64 W32)
+getRegister' _ is32Bit (CmmMachOp (MO_UU_Conv W64 W32)
                       [CmmMachOp (MO_U_Shr W64) [x,CmmLit (CmmInt 32 _)]])
  | is32Bit = do
   ChildCode64 code rlo <- iselExpr64 x
   return $ Fixed II32 (getHiVRegFromLo rlo) code

-getRegister' is32Bit (CmmMachOp (MO_SS_Conv W64 W32)
+getRegister' _ is32Bit (CmmMachOp (MO_SS_Conv W64 W32)
                       [CmmMachOp (MO_U_Shr W64) [x,CmmLit (CmmInt 32 _)]])
  | is32Bit = do
   ChildCode64 code rlo <- iselExpr64 x
   return $ Fixed II32 (getHiVRegFromLo rlo) code

-getRegister' is32Bit (CmmMachOp (MO_UU_Conv W64 W32) [x])
+getRegister' _ is32Bit (CmmMachOp (MO_UU_Conv W64 W32) [x])
  | is32Bit = do
   ChildCode64 code rlo <- iselExpr64 x
   return $ Fixed II32 rlo code

-getRegister' is32Bit (CmmMachOp (MO_SS_Conv W64 W32) [x])
+getRegister' _ is32Bit (CmmMachOp (MO_SS_Conv W64 W32) [x])
  | is32Bit = do
   ChildCode64 code rlo <- iselExpr64 x
   return $ Fixed II32 rlo code

-getRegister' _ (CmmLit lit@(CmmFloat f w)) =
+getRegister' _ _ (CmmLit lit@(CmmFloat f w)) =
   if_sse2 float_const_sse2 float_const_x87
  where
   float_const_sse2
@@ -491,60 +492,60 @@ getRegister' _ (CmmLit lit@(CmmFloat f w)) =
       loadFloatAmode False w addr code

 -- catch simple cases of zero- or sign-extended load
-getRegister' _ (CmmMachOp (MO_UU_Conv W8 W32) [CmmLoad addr _]) = do
+getRegister' _ _ (CmmMachOp (MO_UU_Conv W8 W32) [CmmLoad addr _]) = do
   code <- intLoadCode (MOVZxL II8) addr
   return (Any II32 code)

-getRegister' _ (CmmMachOp (MO_SS_Conv W8 W32) [CmmLoad addr _]) = do
+getRegister' _ _ (CmmMachOp (MO_SS_Conv W8 W32) [CmmLoad addr _]) = do
   code <- intLoadCode (MOVSxL II8) addr
   return (Any II32 code)

-getRegister' _ (CmmMachOp (MO_UU_Conv W16 W32) [CmmLoad addr _]) = do
+getRegister' _ _ (CmmMachOp (MO_UU_Conv W16 W32) [CmmLoad addr _]) = do
   code <- intLoadCode (MOVZxL II16) addr
   return (Any II32 code)

-getRegister' _ (CmmMachOp (MO_SS_Conv W16 W32) [CmmLoad addr _]) = do
+getRegister' _ _ (CmmMachOp (MO_SS_Conv W16 W32) [CmmLoad addr _]) = do
   code <- intLoadCode (MOVSxL II16) addr
   return (Any II32 code)

 -- catch simple cases of zero- or sign-extended load
-getRegister' is32Bit (CmmMachOp (MO_UU_Conv W8 W64) [CmmLoad addr _])
+getRegister' _ is32Bit (CmmMachOp (MO_UU_Conv W8 W64) [CmmLoad addr _])
  | not is32Bit = do
   code <- intLoadCode (MOVZxL II8) addr
   return (Any II64 code)

-getRegister' is32Bit (CmmMachOp (MO_SS_Conv W8 W64) [CmmLoad addr _])
+getRegister' _ is32Bit (CmmMachOp (MO_SS_Conv W8 W64) [CmmLoad addr _])
  | not is32Bit = do
   code <- intLoadCode (MOVSxL II8) addr
   return (Any II64 code)

-getRegister' is32Bit (CmmMachOp (MO_UU_Conv W16 W64) [CmmLoad addr _])
+getRegister' _ is32Bit (CmmMachOp (MO_UU_Conv W16 W64) [CmmLoad addr _])
  | not is32Bit = do
   code <- intLoadCode (MOVZxL II16) addr
   return (Any II64 code)

-getRegister' is32Bit (CmmMachOp (MO_SS_Conv W16 W64) [CmmLoad addr _])
+getRegister' _ is32Bit (CmmMachOp (MO_SS_Conv W16 W64) [CmmLoad addr _])
  | not is32Bit = do
   code <- intLoadCode (MOVSxL II16) addr
   return (Any II64 code)

-getRegister' is32Bit (CmmMachOp (MO_UU_Conv W32 W64) [CmmLoad addr _])
+getRegister' _ is32Bit (CmmMachOp (MO_UU_Conv W32 W64) [CmmLoad addr _])
  | not is32Bit = do
   code <- intLoadCode (MOV II32) addr -- 32-bit loads zero-extend
   return (Any II64 code)

-getRegister' is32Bit (CmmMachOp (MO_SS_Conv W32 W64) [CmmLoad addr _])
+getRegister' _ is32Bit (CmmMachOp (MO_SS_Conv W32 W64) [CmmLoad addr _])
  | not is32Bit = do
   code <- intLoadCode (MOVSxL II32) addr
   return (Any II64 code)

-getRegister' is32Bit (CmmMachOp (MO_Add W64) [CmmReg (CmmGlobal PicBaseReg),
+getRegister' _ is32Bit (CmmMachOp (MO_Add W64) [CmmReg (CmmGlobal PicBaseReg),
                                               CmmLit displacement])
  | not is32Bit = do
       return $ Any II64 (\dst -> unitOL $
         LEA II64 (OpAddr (ripRel (litToImm displacement))) (OpReg dst))

-getRegister' is32Bit (CmmMachOp mop [x]) = do -- unary MachOps
+getRegister' dflags is32Bit (CmmMachOp mop [x]) = do -- unary MachOps
     sse2 <- sse2Enabled
     case mop of
       MO_F_Neg w
@@ -634,11 +635,11 @@ getRegister' is32Bit (CmmMachOp mop [x]) = do -- unary MachOps

         conversionNop :: Size -> CmmExpr -> NatM Register
         conversionNop new_size expr
-            = do e_code <- getRegister' is32Bit expr
+            = do e_code <- getRegister' dflags is32Bit expr
                  return (swizzleRegisterRep e_code new_size)

-getRegister' is32Bit (CmmMachOp mop [x, y]) = do -- dyadic MachOps
+getRegister' _ is32Bit (CmmMachOp mop [x, y]) = do -- dyadic MachOps
   sse2 <- sse2Enabled
   case mop of
       MO_F_Eq _ -> condFltReg is32Bit EQQ x y
@@ -812,14 +813,14 @@ getRegister' is32Bit (CmmMachOp mop [x, y]) = do -- dyadic MachOps

         return (Fixed size result code)

-getRegister' _ (CmmLoad mem pk)
+getRegister' _ _ (CmmLoad mem pk)
   | isFloatType pk
   = do
     Amode addr mem_code <- getAmode mem
     use_sse2 <- sse2Enabled
     loadFloatAmode use_sse2 (typeWidth pk) addr mem_code

-getRegister' is32Bit (CmmLoad mem pk)
+getRegister' _ is32Bit (CmmLoad mem pk)
   | is32Bit && not (isWord64 pk)
   = do
     code <- intLoadCode instr mem
@@ -837,14 +838,14 @@ getRegister' is32Bit (CmmLoad mem pk)
 -- simpler we do our 8-bit arithmetic with full 32-bit registers.

 -- Simpler memory load code on x86_64
-getRegister' is32Bit (CmmLoad mem pk)
+getRegister' _ is32Bit (CmmLoad mem pk)
  | not is32Bit
   = do
     code <- intLoadCode (MOV size) mem
     return (Any size code)
   where size = intSize $ typeWidth pk

-getRegister' is32Bit (CmmLit (CmmInt 0 width))
+getRegister' _ is32Bit (CmmLit (CmmInt 0 width))
   = let
         size = intSize width
@@ -861,8 +862,8 @@ getRegister' is32Bit (CmmLit (CmmInt 0 width))
 -- optimisation for loading small literals on x86_64: take advantage
 -- of the automatic zero-extension from 32 to 64 bits, because the 32-bit
 -- instruction forms are shorter.
-getRegister' is32Bit (CmmLit lit)
-  | not is32Bit, isWord64 (cmmLitType lit), not (isBigLit lit)
+getRegister' dflags is32Bit (CmmLit lit)
+  | not is32Bit, isWord64 (cmmLitType dflags lit), not (isBigLit lit)
   = let
         imm = litToImm lit
         code dst = unitOL (MOV II32 (OpImm imm) (OpReg dst))
@@ -877,15 +878,13 @@ getRegister' is32Bit (CmmLit lit)
 -- note2: all labels are small, because we're assuming the
 -- small memory model (see gcc docs, -mcmodel=small).

-getRegister' _ (CmmLit lit)
-  = let
-        size = cmmTypeSize (cmmLitType lit)
-        imm = litToImm lit
-        code dst = unitOL (MOV size (OpImm imm) (OpReg dst))
-    in
-        return (Any size code)
+getRegister' dflags _ (CmmLit lit)
+  = do let size = cmmTypeSize (cmmLitType dflags lit)
+           imm = litToImm lit
+           code dst = unitOL (MOV size (OpImm imm) (OpReg dst))
+       return (Any size code)

-getRegister' _ other = pprPanic "getRegister(x86)" (ppr other)
+getRegister' _ _ other = pprPanic "getRegister(x86)" (ppr other)

 intLoadCode :: (Operand -> Operand -> Instr) -> CmmExpr
@@ -958,7 +957,8 @@ getAmode e = do is32Bit <- is32BitPlatform
                 getAmode' is32Bit e

 getAmode' :: Bool -> CmmExpr -> NatM Amode
-getAmode' _ (CmmRegOff r n) = getAmode $ mangleIndexTree r n
+getAmode' _ (CmmRegOff r n) = do dflags <- getDynFlags
+                                 getAmode $ mangleIndexTree dflags r n

 getAmode' is32Bit (CmmMachOp (MO_Add W64) [CmmReg (CmmGlobal PicBaseReg),
                                            CmmLit displacement])
@@ -1047,7 +1047,8 @@ getNonClobberedOperand (CmmLit lit) = do
   else do

     is32Bit <- is32BitPlatform
-    if is32BitLit is32Bit lit && not (isFloatType (cmmLitType lit))
+    dflags <- getDynFlags
+    if is32BitLit is32Bit lit && not (isFloatType (cmmLitType dflags lit))
     then return (OpImm (litToImm lit), nilOL)
     else getNonClobberedOperand_generic (CmmLit lit)

@@ -1100,7 +1101,8 @@ getOperand (CmmLit lit) = do
   else do

     is32Bit <- is32BitPlatform
-    if is32BitLit is32Bit lit && not (isFloatType (cmmLitType lit))
+    dflags <- getDynFlags
+    if is32BitLit is32Bit lit && not (isFloatType (cmmLitType dflags lit))
     then return (OpImm (litToImm lit), nilOL)
     else getOperand_generic (CmmLit lit)

@@ -1276,21 +1278,23 @@ condIntCode' _ cond x (CmmLit (CmmInt 0 pk)) = do

 -- anything vs operand
 condIntCode' is32Bit cond x y
  | isOperand is32Bit y = do
+    dflags <- getDynFlags
     (x_reg, x_code) <- getNonClobberedReg x
     (y_op,  y_code) <- getOperand y
     let
        code = x_code `appOL` y_code `snocOL`
-                 CMP (cmmTypeSize (cmmExprType x)) y_op (OpReg x_reg)
+                 CMP (cmmTypeSize (cmmExprType dflags x)) y_op (OpReg x_reg)
     return (CondCode False cond code)

 -- anything vs anything
 condIntCode' _ cond x y = do
+  dflags <- getDynFlags
   (y_reg, y_code) <- getNonClobberedReg y
   (x_op, x_code) <- getRegOrMem x
   let
        code = y_code `appOL` x_code `snocOL`
-                 CMP (cmmTypeSize (cmmExprType x)) (OpReg y_reg) x_op
+                 CMP (cmmTypeSize (cmmExprType dflags x)) (OpReg y_reg) x_op
   return (CondCode False cond code)

@@ -1317,12 +1321,13 @@ condFltCode cond x y
   -- an operand, but the right must be a reg.  We can probably do better
   -- than this general case...
   condFltCode_sse2 = do
+    dflags <- getDynFlags
     (x_reg, x_code) <- getNonClobberedReg x
     (y_op, y_code) <- getOperand y
     let
        code = x_code `appOL` y_code `snocOL`
-                 CMP (floatSize $ cmmExprWidth x) y_op (OpReg x_reg)
+                 CMP (floatSize $ cmmExprWidth dflags x) y_op (OpReg x_reg)
        -- NB(1): we need to use the unsigned comparison operators on the
        -- result of this comparison.
     return (CondCode True (condToUnsigned cond) code)
@@ -1713,7 +1718,7 @@ genCCall32 target dest_regs args = do
         (CmmPrim _ (Just stmts), _) ->
             stmtsToInstrs stmts

-        _ -> genCCall32' target dest_regs args
+        _ -> genCCall32' dflags target dest_regs args

   where divOp1 platform signed width results [CmmHinted arg_x _, CmmHinted arg_y _]
             = divOp platform signed width results Nothing arg_x arg_y
@@ -1750,16 +1755,17 @@ genCCall32 target dest_regs args = do
         divOp _ _ _ _ _ _ _
             = panic "genCCall32: Wrong number of results for divOp"

-genCCall32' :: CmmCallTarget            -- function to call
+genCCall32' :: DynFlags
+            -> CmmCallTarget            -- function to call
             -> [HintedCmmFormal]        -- where to put the result
             -> [HintedCmmActual]        -- arguments (of mixed type)
             -> NatM InstrBlock
-genCCall32' target dest_regs args = do
+genCCall32' dflags target dest_regs args = do
         let
             -- Align stack to 16n for calls, assuming a starting stack
             -- alignment of 16n - word_size on procedure entry. Which we
             -- maintiain. See Note [rts/StgCRun.c : Stack Alignment on X86]
-            sizes               = map (arg_size . cmmExprType . hintlessCmm) (reverse args)
+            sizes               = map (arg_size . cmmExprType dflags . hintlessCmm) (reverse args)
            raw_arg_size        = sum sizes + wORD_SIZE
            arg_pad_size        = (roundTo 16 $ raw_arg_size) - raw_arg_size
            tot_arg_size        = raw_arg_size + arg_pad_size - wORD_SIZE
@@ -1780,7 +1786,7 @@ genCCall32' target dest_regs args = do
                where fn_imm = ImmCLbl lbl
             CmmCallee expr conv
                -> do { (dyn_r, dyn_c) <- getSomeReg expr
-                     ; ASSERT( isWord32 (cmmExprType expr) )
+                     ; ASSERT( isWord32 (cmmExprType dflags expr) )
                        return (dyn_c `snocOL` CALL (Right dyn_r) [], conv) }
             CmmPrim _ _
                 -> panic $ "genCCall: Can't handle CmmPrim call type here, error "
@@ -1896,7 +1902,7 @@ genCCall32' target dest_regs args = do
                            DELTA (delta-size))

           where
-             arg_ty = cmmExprType arg
+             arg_ty = cmmExprType dflags arg
              size = arg_size arg_ty -- Byte size

 genCCall64 :: CmmCallTarget      -- function to call
@@ -1953,8 +1959,7 @@ genCCall64 target dest_regs args = do
         _ ->
             do dflags <- getDynFlags
-               let platform = targetPlatform dflags
-               genCCall64' platform target dest_regs args
+               genCCall64' dflags target dest_regs args

   where divOp1 platform signed width results [CmmHinted arg_x _, CmmHinted arg_y _]
             = divOp platform signed width results Nothing arg_x arg_y
@@ -1989,12 +1994,12 @@ genCCall64 target dest_regs args = do
         divOp _ _ _ _ _ _ _
             = panic "genCCall64: Wrong number of results for divOp"

-genCCall64' :: Platform
+genCCall64' :: DynFlags
             -> CmmCallTarget            -- function to call
             -> [HintedCmmFormal]        -- where to put the result
             -> [HintedCmmActual]        -- arguments (of mixed type)
             -> NatM InstrBlock
-genCCall64' platform target dest_regs args = do
+genCCall64' dflags target dest_regs args = do
     -- load up the register arguments
     (stack_args, int_regs_used, fp_regs_used, load_args_code)
          <-
@@ -2070,7 +2075,7 @@ genCCall64' platform target dest_regs args = do
             -- stdcall has callee do it, but is not supported on
             -- x86_64 target (see #3336)
               (if real_size==0 then [] else
-               [ADD (intSize wordWidth) (OpImm (ImmInt real_size)) (OpReg esp)])
+               [ADD (intSize (wordWidth dflags)) (OpImm (ImmInt real_size)) (OpReg esp)])
             ++ [DELTA (delta + real_size)]
            )
@@ -2097,7 +2102,8 @@ genCCall64' platform target dest_regs args = do
                 call                    `appOL`
                 assign_code dest_regs)

-  where arg_size = 8 -- always, at the mo
+  where platform = targetPlatform dflags
+        arg_size = 8 -- always, at the mo

         load_args :: [CmmHinted CmmExpr]
                   -> [Reg]                  -- int regs avail for args
@@ -2122,7 +2128,7 @@ genCCall64' platform target dest_regs args = do
                    arg_code <- getAnyReg arg
                    load_args rest rs fregs (code `appOL` arg_code r)
             where
-              arg_rep = cmmExprType arg
+              arg_rep = cmmExprType dflags arg

               push_this_arg = do
                 (args',ars,frs,code') <- load_args rest aregs fregs code
@@ -2156,7 +2162,7 @@ genCCall64' platform target dest_regs args = do
                  load_args_win rest (ireg : usedInt) usedFP regs
                                (code `appOL` arg_code ireg)
             where
-              arg_rep = cmmExprType arg
+              arg_rep = cmmExprType dflags arg

         push_args [] code = return code
         push_args ((CmmHinted arg _):rest) code
@@ -2165,7 +2171,7 @@ genCCall64' platform target dest_regs args = do
                delta <- getDeltaNat
                setDeltaNat (delta-arg_size)
                let code' = code `appOL` arg_code `appOL` toOL [
-                            SUB (intSize wordWidth) (OpImm (ImmInt arg_size)) (OpReg rsp) ,
+                            SUB (intSize (wordWidth dflags)) (OpImm (ImmInt arg_size)) (OpReg rsp) ,
                             DELTA (delta-arg_size),
                             MOV (floatSize width) (OpReg arg_reg) (OpAddr (spRel platform 0))]
                push_args rest code'
@@ -2183,7 +2189,7 @@ genCCall64' platform target dest_regs args = do
                             DELTA (delta-arg_size)]
                push_args rest code'
             where
-              arg_rep = cmmExprType arg
+              arg_rep = cmmExprType dflags arg
               width = typeWidth arg_rep

         leaveStackSpace n = do
@@ -2286,7 +2292,7 @@ genSwitch dflags expr ids
       return $ if target32Bit (targetPlatform dflags)
                then e_code `appOL` t_code `appOL` toOL [
-                        ADD (intSize wordWidth) op (OpReg tableReg),
+                        ADD (intSize (wordWidth dflags)) op (OpReg tableReg),
                         JMP_TBL (OpReg tableReg) ids ReadOnlyData lbl
                        ]
                else case platformOS (targetPlatform dflags) of
@@ -2299,7 +2305,7 @@ genSwitch dflags expr ids
                       -- if L0 is not preceded by a non-anonymous
                       -- label in its section.
                       e_code `appOL` t_code `appOL` toOL [
-                              ADD (intSize wordWidth) op (OpReg tableReg),
+                              ADD (intSize (wordWidth dflags)) op (OpReg tableReg),
                               JMP_TBL (OpReg tableReg) ids Text lbl
                              ]
                     _ ->
@@ -2313,7 +2319,7 @@ genSwitch dflags expr ids
                       -- once binutils 2.17 is standard.
                       e_code `appOL` t_code `appOL` toOL [
                               MOVSxL II32 op (OpReg reg),
-                              ADD (intSize wordWidth) (OpReg reg) (OpReg tableReg),
+                              ADD (intSize (wordWidth dflags)) (OpReg reg) (OpReg tableReg),
                               JMP_TBL (OpReg tableReg) ids ReadOnlyData lbl
                              ]
   | otherwise
@@ -2337,12 +2343,12 @@ createJumpTable dflags ids section lbl
     = let jumpTable
             | dopt Opt_PIC dflags =
                   let jumpTableEntryRel Nothing
-                          = CmmStaticLit (CmmInt 0 wordWidth)
+                          = CmmStaticLit (CmmInt 0 (wordWidth dflags))
                       jumpTableEntryRel (Just blockid)
                           = CmmStaticLit (CmmLabelDiffOff blockLabel lbl 0)
                           where blockLabel = mkAsmTempLabel (getUnique blockid)
                   in map jumpTableEntryRel ids
-            | otherwise = map (jumpTableEntry dflags) ids
+            | otherwise = map (jumpTableEntry dflags) ids
       in CmmData section (1, Statics lbl jumpTable)

 -- -----------------------------------------------------------------------------
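The change above is mechanical but repetitive: helpers that previously read the target word size or Cmm types from module-level constants (wordWidth, cmmRegType, cmmExprType, cmmLitType) now take a DynFlags argument and derive those values from it, so every caller, from stmtToInstrs down through getRegister' and genCCall32'/genCCall64', threads dflags through. The sketch below illustrates that pattern in isolation; Config, Width and the jumpTableEntry analogue here are simplified stand-ins for the real GHC types, not the definitions from this file.

module Main (main) where

-- Stand-in for a target word width.
data Width = W32 | W64
  deriving Show

-- Stand-in for GHC's DynFlags: a record describing the target,
-- threaded through by callers instead of being a global.
data Config = Config { targetIs32Bit :: Bool }

-- Before this kind of refactoring, wordWidth would be a constant;
-- here it is computed from the configuration passed in.
wordWidth :: Config -> Width
wordWidth cfg
  | targetIs32Bit cfg = W32
  | otherwise         = W64

-- Analogue of jumpTableEntry in the diff: an absent block becomes a
-- zero word of the target's word width, so the function now needs the
-- configuration argument as well.
jumpTableEntry :: Config -> Maybe Int -> String
jumpTableEntry cfg Nothing        = "zero word of width " ++ show (wordWidth cfg)
jumpTableEntry _   (Just blockId) = "label for block " ++ show blockId

main :: IO ()
main = do
  let cfg = Config { targetIs32Bit = False }
  mapM_ (putStrLn . jumpTableEntry cfg) [Nothing, Just 7, Nothing]

The apparent payoff of threading the configuration explicitly is that width-dependent decisions follow whatever target description is supplied at compile time, rather than a constant fixed when the compiler itself was built.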
