Diffstat (limited to 'compiler/nativeGen')
-rw-r--r--  compiler/nativeGen/BlockLayout.hs |  4
-rw-r--r--  compiler/nativeGen/CFG.hs         | 18
-rw-r--r--  compiler/nativeGen/Dwarf/Types.hs |  2
-rw-r--r--  compiler/nativeGen/PPC/Ppr.hs     |  2
-rw-r--r--  compiler/nativeGen/X86/CodeGen.hs |  2
5 files changed, 14 insertions, 14 deletions
diff --git a/compiler/nativeGen/BlockLayout.hs b/compiler/nativeGen/BlockLayout.hs
index 9b5f2eda8f..f0e98cda75 100644
--- a/compiler/nativeGen/BlockLayout.hs
+++ b/compiler/nativeGen/BlockLayout.hs
@@ -342,7 +342,7 @@ takeL n (BlockChain blks) =
 -- We have the chains (A-B-C-D) and (E-F) and an Edge C->E.
 --
 -- While placing the latter after the former doesn't result in sequential
--- control flow it is still benefical. As block C and E might end
+-- control flow it is still beneficial. As block C and E might end
 -- up in the same cache line.
 --
 -- So we place these chains next to each other even if we can't fuse them.
@@ -740,7 +740,7 @@ sequenceChain info weights' blocks@((BasicBlock entry _):_) =
         --pprTraceIt "placedBlocks" $
         -- ++ [] is stil kinda expensive
         if null unplaced then blockList else blockList ++ unplaced
-      getBlock bid = expectJust "Block placment" $ mapLookup bid blockMap
+      getBlock bid = expectJust "Block placement" $ mapLookup bid blockMap
     in
       --Assert we placed all blocks given as input
       ASSERT(all (\bid -> mapMember bid blockMap) placedBlocks)
diff --git a/compiler/nativeGen/CFG.hs b/compiler/nativeGen/CFG.hs
index fb17d269a8..4dc5f9ccb3 100644
--- a/compiler/nativeGen/CFG.hs
+++ b/compiler/nativeGen/CFG.hs
@@ -291,7 +291,7 @@ from blocks suitable for shortcutting to their jump targets.
 Then it redirects all jump instructions to these blocks using the
 built up mapping.
 This function (shortcutWeightMap) takes the same mapping and
-applies the mapping to the CFG in the way layed out above.
+applies the mapping to the CFG in the way laid out above.
 -}
 shortcutWeightMap :: LabelMap (Maybe BlockId) -> CFG -> CFG
@@ -575,7 +575,7 @@ addNodesBetween m updates =
     so have a larger number of successors. So without more information we
     can only say that each individual successor is unlikely to be jumped to
     and we rank them accordingly.
-  * Calls - We currently ignore calls completly:
+  * Calls - We currently ignore calls completely:
     * By the time we return from a call there is a good chance
       that the address we return to has already been evicted from
       cache eliminating a main advantage sequential placement brings.
@@ -648,7 +648,7 @@ getCfg weights graph =
             (CmmCall { cml_cont = Nothing })        -> []
             other ->
                 panic "Foo" $
-                ASSERT2(False, ppr "Unkown successor cause:" <>
+                ASSERT2(False, ppr "Unknown successor cause:" <>
                     (ppr branch <+> text "=>" <> ppr (G.successors other)))
                 map (\x -> ((bid,x),mkEdgeInfo 0)) $ G.successors other
         where
@@ -959,10 +959,10 @@ mkGlobalWeights root localCfg
       vertexMapping = mapFromList $ zip revOrder [0..] :: LabelMap Int
       blockMapping = listArray (0,mapSize vertexMapping - 1) revOrder :: Array Int BlockId
-      -- Map from blockId to indicies starting at zero
+      -- Map from blockId to indices starting at zero
       toVertex :: BlockId -> Int
       toVertex blockId = expectJust "mkGlobalWeights" $ mapLookup blockId vertexMapping
-      -- Map from indicies starting at zero to blockIds
+      -- Map from indices starting at zero to blockIds
       fromVertex :: Int -> BlockId
       fromVertex vertex = blockMapping ! vertex
@@ -990,13 +990,13 @@ many instances of "loopy" Cmm where these make a difference.
 TODO:
 * The paper containers more benchmarks which should be implemented.
-* If we turn the likelyhood on if/else branches into a probability
+* If we turn the likelihood on if/else branches into a probability
   instead of true/false we could implement this as a Cmm pass.
   + The complete Cmm code still exists and can be accessed by the heuristics
   + There is no chance of register allocation/codegen inserting branches/blocks
     making the TransitionSource info wrong.
   + potential to use this information in CmmPasses.
-  - Requires refactoring of all the code relying on the binary nature of likelyhood.
+  - Requires refactoring of all the code relying on the binary nature of likelihood.
   - Requires refactoring `loopInfo` to work on both, Cmm Graphs and the backend CFG.
 -}
@@ -1060,7 +1060,7 @@ staticBranchPrediction _root (LoopInfo l_backEdges loopLevels l_loops) cfg =
         heuristics = map ($ ((s1,s1_info),(s2,s2_info)))
           [lehPredicts, phPredicts, ohPredicts, ghPredicts, lhhPredicts, chPredicts
           , shPredicts, rhPredicts]
-        -- Apply result of a heuristic. Argument is the likelyhood
+        -- Apply result of a heuristic. Argument is the likelihood
         -- predicted for s1.
         applyHeuristic :: CFG -> Maybe Prob -> CFG
         applyHeuristic cfg Nothing = cfg
@@ -1101,7 +1101,7 @@ staticBranchPrediction _root (LoopInfo l_backEdges loopLevels l_loops) cfg =
         (m,not_m) = partition (\succ -> S.member (node, fst succ) backedges) successors
         -- Heuristics return nothing if they don't say anything about this branch
-        -- or Just (prob_s1) where prob_s1 is the likelyhood for s1 to be the
+        -- or Just (prob_s1) where prob_s1 is the likelihood for s1 to be the
         -- taken branch. s1 is the branch in the true case.
         -- Loop exit heuristic.
diff --git a/compiler/nativeGen/Dwarf/Types.hs b/compiler/nativeGen/Dwarf/Types.hs
index 9386117386..a6ba596f35 100644
--- a/compiler/nativeGen/Dwarf/Types.hs
+++ b/compiler/nativeGen/Dwarf/Types.hs
@@ -284,7 +284,7 @@ instance Outputable DwarfFrameBlock where
   ppr (DwarfFrameBlock hasInfo unwinds) = braces $ ppr hasInfo <+> ppr unwinds
 -- | Header for the @.debug_frame@ section. Here we emit the "Common
--- Information Entry" record that etablishes general call frame
+-- Information Entry" record that establishes general call frame
 -- parameters and the default stack layout.
 pprDwarfFrame :: DwarfFrame -> SDoc
 pprDwarfFrame DwarfFrame{dwCieLabel=cieLabel,dwCieInit=cieInit,dwCieProcs=procs}
diff --git a/compiler/nativeGen/PPC/Ppr.hs b/compiler/nativeGen/PPC/Ppr.hs
index 0c3aaccda2..ea0b36fb64 100644
--- a/compiler/nativeGen/PPC/Ppr.hs
+++ b/compiler/nativeGen/PPC/Ppr.hs
@@ -846,7 +846,7 @@ pprInstr (FCMP reg1 reg2) = hcat [
     -- Note: we're using fcmpu, not fcmpo
     -- The difference is with fcmpo, compare with NaN is an invalid operation.
     -- We don't handle invalid fp ops, so we don't care.
-    -- Morever, we use `fcmpu 0, ...` rather than `fcmpu cr0, ...` for
+    -- Moreover, we use `fcmpu 0, ...` rather than `fcmpu cr0, ...` for
     -- better portability since some non-GNU assembler (such as
     -- IBM's `as`) tend not to support the symbolic register name cr0.
     -- This matches the syntax that GCC seems to emit for PPC targets.
diff --git a/compiler/nativeGen/X86/CodeGen.hs b/compiler/nativeGen/X86/CodeGen.hs
index 59a1e4115b..8cea28d920 100644
--- a/compiler/nativeGen/X86/CodeGen.hs
+++ b/compiler/nativeGen/X86/CodeGen.hs
@@ -1333,7 +1333,7 @@ x86_complex_amode :: CmmExpr -> CmmExpr -> Integer -> Integer -> NatM Amode
 x86_complex_amode base index shift offset
   = do (x_reg, x_code) <- getNonClobberedReg base
        -- x must be in a temp, because it has to stay live over y_code
-       -- we could compre x_reg and y_reg and do something better here...
+       -- we could compare x_reg and y_reg and do something better here...
        (y_reg, y_code) <- getSomeReg index
        let
            code = x_code `appOL` y_code
