diff options
Diffstat (limited to 'compiler/deSugar')
-rw-r--r-- | compiler/deSugar/Check.lhs | 1458 | ||||
-rw-r--r-- | compiler/deSugar/Coverage.lhs | 26 | ||||
-rw-r--r-- | compiler/deSugar/DsListComp.lhs | 199 |
3 files changed, 809 insertions, 874 deletions
diff --git a/compiler/deSugar/Check.lhs b/compiler/deSugar/Check.lhs index 2432051c7b..94f0a39c4f 100644 --- a/compiler/deSugar/Check.lhs +++ b/compiler/deSugar/Check.lhs @@ -1,728 +1,730 @@ -% -% (c) The University of Glasgow 2006 -% (c) The GRASP/AQUA Project, Glasgow University, 1997-1998 -% -% Author: Juan J. Quintela <quintela@krilin.dc.fi.udc.es> - -\begin{code} -{-# OPTIONS -fno-warn-incomplete-patterns #-} --- The above warning supression flag is a temporary kludge. --- While working on this module you are encouraged to remove it and fix --- any warnings in the module. See --- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings --- for details - -module Check ( check , ExhaustivePat ) where - -#include "HsVersions.h" - -import HsSyn -import TcHsSyn -import DsUtils -import MatchLit -import Id -import DataCon -import Name -import TysWiredIn -import PrelNames -import TyCon -import Type -import Unify( dataConCannotMatch ) -import SrcLoc -import UniqSet -import Util -import Outputable -import FastString -\end{code} - -This module performs checks about if one list of equations are: -\begin{itemize} -\item Overlapped -\item Non exhaustive -\end{itemize} -To discover that we go through the list of equations in a tree-like fashion. 
- -If you like theory, a similar algorithm is described in: -\begin{quotation} - {\em Two Techniques for Compiling Lazy Pattern Matching}, - Luc Maranguet, - INRIA Rocquencourt (RR-2385, 1994) -\end{quotation} -The algorithm is based on the first technique, but there are some differences: -\begin{itemize} -\item We don't generate code -\item We have constructors and literals (not only literals as in the - article) -\item We don't use directions, we must select the columns from - left-to-right -\end{itemize} -(By the way the second technique is really similar to the one used in - @Match.lhs@ to generate code) - -This function takes the equations of a pattern and returns: -\begin{itemize} -\item The patterns that are not recognized -\item The equations that are not overlapped -\end{itemize} -It simplify the patterns and then call @check'@ (the same semantics), and it -needs to reconstruct the patterns again .... - -The problem appear with things like: -\begin{verbatim} - f [x,y] = .... - f (x:xs) = ..... -\end{verbatim} -We want to put the two patterns with the same syntax, (prefix form) and -then all the constructors are equal: -\begin{verbatim} - f (: x (: y [])) = .... - f (: x xs) = ..... -\end{verbatim} -(more about that in @tidy_eqns@) - -We would prefer to have a @WarningPat@ of type @String@, but Strings and the -Pretty Printer are not friends. - -We use @InPat@ in @WarningPat@ instead of @OutPat@ -because we need to print the -warning messages in the same way they are introduced, i.e. if the user -wrote: -\begin{verbatim} - f [x,y] = .. -\end{verbatim} -He don't want a warning message written: -\begin{verbatim} - f (: x (: y [])) ........ -\end{verbatim} -Then we need to use InPats. -\begin{quotation} - Juan Quintela 5 JUL 1998\\ - User-friendliness and compiler writers are no friends. 
-\end{quotation} - -\begin{code} -type WarningPat = InPat Name -type ExhaustivePat = ([WarningPat], [(Name, [HsLit])]) -type EqnNo = Int -type EqnSet = UniqSet EqnNo - - -check :: [EquationInfo] -> ([ExhaustivePat], [EquationInfo]) - -- Second result is the shadowed equations - -- if there are view patterns, just give up - don't know what the function is -check qs = (untidy_warns, shadowed_eqns) - where - (warns, used_nos) = check' ([1..] `zip` map tidy_eqn qs) - untidy_warns = map untidy_exhaustive warns - shadowed_eqns = [eqn | (eqn,i) <- qs `zip` [1..], - not (i `elementOfUniqSet` used_nos)] - -untidy_exhaustive :: ExhaustivePat -> ExhaustivePat -untidy_exhaustive ([pat], messages) = - ([untidy_no_pars pat], map untidy_message messages) -untidy_exhaustive (pats, messages) = - (map untidy_pars pats, map untidy_message messages) - -untidy_message :: (Name, [HsLit]) -> (Name, [HsLit]) -untidy_message (string, lits) = (string, map untidy_lit lits) -\end{code} - -The function @untidy@ does the reverse work of the @tidy_pat@ funcion. - -\begin{code} - -type NeedPars = Bool - -untidy_no_pars :: WarningPat -> WarningPat -untidy_no_pars p = untidy False p - -untidy_pars :: WarningPat -> WarningPat -untidy_pars p = untidy True p - -untidy :: NeedPars -> WarningPat -> WarningPat -untidy b (L loc p) = L loc (untidy' b p) - where - untidy' _ p@(WildPat _) = p - untidy' _ p@(VarPat _) = p - untidy' _ (LitPat lit) = LitPat (untidy_lit lit) - untidy' _ p@(ConPatIn _ (PrefixCon [])) = p - untidy' b (ConPatIn name ps) = pars b (L loc (ConPatIn name (untidy_con ps))) - untidy' _ (ListPat pats ty) = ListPat (map untidy_no_pars pats) ty - untidy' _ (TuplePat pats box ty) = TuplePat (map untidy_no_pars pats) box ty - untidy' _ (PArrPat _ _) = panic "Check.untidy: Shouldn't get a parallel array here!" 
- untidy' _ (SigPatIn _ _) = panic "Check.untidy: SigPat" - -untidy_con :: HsConPatDetails Name -> HsConPatDetails Name -untidy_con (PrefixCon pats) = PrefixCon (map untidy_pars pats) -untidy_con (InfixCon p1 p2) = InfixCon (untidy_pars p1) (untidy_pars p2) -untidy_con (RecCon (HsRecFields flds dd)) - = RecCon (HsRecFields [ fld { hsRecFieldArg = untidy_pars (hsRecFieldArg fld) } - | fld <- flds ] dd) - -pars :: NeedPars -> WarningPat -> Pat Name -pars True p = ParPat p -pars _ p = unLoc p - -untidy_lit :: HsLit -> HsLit -untidy_lit (HsCharPrim c) = HsChar c -untidy_lit lit = lit -\end{code} - -This equation is the same that check, the only difference is that the -boring work is done, that work needs to be done only once, this is -the reason top have two functions, check is the external interface, -@check'@ is called recursively. - -There are several cases: - -\begin{itemize} -\item There are no equations: Everything is OK. -\item There are only one equation, that can fail, and all the patterns are - variables. Then that equation is used and the same equation is - non-exhaustive. -\item All the patterns are variables, and the match can fail, there are - more equations then the results is the result of the rest of equations - and this equation is used also. - -\item The general case, if all the patterns are variables (here the match - can't fail) then the result is that this equation is used and this - equation doesn't generate non-exhaustive cases. - -\item In the general case, there can exist literals ,constructors or only - vars in the first column, we actuate in consequence. 
- -\end{itemize} - - -\begin{code} - -check' :: [(EqnNo, EquationInfo)] - -> ([ExhaustivePat], -- Pattern scheme that might not be matched at all - EqnSet) -- Eqns that are used (others are overlapped) - -check' [] = ([([],[])],emptyUniqSet) - -check' ((n, EqnInfo { eqn_pats = ps, eqn_rhs = MatchResult can_fail _ }) : rs) - | first_eqn_all_vars && case can_fail of { CantFail -> True; CanFail -> False } - = ([], unitUniqSet n) -- One eqn, which can't fail - - | first_eqn_all_vars && null rs -- One eqn, but it can fail - = ([(takeList ps (repeat nlWildPat),[])], unitUniqSet n) - - | first_eqn_all_vars -- Several eqns, first can fail - = (pats, addOneToUniqSet indexs n) - where - first_eqn_all_vars = all_vars ps - (pats,indexs) = check' rs - -check' qs - | some_literals = split_by_literals qs - | some_constructors = split_by_constructor qs - | only_vars = first_column_only_vars qs - | otherwise = pprPanic "Check.check': Not implemented :-(" (ppr first_pats) - -- Shouldn't happen - where - -- Note: RecPats will have been simplified to ConPats - -- at this stage. - first_pats = ASSERT2( okGroup qs, pprGroup qs ) map firstPatN qs - some_constructors = any is_con first_pats - some_literals = any is_lit first_pats - only_vars = all is_var first_pats -\end{code} - -Here begins the code to deal with literals, we need to split the matrix -in different matrix beginning by each literal and a last matrix with the -rest of values. - -\begin{code} -split_by_literals :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat], EqnSet) -split_by_literals qs = process_literals used_lits qs - where - used_lits = get_used_lits qs -\end{code} - -@process_explicit_literals@ is a function that process each literal that appears -in the column of the matrix. 
- -\begin{code} -process_explicit_literals :: [HsLit] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet) -process_explicit_literals lits qs = (concat pats, unionManyUniqSets indexs) - where - pats_indexs = map (\x -> construct_literal_matrix x qs) lits - (pats,indexs) = unzip pats_indexs -\end{code} - - -@process_literals@ calls @process_explicit_literals@ to deal with the literals -that appears in the matrix and deal also with the rest of the cases. It -must be one Variable to be complete. - -\begin{code} - -process_literals :: [HsLit] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet) -process_literals used_lits qs - | null default_eqns = ASSERT( not (null qs) ) ([make_row_vars used_lits (head qs)] ++ pats,indexs) - | otherwise = (pats_default,indexs_default) - where - (pats,indexs) = process_explicit_literals used_lits qs - default_eqns = ASSERT2( okGroup qs, pprGroup qs ) - [remove_var q | q <- qs, is_var (firstPatN q)] - (pats',indexs') = check' default_eqns - pats_default = [(nlWildPat:ps,constraints) | (ps,constraints) <- (pats')] ++ pats - indexs_default = unionUniqSets indexs' indexs -\end{code} - -Here we have selected the literal and we will select all the equations that -begins for that literal and create a new matrix. 
- -\begin{code} -construct_literal_matrix :: HsLit -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet) -construct_literal_matrix lit qs = - (map (\ (xs,ys) -> (new_lit:xs,ys)) pats,indexs) - where - (pats,indexs) = (check' (remove_first_column_lit lit qs)) - new_lit = nlLitPat lit - -remove_first_column_lit :: HsLit - -> [(EqnNo, EquationInfo)] - -> [(EqnNo, EquationInfo)] -remove_first_column_lit lit qs - = ASSERT2( okGroup qs, pprGroup qs ) - [(n, shift_pat eqn) | q@(n,eqn) <- qs, is_var_lit lit (firstPatN q)] - where - shift_pat eqn@(EqnInfo { eqn_pats = _:ps}) = eqn { eqn_pats = ps } - shift_pat _ = panic "Check.shift_var: no patterns" -\end{code} - -This function splits the equations @qs@ in groups that deal with the -same constructor. - -\begin{code} -split_by_constructor :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat], EqnSet) -split_by_constructor qs - | notNull unused_cons = need_default_case used_cons unused_cons qs - | otherwise = no_need_default_case used_cons qs - where - used_cons = get_used_cons qs - unused_cons = get_unused_cons used_cons -\end{code} - -The first column of the patterns matrix only have vars, then there is -nothing to do. - -\begin{code} -first_column_only_vars :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet) -first_column_only_vars qs = (map (\ (xs,ys) -> (nlWildPat:xs,ys)) pats,indexs) - where - (pats, indexs) = check' (map remove_var qs) -\end{code} - -This equation takes a matrix of patterns and split the equations by -constructor, using all the constructors that appears in the first column -of the pattern matching. - -We can need a default clause or not ...., it depends if we used all the -constructors or not explicitly. The reasoning is similar to @process_literals@, -the difference is that here the default case is not always needed. 
- -\begin{code} -no_need_default_case :: [Pat Id] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet) -no_need_default_case cons qs = (concat pats, unionManyUniqSets indexs) - where - pats_indexs = map (\x -> construct_matrix x qs) cons - (pats,indexs) = unzip pats_indexs - -need_default_case :: [Pat Id] -> [DataCon] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet) -need_default_case used_cons unused_cons qs - | null default_eqns = (pats_default_no_eqns,indexs) - | otherwise = (pats_default,indexs_default) - where - (pats,indexs) = no_need_default_case used_cons qs - default_eqns = ASSERT2( okGroup qs, pprGroup qs ) - [remove_var q | q <- qs, is_var (firstPatN q)] - (pats',indexs') = check' default_eqns - pats_default = [(make_whole_con c:ps,constraints) | - c <- unused_cons, (ps,constraints) <- pats'] ++ pats - new_wilds = ASSERT( not (null qs) ) make_row_vars_for_constructor (head qs) - pats_default_no_eqns = [(make_whole_con c:new_wilds,[]) | c <- unused_cons] ++ pats - indexs_default = unionUniqSets indexs' indexs - -construct_matrix :: Pat Id -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet) -construct_matrix con qs = - (map (make_con con) pats,indexs) - where - (pats,indexs) = (check' (remove_first_column con qs)) -\end{code} - -Here remove first column is more difficult that with literals due to the fact -that constructors can have arguments. 
- -For instance, the matrix -\begin{verbatim} - (: x xs) y - z y -\end{verbatim} -is transformed in: -\begin{verbatim} - x xs y - _ _ y -\end{verbatim} - -\begin{code} -remove_first_column :: Pat Id -- Constructor - -> [(EqnNo, EquationInfo)] - -> [(EqnNo, EquationInfo)] -remove_first_column (ConPatOut{ pat_con = L _ con, pat_args = PrefixCon con_pats }) qs - = ASSERT2( okGroup qs, pprGroup qs ) - [(n, shift_var eqn) | q@(n, eqn) <- qs, is_var_con con (firstPatN q)] - where - new_wilds = [WildPat (hsLPatType arg_pat) | arg_pat <- con_pats] - shift_var eqn@(EqnInfo { eqn_pats = ConPatOut{ pat_args = PrefixCon ps' } : ps}) - = eqn { eqn_pats = map unLoc ps' ++ ps } - shift_var eqn@(EqnInfo { eqn_pats = WildPat _ : ps }) - = eqn { eqn_pats = new_wilds ++ ps } - shift_var _ = panic "Check.Shift_var:No done" - -make_row_vars :: [HsLit] -> (EqnNo, EquationInfo) -> ExhaustivePat -make_row_vars used_lits (_, EqnInfo { eqn_pats = pats}) - = (nlVarPat new_var:takeList (tail pats) (repeat nlWildPat),[(new_var,used_lits)]) - where - new_var = hash_x - -hash_x :: Name -hash_x = mkInternalName unboundKey {- doesn't matter much -} - (mkVarOccFS (fsLit "#x")) - noSrcSpan - -make_row_vars_for_constructor :: (EqnNo, EquationInfo) -> [WarningPat] -make_row_vars_for_constructor (_, EqnInfo { eqn_pats = pats}) - = takeList (tail pats) (repeat nlWildPat) - -compare_cons :: Pat Id -> Pat Id -> Bool -compare_cons (ConPatOut{ pat_con = L _ id1 }) (ConPatOut { pat_con = L _ id2 }) = id1 == id2 - -remove_dups :: [Pat Id] -> [Pat Id] -remove_dups [] = [] -remove_dups (x:xs) | or (map (\y -> compare_cons x y) xs) = remove_dups xs - | otherwise = x : remove_dups xs - -get_used_cons :: [(EqnNo, EquationInfo)] -> [Pat Id] -get_used_cons qs = remove_dups [pat | q <- qs, let pat = firstPatN q, - isConPatOut pat] - -isConPatOut :: Pat Id -> Bool -isConPatOut (ConPatOut {}) = True -isConPatOut _ = False - -remove_dups' :: [HsLit] -> [HsLit] -remove_dups' [] = [] -remove_dups' (x:xs) | x `elem` xs = 
remove_dups' xs - | otherwise = x : remove_dups' xs - - -get_used_lits :: [(EqnNo, EquationInfo)] -> [HsLit] -get_used_lits qs = remove_dups' all_literals - where - all_literals = get_used_lits' qs - -get_used_lits' :: [(EqnNo, EquationInfo)] -> [HsLit] -get_used_lits' [] = [] -get_used_lits' (q:qs) - | Just lit <- get_lit (firstPatN q) = lit : get_used_lits' qs - | otherwise = get_used_lits qs - -get_lit :: Pat id -> Maybe HsLit --- Get a representative HsLit to stand for the OverLit --- It doesn't matter which one, because they will only be compared --- with other HsLits gotten in the same way -get_lit (LitPat lit) = Just lit -get_lit (NPat (OverLit { ol_val = HsIntegral i}) mb _) = Just (HsIntPrim (mb_neg mb i)) -get_lit (NPat (OverLit { ol_val = HsFractional f }) mb _) = Just (HsFloatPrim (mb_neg mb f)) -get_lit (NPat (OverLit { ol_val = HsIsString s }) _ _) = Just (HsStringPrim s) -get_lit _ = Nothing - -mb_neg :: Num a => Maybe b -> a -> a -mb_neg Nothing v = v -mb_neg (Just _) v = -v - -get_unused_cons :: [Pat Id] -> [DataCon] -get_unused_cons used_cons = ASSERT( not (null used_cons) ) unused_cons - where - used_set :: UniqSet DataCon - used_set = mkUniqSet [d | ConPatOut{ pat_con = L _ d} <- used_cons] - (ConPatOut { pat_ty = ty }) = head used_cons - Just (ty_con, inst_tys) = splitTyConApp_maybe ty - unused_cons = filterOut is_used (tyConDataCons ty_con) - is_used con = con `elementOfUniqSet` used_set - || dataConCannotMatch inst_tys con - -all_vars :: [Pat Id] -> Bool -all_vars [] = True -all_vars (WildPat _:ps) = all_vars ps -all_vars _ = False - -remove_var :: (EqnNo, EquationInfo) -> (EqnNo, EquationInfo) -remove_var (n, eqn@(EqnInfo { eqn_pats = WildPat _ : ps})) = (n, eqn { eqn_pats = ps }) -remove_var _ = panic "Check.remove_var: equation does not begin with a variable" - ------------------------ -eqnPats :: (EqnNo, EquationInfo) -> [Pat Id] -eqnPats (_, eqn) = eqn_pats eqn - -okGroup :: [(EqnNo, EquationInfo)] -> Bool --- True if all equations have 
at least one pattern, and --- all have the same number of patterns -okGroup [] = True -okGroup (e:es) = n_pats > 0 && and [length (eqnPats e) == n_pats | e <- es] - where - n_pats = length (eqnPats e) - --- Half-baked print -pprGroup :: [(EqnNo, EquationInfo)] -> SDoc -pprEqnInfo :: (EqnNo, EquationInfo) -> SDoc -pprGroup es = vcat (map pprEqnInfo es) -pprEqnInfo e = ppr (eqnPats e) - - -firstPatN :: (EqnNo, EquationInfo) -> Pat Id -firstPatN (_, eqn) = firstPat eqn - -is_con :: Pat Id -> Bool -is_con (ConPatOut {}) = True -is_con _ = False - -is_lit :: Pat Id -> Bool -is_lit (LitPat _) = True -is_lit (NPat _ _ _) = True -is_lit _ = False - -is_var :: Pat Id -> Bool -is_var (WildPat _) = True -is_var _ = False - -is_var_con :: DataCon -> Pat Id -> Bool -is_var_con _ (WildPat _) = True -is_var_con con (ConPatOut{ pat_con = L _ id }) | id == con = True -is_var_con _ _ = False - -is_var_lit :: HsLit -> Pat Id -> Bool -is_var_lit _ (WildPat _) = True -is_var_lit lit pat - | Just lit' <- get_lit pat = lit == lit' - | otherwise = False -\end{code} - -The difference beteewn @make_con@ and @make_whole_con@ is that -@make_wole_con@ creates a new constructor with all their arguments, and -@make_con@ takes a list of argumntes, creates the contructor getting their -arguments from the list. See where \fbox{\ ???\ } are used for details. - -We need to reconstruct the patterns (make the constructors infix and -similar) at the same time that we create the constructors. - -You can tell tuple constructors using -\begin{verbatim} - Id.isTupleCon -\end{verbatim} -You can see if one constructor is infix with this clearer code :-)))))))))) -\begin{verbatim} - Lex.isLexConSym (Name.occNameString (Name.getOccName con)) -\end{verbatim} - - Rather clumsy but it works. (Simon Peyton Jones) - - -We don't mind the @nilDataCon@ because it doesn't change the way to -print the messsage, we are searching only for things like: @[1,2,3]@, -not @x:xs@ .... 
- -In @reconstruct_pat@ we want to ``undo'' the work -that we have done in @tidy_pat@. -In particular: -\begin{tabular}{lll} - @((,) x y)@ & returns to be & @(x, y)@ -\\ @((:) x xs)@ & returns to be & @(x:xs)@ -\\ @(x:(...:[])@ & returns to be & @[x,...]@ -\end{tabular} -% -The difficult case is the third one becouse we need to follow all the -contructors until the @[]@ to know that we need to use the second case, -not the second. \fbox{\ ???\ } -% -\begin{code} -isInfixCon :: DataCon -> Bool -isInfixCon con = isDataSymOcc (getOccName con) - -is_nil :: Pat Name -> Bool -is_nil (ConPatIn con (PrefixCon [])) = unLoc con == getName nilDataCon -is_nil _ = False - -is_list :: Pat Name -> Bool -is_list (ListPat _ _) = True -is_list _ = False - -return_list :: DataCon -> Pat Name -> Bool -return_list id q = id == consDataCon && (is_nil q || is_list q) - -make_list :: LPat Name -> Pat Name -> Pat Name -make_list p q | is_nil q = ListPat [p] placeHolderType -make_list p (ListPat ps ty) = ListPat (p:ps) ty -make_list _ _ = panic "Check.make_list: Invalid argument" - -make_con :: Pat Id -> ExhaustivePat -> ExhaustivePat -make_con (ConPatOut{ pat_con = L _ id }) (lp:lq:ps, constraints) - | return_list id q = (noLoc (make_list lp q) : ps, constraints) - | isInfixCon id = (nlInfixConPat (getName id) lp lq : ps, constraints) - where q = unLoc lq - -make_con (ConPatOut{ pat_con = L _ id, pat_args = PrefixCon pats, pat_ty = ty }) (ps, constraints) - | isTupleTyCon tc = (noLoc (TuplePat pats_con (tupleTyConBoxity tc) ty) : rest_pats, constraints) - | isPArrFakeCon id = (noLoc (PArrPat pats_con placeHolderType) : rest_pats, constraints) - | otherwise = (nlConPat name pats_con : rest_pats, constraints) - where - name = getName id - (pats_con, rest_pats) = splitAtList pats ps - tc = dataConTyCon id - --- reconstruct parallel array pattern --- --- * don't check for the type only; we need to make sure that we are really --- dealing with one of the fake constructors and not with the real 
--- representation - -make_whole_con :: DataCon -> WarningPat -make_whole_con con | isInfixCon con = nlInfixConPat name nlWildPat nlWildPat - | otherwise = nlConPat name pats - where - name = getName con - pats = [nlWildPat | _ <- dataConOrigArgTys con] -\end{code} - ------------------------------------------------------------------------- - Tidying equations ------------------------------------------------------------------------- - -tidy_eqn does more or less the same thing as @tidy@ in @Match.lhs@; -that is, it removes syntactic sugar, reducing the number of cases that -must be handled by the main checking algorithm. One difference is -that here we can do *all* the tidying at once (recursively), rather -than doing it incrementally. - -\begin{code} -tidy_eqn :: EquationInfo -> EquationInfo -tidy_eqn eqn = eqn { eqn_pats = map tidy_pat (eqn_pats eqn), - eqn_rhs = tidy_rhs (eqn_rhs eqn) } - where - -- Horrible hack. The tidy_pat stuff converts "might-fail" patterns to - -- WildPats which of course loses the info that they can fail to match. - -- So we stick in a CanFail as if it were a guard. - tidy_rhs (MatchResult can_fail body) - | any might_fail_pat (eqn_pats eqn) = MatchResult CanFail body - | otherwise = MatchResult can_fail body - --------------- -might_fail_pat :: Pat Id -> Bool --- Returns True of patterns that might fail (i.e. fall through) in a way --- that is not covered by the checking algorithm. 
Specifically: --- NPlusKPat --- ViewPat (if refutable) - --- First the two special cases -might_fail_pat (NPlusKPat {}) = True -might_fail_pat (ViewPat _ p _) = not (isIrrefutableHsPat p) - --- Now the recursive stuff -might_fail_pat (ParPat p) = might_fail_lpat p -might_fail_pat (AsPat _ p) = might_fail_lpat p -might_fail_pat (SigPatOut p _ ) = might_fail_lpat p -might_fail_pat (ListPat ps _) = any might_fail_lpat ps -might_fail_pat (TuplePat ps _ _) = any might_fail_lpat ps -might_fail_pat (PArrPat ps _) = any might_fail_lpat ps -might_fail_pat (BangPat p) = might_fail_lpat p -might_fail_pat (ConPatOut { pat_args = ps }) = any might_fail_lpat (hsConPatArgs ps) - --- Finally the ones that are sure to succeed, or which are covered by the checking algorithm -might_fail_pat (LazyPat _) = False -- Always succeeds -might_fail_pat _ = False -- VarPat, WildPat, LitPat, NPat, TypePat - --------------- -might_fail_lpat :: LPat Id -> Bool -might_fail_lpat (L _ p) = might_fail_pat p - --------------- -tidy_lpat :: LPat Id -> LPat Id -tidy_lpat p = fmap tidy_pat p - --------------- -tidy_pat :: Pat Id -> Pat Id -tidy_pat pat@(WildPat _) = pat -tidy_pat (VarPat id) = WildPat (idType id) -tidy_pat (ParPat p) = tidy_pat (unLoc p) -tidy_pat (LazyPat p) = WildPat (hsLPatType p) -- For overlap and exhaustiveness checking - -- purposes, a ~pat is like a wildcard -tidy_pat (BangPat p) = tidy_pat (unLoc p) -tidy_pat (AsPat _ p) = tidy_pat (unLoc p) -tidy_pat (SigPatOut p _) = tidy_pat (unLoc p) -tidy_pat (CoPat _ pat _) = tidy_pat pat - --- These two are might_fail patterns, so we map them to --- WildPats. The might_fail_pat stuff arranges that the --- guard says "this equation might fall through". 
-tidy_pat (NPlusKPat id _ _ _) = WildPat (idType (unLoc id)) -tidy_pat (ViewPat _ _ ty) = WildPat ty - -tidy_pat (NPat lit mb_neg eq) = tidyNPat lit mb_neg eq - -tidy_pat pat@(ConPatOut { pat_con = L _ id, pat_args = ps }) - = pat { pat_args = tidy_con id ps } - -tidy_pat (ListPat ps ty) - = unLoc $ foldr (\ x y -> mkPrefixConPat consDataCon [x,y] list_ty) - (mkNilPat list_ty) - (map tidy_lpat ps) - where list_ty = mkListTy ty - --- introduce fake parallel array constructors to be able to handle parallel --- arrays with the existing machinery for constructor pattern --- -tidy_pat (PArrPat ps ty) - = unLoc $ mkPrefixConPat (parrFakeCon (length ps)) - (map tidy_lpat ps) - (mkPArrTy ty) - -tidy_pat (TuplePat ps boxity ty) - = unLoc $ mkPrefixConPat (tupleCon boxity arity) - (map tidy_lpat ps) ty - where - arity = length ps - --- Unpack string patterns fully, so we can see when they overlap with --- each other, or even explicit lists of Chars. -tidy_pat (LitPat lit) - | HsString s <- lit - = unLoc $ foldr (\c pat -> mkPrefixConPat consDataCon [mk_char_lit c, pat] stringTy) - (mkPrefixConPat nilDataCon [] stringTy) (unpackFS s) - | otherwise - = tidyLitPat lit - where - mk_char_lit c = mkPrefixConPat charDataCon [nlLitPat (HsCharPrim c)] charTy - ------------------ -tidy_con :: DataCon -> HsConPatDetails Id -> HsConPatDetails Id -tidy_con _ (PrefixCon ps) = PrefixCon (map tidy_lpat ps) -tidy_con _ (InfixCon p1 p2) = PrefixCon [tidy_lpat p1, tidy_lpat p2] -tidy_con con (RecCon (HsRecFields fs _)) - | null fs = PrefixCon [nlWildPat | _ <- dataConOrigArgTys con] - -- Special case for null patterns; maybe not a record at all - | otherwise = PrefixCon (map (tidy_lpat.snd) all_pats) - where - -- pad out all the missing fields with WildPats. 
- field_pats = map (\ f -> (f, nlWildPat)) (dataConFieldLabels con) - all_pats = foldr (\(HsRecField id p _) acc -> insertNm (getName (unLoc id)) p acc) - field_pats fs - - insertNm nm p [] = [(nm,p)] - insertNm nm p (x@(n,_):xs) - | nm == n = (nm,p):xs - | otherwise = x : insertNm nm p xs -\end{code} +%
+% (c) The University of Glasgow 2006
+% (c) The GRASP/AQUA Project, Glasgow University, 1997-1998
+%
+% Author: Juan J. Quintela <quintela@krilin.dc.fi.udc.es>
+
+\begin{code}
+{-# OPTIONS -fno-warn-incomplete-patterns #-}
+-- The above warning supression flag is a temporary kludge.
+-- While working on this module you are encouraged to remove it and fix
+-- any warnings in the module. See
+-- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings
+-- for details
+
+module Check ( check , ExhaustivePat ) where
+
+#include "HsVersions.h"
+
+import HsSyn
+import TcHsSyn
+import DsUtils
+import MatchLit
+import Id
+import DataCon
+import Name
+import TysWiredIn
+import PrelNames
+import TyCon
+import Type
+import Unify( dataConCannotMatch )
+import SrcLoc
+import UniqSet
+import Util
+import Outputable
+import FastString
+\end{code}
+
+This module performs checks about if one list of equations are:
+\begin{itemize}
+\item Overlapped
+\item Non exhaustive
+\end{itemize}
+To discover that we go through the list of equations in a tree-like fashion.
+
+If you like theory, a similar algorithm is described in:
+\begin{quotation}
+ {\em Two Techniques for Compiling Lazy Pattern Matching},
+ Luc Maranguet,
+ INRIA Rocquencourt (RR-2385, 1994)
+\end{quotation}
+The algorithm is based on the first technique, but there are some differences:
+\begin{itemize}
+\item We don't generate code
+\item We have constructors and literals (not only literals as in the
+ article)
+\item We don't use directions, we must select the columns from
+ left-to-right
+\end{itemize}
+(By the way the second technique is really similar to the one used in
+ @Match.lhs@ to generate code)
+
+This function takes the equations of a pattern and returns:
+\begin{itemize}
+\item The patterns that are not recognized
+\item The equations that are not overlapped
+\end{itemize}
+It simplify the patterns and then call @check'@ (the same semantics), and it
+needs to reconstruct the patterns again ....
+
+The problem appear with things like:
+\begin{verbatim}
+ f [x,y] = ....
+ f (x:xs) = .....
+\end{verbatim}
+We want to put the two patterns with the same syntax, (prefix form) and
+then all the constructors are equal:
+\begin{verbatim}
+ f (: x (: y [])) = ....
+ f (: x xs) = .....
+\end{verbatim}
+(more about that in @tidy_eqns@)
+
+We would prefer to have a @WarningPat@ of type @String@, but Strings and the
+Pretty Printer are not friends.
+
+We use @InPat@ in @WarningPat@ instead of @OutPat@
+because we need to print the
+warning messages in the same way they are introduced, i.e. if the user
+wrote:
+\begin{verbatim}
+ f [x,y] = ..
+\end{verbatim}
+He don't want a warning message written:
+\begin{verbatim}
+ f (: x (: y [])) ........
+\end{verbatim}
+Then we need to use InPats.
+\begin{quotation}
+ Juan Quintela 5 JUL 1998\\
+ User-friendliness and compiler writers are no friends.
+\end{quotation}
+
+\begin{code}
+type WarningPat = InPat Name
+type ExhaustivePat = ([WarningPat], [(Name, [HsLit])])
+type EqnNo = Int
+type EqnSet = UniqSet EqnNo
+
+
+check :: [EquationInfo] -> ([ExhaustivePat], [EquationInfo])
+ -- Second result is the shadowed equations
+ -- if there are view patterns, just give up - don't know what the function is
+check qs = pprTrace "check" (ppr tidy_qs) $
+ (untidy_warns, shadowed_eqns)
+ where
+ tidy_qs = map tidy_eqn qs
+ (warns, used_nos) = check' ([1..] `zip` tidy_qs)
+ untidy_warns = map untidy_exhaustive warns
+ shadowed_eqns = [eqn | (eqn,i) <- qs `zip` [1..],
+ not (i `elementOfUniqSet` used_nos)]
+
+untidy_exhaustive :: ExhaustivePat -> ExhaustivePat
+untidy_exhaustive ([pat], messages) =
+ ([untidy_no_pars pat], map untidy_message messages)
+untidy_exhaustive (pats, messages) =
+ (map untidy_pars pats, map untidy_message messages)
+
+untidy_message :: (Name, [HsLit]) -> (Name, [HsLit])
+untidy_message (string, lits) = (string, map untidy_lit lits)
+\end{code}
+
+The function @untidy@ does the reverse work of the @tidy_pat@ funcion.
+
+\begin{code}
+
+type NeedPars = Bool
+
+untidy_no_pars :: WarningPat -> WarningPat
+untidy_no_pars p = untidy False p
+
+untidy_pars :: WarningPat -> WarningPat
+untidy_pars p = untidy True p
+
+untidy :: NeedPars -> WarningPat -> WarningPat
+untidy b (L loc p) = L loc (untidy' b p)
+ where
+ untidy' _ p@(WildPat _) = p
+ untidy' _ p@(VarPat _) = p
+ untidy' _ (LitPat lit) = LitPat (untidy_lit lit)
+ untidy' _ p@(ConPatIn _ (PrefixCon [])) = p
+ untidy' b (ConPatIn name ps) = pars b (L loc (ConPatIn name (untidy_con ps)))
+ untidy' _ (ListPat pats ty) = ListPat (map untidy_no_pars pats) ty
+ untidy' _ (TuplePat pats box ty) = TuplePat (map untidy_no_pars pats) box ty
+ untidy' _ (PArrPat _ _) = panic "Check.untidy: Shouldn't get a parallel array here!"
+ untidy' _ (SigPatIn _ _) = panic "Check.untidy: SigPat"
+
+untidy_con :: HsConPatDetails Name -> HsConPatDetails Name
+untidy_con (PrefixCon pats) = PrefixCon (map untidy_pars pats)
+untidy_con (InfixCon p1 p2) = InfixCon (untidy_pars p1) (untidy_pars p2)
+untidy_con (RecCon (HsRecFields flds dd))
+ = RecCon (HsRecFields [ fld { hsRecFieldArg = untidy_pars (hsRecFieldArg fld) }
+ | fld <- flds ] dd)
+
+pars :: NeedPars -> WarningPat -> Pat Name
+pars True p = ParPat p
+pars _ p = unLoc p
+
+untidy_lit :: HsLit -> HsLit
+untidy_lit (HsCharPrim c) = HsChar c
+untidy_lit lit = lit
+\end{code}
+
+This equation is the same that check, the only difference is that the
+boring work is done, that work needs to be done only once, this is
+the reason top have two functions, check is the external interface,
+@check'@ is called recursively.
+
+There are several cases:
+
+\begin{itemize}
+\item There are no equations: Everything is OK.
+\item There is only one equation, it can fail, and all the patterns are
+ variables. Then that equation is used and the same equation is
+ non-exhaustive.
+\item All the patterns are variables, the match can fail, and there are
+ more equations; then the result is the result of the rest of the equations,
+ and this equation is used also.
+
+\item The general case, if all the patterns are variables (here the match
+ can't fail) then the result is that this equation is used and this
+ equation doesn't generate non-exhaustive cases.
+
+\item In the general case, there can be literals, constructors or only
+ vars in the first column; we act accordingly.
+
+\end{itemize}
+
+
+\begin{code}
+
+check' :: [(EqnNo, EquationInfo)]
+ -> ([ExhaustivePat], -- Pattern scheme that might not be matched at all
+ EqnSet) -- Eqns that are used (others are overlapped)
+
+-- No equations: the empty pattern scheme is unmatched, and no eqn is used
+check' [] = ([([],[])],emptyUniqSet)
+
+check' ((n, EqnInfo { eqn_pats = ps, eqn_rhs = MatchResult can_fail _ }) : rs)
+ | first_eqn_all_vars && case can_fail of { CantFail -> True; CanFail -> False }
+ = ([], unitUniqSet n) -- One eqn, which can't fail
+
+ | first_eqn_all_vars && null rs -- One eqn, but it can fail
+ = ([(takeList ps (repeat nlWildPat),[])], unitUniqSet n)
+
+ | first_eqn_all_vars -- Several eqns, first can fail
+ = (pats, addOneToUniqSet indexs n)
+ where
+ first_eqn_all_vars = all_vars ps
+ (pats,indexs) = check' rs
+
+-- General case: dispatch on what appears in the first column.
+-- (Reached when the guards of the previous equation all fail.)
+check' qs
+ | some_literals = split_by_literals qs
+ | some_constructors = split_by_constructor qs
+ | only_vars = first_column_only_vars qs
+ | otherwise = pprPanic "Check.check': Not implemented :-(" (ppr first_pats)
+ -- Shouldn't happen
+ where
+ -- Note: RecPats will have been simplified to ConPats
+ -- at this stage.
+ first_pats = ASSERT2( okGroup qs, pprGroup qs ) map firstPatN qs
+ some_constructors = any is_con first_pats
+ some_literals = any is_lit first_pats
+ only_vars = all is_var first_pats
+\end{code}
+
+Here begins the code to deal with literals, we need to split the matrix
+in different matrix beginning by each literal and a last matrix with the
+rest of values.
+
+\begin{code}
+-- | Check a matrix whose first column contains literals, by splitting on
+-- each literal that actually occurs there.
+split_by_literals :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat], EqnSet)
+split_by_literals qs = process_literals (get_used_lits qs) qs
+\end{code}
+
+@process_explicit_literals@ is a function that processes each literal that
+appears in the column of the matrix.
+
+\begin{code}
+-- | Check one submatrix per explicit literal and combine the results.
+process_explicit_literals :: [HsLit] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
+process_explicit_literals lits qs = (concat pats, unionManyUniqSets indexs)
+ where
+ (pats, indexs) = unzip [construct_literal_matrix lit qs | lit <- lits]
+\end{code}
+
+
+@process_literals@ calls @process_explicit_literals@ to deal with the literals
+that appear in the matrix, and also deals with the rest of the cases. There
+must be a variable pattern for the match to be complete.
+
+\begin{code}
+
+-- | Check the matrix against each used literal; if there is no variable
+-- (default) row, the literals themselves are reported as the missing
+-- patterns, otherwise the default matrix supplies the remaining cases.
+process_literals :: [HsLit] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
+process_literals used_lits qs
+ | null default_eqns = ASSERT( not (null qs) ) ([make_row_vars used_lits (head qs)] ++ pats,indexs)
+ | otherwise = (pats_default,indexs_default)
+ where
+ (pats,indexs) = process_explicit_literals used_lits qs
+ -- Rows whose first pattern is a variable form the default matrix
+ default_eqns = ASSERT2( okGroup qs, pprGroup qs )
+ [remove_var q | q <- qs, is_var (firstPatN q)]
+ (pats',indexs') = check' default_eqns
+ pats_default = [(nlWildPat:ps,constraints) | (ps,constraints) <- (pats')] ++ pats
+ indexs_default = unionUniqSets indexs' indexs
+\end{code}
+
+Here we have selected the literal, and we will select all the equations that
+begin with that literal and create a new matrix.
+
+\begin{code}
+-- | Check the submatrix of equations whose first pattern matches the
+-- given literal, then prefix the literal onto the resulting schemes.
+construct_literal_matrix :: HsLit -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
+construct_literal_matrix lit qs =
+ (map (\ (xs,ys) -> (new_lit:xs,ys)) pats,indexs)
+ where
+ (pats,indexs) = (check' (remove_first_column_lit lit qs))
+ new_lit = nlLitPat lit
+
+-- | Keep the equations whose first pattern is the literal or a variable,
+-- dropping that first column.
+remove_first_column_lit :: HsLit
+ -> [(EqnNo, EquationInfo)]
+ -> [(EqnNo, EquationInfo)]
+remove_first_column_lit lit qs
+ = ASSERT2( okGroup qs, pprGroup qs )
+ [(n, shift_pat eqn) | q@(n,eqn) <- qs, is_var_lit lit (firstPatN q)]
+ where
+ shift_pat eqn@(EqnInfo { eqn_pats = _:ps}) = eqn { eqn_pats = ps }
+ shift_pat _ = panic "Check.shift_var: no patterns"
+\end{code}
+
+This function splits the equations @qs@ in groups that deal with the
+same constructor.
+
+\begin{code}
+-- | Split the matrix by the constructors occurring in the first column;
+-- a default case is needed exactly when some constructor of the type
+-- is not matched explicitly.
+split_by_constructor :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat], EqnSet)
+split_by_constructor qs
+ | null unused_cons = no_need_default_case used_cons qs
+ | otherwise = need_default_case used_cons unused_cons qs
+ where
+ used_cons = get_used_cons qs
+ unused_cons = get_unused_cons used_cons
+\end{code}
+
+The first column of the pattern matrix has only vars, so there is
+nothing to do.
+
+\begin{code}
+-- | First column is all variables: drop it, check the rest, and put a
+-- wildcard back in front of every resulting pattern scheme.
+first_column_only_vars :: [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
+first_column_only_vars qs = (map prepend_wild pats, indexs)
+ where
+ (pats, indexs) = check' (map remove_var qs)
+ prepend_wild (ps, cs) = (nlWildPat : ps, cs)
+\end{code}
+
+This function takes a matrix of patterns and splits the equations by
+constructor, using all the constructors that appear in the first column
+of the pattern matching.
+
+We may or may not need a default clause; it depends on whether we used all
+the constructors explicitly. The reasoning is similar to @process_literals@;
+the difference is that here the default case is not always needed.
+
+\begin{code}
+-- | All constructors of the type are covered explicitly: check each
+-- constructor's submatrix and concatenate the results.
+no_need_default_case :: [Pat Id] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
+no_need_default_case cons qs = (concat pats, unionManyUniqSets indexs)
+ where
+ pats_indexs = map (\x -> construct_matrix x qs) cons
+ (pats,indexs) = unzip pats_indexs
+
+-- | Some constructors are not matched explicitly.  The variable rows (if
+-- any) act as the default matrix; otherwise each unused constructor is
+-- reported as missing, padded with wildcards for the remaining columns.
+need_default_case :: [Pat Id] -> [DataCon] -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
+need_default_case used_cons unused_cons qs
+ | null default_eqns = (pats_default_no_eqns,indexs)
+ | otherwise = (pats_default,indexs_default)
+ where
+ (pats,indexs) = no_need_default_case used_cons qs
+ default_eqns = ASSERT2( okGroup qs, pprGroup qs )
+ [remove_var q | q <- qs, is_var (firstPatN q)]
+ (pats',indexs') = check' default_eqns
+ pats_default = [(make_whole_con c:ps,constraints) |
+ c <- unused_cons, (ps,constraints) <- pats'] ++ pats
+ new_wilds = ASSERT( not (null qs) ) make_row_vars_for_constructor (head qs)
+ pats_default_no_eqns = [(make_whole_con c:new_wilds,[]) | c <- unused_cons] ++ pats
+ indexs_default = unionUniqSets indexs' indexs
+
+-- | Check the submatrix selected by one constructor, rebuilding that
+-- constructor around the resulting pattern schemes (see 'make_con').
+construct_matrix :: Pat Id -> [(EqnNo, EquationInfo)] -> ([ExhaustivePat],EqnSet)
+construct_matrix con qs =
+ (map (make_con con) pats,indexs)
+ where
+ (pats,indexs) = (check' (remove_first_column con qs))
+\end{code}
+
+Here remove first column is more difficult than with literals due to the fact
+that constructors can have arguments.
+
+For instance, the matrix
+\begin{verbatim}
+ (: x xs) y
+ z y
+\end{verbatim}
+is transformed into:
+\begin{verbatim}
+ x xs y
+ _ _ y
+\end{verbatim}
+
+\begin{code}
+-- | Keep the equations whose first pattern is the given constructor or a
+-- variable; replace that column by the constructor's argument patterns
+-- (typed wildcards for a variable row).
+remove_first_column :: Pat Id -- Constructor
+ -> [(EqnNo, EquationInfo)]
+ -> [(EqnNo, EquationInfo)]
+remove_first_column (ConPatOut{ pat_con = L _ con, pat_args = PrefixCon con_pats }) qs
+ = ASSERT2( okGroup qs, pprGroup qs )
+ [(n, shift_var eqn) | q@(n, eqn) <- qs, is_var_con con (firstPatN q)]
+ where
+ new_wilds = [WildPat (hsLPatType arg_pat) | arg_pat <- con_pats]
+ shift_var eqn@(EqnInfo { eqn_pats = ConPatOut{ pat_args = PrefixCon ps' } : ps})
+ = eqn { eqn_pats = map unLoc ps' ++ ps }
+ shift_var eqn@(EqnInfo { eqn_pats = WildPat _ : ps })
+ = eqn { eqn_pats = new_wilds ++ ps }
+ shift_var _ = panic "Check.Shift_var:No done"
+
+-- | Build a missing-pattern row: a fresh variable constrained to avoid the
+-- literals already matched, then wildcards for the other columns.
+-- NOTE(review): 'tail pats' presumably relies on okGroup guaranteeing a
+-- non-empty pattern row — confirm at call sites.
+make_row_vars :: [HsLit] -> (EqnNo, EquationInfo) -> ExhaustivePat
+make_row_vars used_lits (_, EqnInfo { eqn_pats = pats})
+ = (nlVarPat new_var:takeList (tail pats) (repeat nlWildPat),[(new_var,used_lits)])
+ where
+ new_var = hash_x
+
+-- | A fixed internal name used for the variable shown in literal warnings.
+hash_x :: Name
+hash_x = mkInternalName unboundKey {- doesn't matter much -}
+ (mkVarOccFS (fsLit "#x"))
+ noSrcSpan
+
+-- | Wildcards for every column except the first.
+make_row_vars_for_constructor :: (EqnNo, EquationInfo) -> [WarningPat]
+make_row_vars_for_constructor (_, EqnInfo { eqn_pats = pats})
+ = takeList (tail pats) (repeat nlWildPat)
+
+-- | True when two constructor patterns use the same data constructor.
+-- NB: partial — both arguments must be ConPatOut (callers guarantee this).
+compare_cons :: Pat Id -> Pat Id -> Bool
+compare_cons (ConPatOut{ pat_con = L _ id1 }) (ConPatOut { pat_con = L _ id2 }) = id1 == id2
+
+-- | Remove duplicate constructor patterns, keeping the last occurrence.
+remove_dups :: [Pat Id] -> [Pat Id]
+remove_dups [] = []
+remove_dups (x:xs) | or (map (\y -> compare_cons x y) xs) = remove_dups xs
+ | otherwise = x : remove_dups xs
+
+-- | The distinct constructor patterns heading the equations.
+get_used_cons :: [(EqnNo, EquationInfo)] -> [Pat Id]
+get_used_cons qs = remove_dups [pat | q <- qs, let pat = firstPatN q,
+ isConPatOut pat]
+
+isConPatOut :: Pat Id -> Bool
+isConPatOut (ConPatOut {}) = True
+isConPatOut _ = False
+
+-- | Remove duplicate literals, keeping the last occurrence.
+remove_dups' :: [HsLit] -> [HsLit]
+remove_dups' [] = []
+remove_dups' (x:xs) | x `elem` xs = remove_dups' xs
+ | otherwise = x : remove_dups' xs
+
+
+-- | All distinct literals appearing in the first column of the equations.
+get_used_lits :: [(EqnNo, EquationInfo)] -> [HsLit]
+get_used_lits qs = remove_dups' all_literals
+ where
+ all_literals = get_used_lits' qs
+
+-- | Collect (with duplicates) the literals heading each equation; the
+-- single top-level remove_dups' in get_used_lits does the deduplication.
+get_used_lits' :: [(EqnNo, EquationInfo)] -> [HsLit]
+get_used_lits' [] = []
+get_used_lits' (q:qs)
+ | Just lit <- get_lit (firstPatN q) = lit : get_used_lits' qs
+ | otherwise = get_used_lits' qs
+ -- Previously recursed via get_used_lits, redundantly re-running the
+ -- O(n^2) remove_dups' at every non-literal row; the final result set
+ -- is unchanged because the outer remove_dups' dedups anyway.
+
+get_lit :: Pat id -> Maybe HsLit
+-- Get a representative HsLit to stand for the OverLit
+-- It doesn't matter which one, because they will only be compared
+-- with other HsLits gotten in the same way
+get_lit (LitPat lit) = Just lit
+get_lit (NPat (OverLit { ol_val = HsIntegral i}) mb _) = Just (HsIntPrim (mb_neg mb i))
+get_lit (NPat (OverLit { ol_val = HsFractional f }) mb _) = Just (HsFloatPrim (mb_neg mb f))
+get_lit (NPat (OverLit { ol_val = HsIsString s }) _ _) = Just (HsStringPrim s)
+get_lit _ = Nothing
+
+-- | Negate the value iff the (ignored) negation witness is present.
+mb_neg :: Num a => Maybe b -> a -> a
+mb_neg Nothing v = v
+mb_neg (Just _) v = -v
+
+-- | The data constructors of the scrutinee's type that are matched by none
+-- of the used constructor patterns and could actually match at the
+-- instantiating types (GADT-style impossible constructors are excluded).
+-- NOTE(review): the irrefutable 'Just' match assumes the pattern type is a
+-- saturated TyConApp — confirm this holds for all callers.
+get_unused_cons :: [Pat Id] -> [DataCon]
+get_unused_cons used_cons = ASSERT( not (null used_cons) ) unused_cons
+ where
+ used_set :: UniqSet DataCon
+ used_set = mkUniqSet [d | ConPatOut{ pat_con = L _ d} <- used_cons]
+ (ConPatOut { pat_ty = ty }) = head used_cons
+ Just (ty_con, inst_tys) = splitTyConApp_maybe ty
+ unused_cons = filterOut is_used (tyConDataCons ty_con)
+ is_used con = con `elementOfUniqSet` used_set
+ || dataConCannotMatch inst_tys con
+
+-- | True when every pattern in the row is a wildcard (tidying has already
+-- turned variables into wildcards).
+all_vars :: [Pat Id] -> Bool
+all_vars = all is_var
+
+-- | Drop the leading wildcard from an equation's pattern row.
+remove_var :: (EqnNo, EquationInfo) -> (EqnNo, EquationInfo)
+remove_var (n, eqn) = case eqn_pats eqn of
+ WildPat _ : ps -> (n, eqn { eqn_pats = ps })
+ _ -> panic "Check.remove_var: equation does not begin with a variable"
+
+-----------------------
+-- | The pattern row of an (index, equation) pair.
+eqnPats :: (EqnNo, EquationInfo) -> [Pat Id]
+eqnPats = eqn_pats . snd
+
+okGroup :: [(EqnNo, EquationInfo)] -> Bool
+-- True if all equations have at least one pattern, and
+-- all have the same number of patterns
+okGroup [] = True
+okGroup (e:es) = n_pats > 0 && all (\e' -> length (eqnPats e') == n_pats) es
+ where
+ n_pats = length (eqnPats e)
+
+-- Half-baked print
+pprGroup :: [(EqnNo, EquationInfo)] -> SDoc
+pprEqnInfo :: (EqnNo, EquationInfo) -> SDoc
+pprGroup = vcat . map pprEqnInfo
+pprEqnInfo = ppr . eqnPats
+
+
+-- | First pattern of the equation, ignoring its index.
+firstPatN :: (EqnNo, EquationInfo) -> Pat Id
+firstPatN = firstPat . snd
+
+-- | Is the first-column pattern a constructor application?
+is_con :: Pat Id -> Bool
+is_con (ConPatOut {}) = True
+is_con _ = False
+
+-- | Is the first-column pattern a (possibly overloaded) literal?
+is_lit :: Pat Id -> Bool
+is_lit (LitPat _) = True
+is_lit (NPat _ _ _) = True
+is_lit _ = False
+
+-- | Is the first-column pattern a variable (tidied to a wildcard)?
+is_var :: Pat Id -> Bool
+is_var (WildPat _) = True
+is_var _ = False
+
+-- | Does the pattern match the given constructor, or match anything?
+is_var_con :: DataCon -> Pat Id -> Bool
+is_var_con _ (WildPat _) = True
+is_var_con con (ConPatOut{ pat_con = L _ id }) | id == con = True
+is_var_con _ _ = False
+
+-- | Does the pattern match the given literal, or match anything?
+is_var_lit :: HsLit -> Pat Id -> Bool
+is_var_lit _ (WildPat _) = True
+is_var_lit lit pat
+ | Just lit' <- get_lit pat = lit == lit'
+ | otherwise = False
+\end{code}
+
+The difference between @make_con@ and @make_whole_con@ is that
+@make_whole_con@ creates a new constructor with all its arguments, and
+@make_con@ takes a list of arguments and creates the constructor, getting its
+arguments from the list. See where \fbox{\ ???\ } are used for details.
+
+We need to reconstruct the patterns (make the constructors infix and
+similar) at the same time that we create the constructors.
+
+You can tell tuple constructors using
+\begin{verbatim}
+ Id.isTupleCon
+\end{verbatim}
+You can see if one constructor is infix with this clearer code :-))))))))))
+\begin{verbatim}
+ Lex.isLexConSym (Name.occNameString (Name.getOccName con))
+\end{verbatim}
+
+ Rather clumsy but it works. (Simon Peyton Jones)
+
+
+We don't mind the @nilDataCon@ because it doesn't change the way to
+print the message; we are searching only for things like @[1,2,3]@,
+not @x:xs@ ....
+
+In @reconstruct_pat@ we want to ``undo'' the work
+that we have done in @tidy_pat@.
+In particular:
+\begin{tabular}{lll}
+ @((,) x y)@ & returns to be & @(x, y)@
+\\ @((:) x xs)@ & returns to be & @(x:xs)@
+\\ @(x:(...:[])@ & returns to be & @[x,...]@
+\end{tabular}
+%
+The difficult case is the third one because we need to follow all the
+constructors until the @[]@ to know that we need to use the third case,
+not the second. \fbox{\ ???\ }
+%
+\begin{code}
+-- | Is the constructor written with symbolic (infix) syntax?
+isInfixCon :: DataCon -> Bool
+isInfixCon con = isDataSymOcc (getOccName con)
+
+is_nil :: Pat Name -> Bool
+is_nil (ConPatIn con (PrefixCon [])) = unLoc con == getName nilDataCon
+is_nil _ = False
+
+is_list :: Pat Name -> Bool
+is_list (ListPat _ _) = True
+is_list _ = False
+
+-- | Should a cons cell with this tail be rebuilt as explicit-list syntax?
+return_list :: DataCon -> Pat Name -> Bool
+return_list id q = id == consDataCon && (is_nil q || is_list q)
+
+-- | Prepend an element onto a nil or explicit-list pattern.
+make_list :: LPat Name -> Pat Name -> Pat Name
+make_list p q | is_nil q = ListPat [p] placeHolderType
+make_list p (ListPat ps ty) = ListPat (p:ps) ty
+make_list _ _ = panic "Check.make_list: Invalid argument"
+
+-- | Rebuild a constructor pattern around the head of a pattern scheme,
+-- restoring list, infix, tuple and parallel-array syntax where appropriate.
+-- NB: intentionally partial (see the module-wide warning suppression).
+make_con :: Pat Id -> ExhaustivePat -> ExhaustivePat
+make_con (ConPatOut{ pat_con = L _ id }) (lp:lq:ps, constraints)
+ | return_list id q = (noLoc (make_list lp q) : ps, constraints)
+ | isInfixCon id = (nlInfixConPat (getName id) lp lq : ps, constraints)
+ where q = unLoc lq
+
+make_con (ConPatOut{ pat_con = L _ id, pat_args = PrefixCon pats, pat_ty = ty }) (ps, constraints)
+ | isTupleTyCon tc = (noLoc (TuplePat pats_con (tupleTyConBoxity tc) ty) : rest_pats, constraints)
+ | isPArrFakeCon id = (noLoc (PArrPat pats_con placeHolderType) : rest_pats, constraints)
+ | otherwise = (nlConPat name pats_con : rest_pats, constraints)
+ where
+ name = getName id
+ (pats_con, rest_pats) = splitAtList pats ps
+ tc = dataConTyCon id
+
+-- reconstruct parallel array pattern
+--
+-- * don't check for the type only; we need to make sure that we are really
+-- dealing with one of the fake constructors and not with the real
+-- representation
+
+-- | Build a pattern for a whole (unmatched) constructor, with wildcard
+-- arguments.
+make_whole_con :: DataCon -> WarningPat
+make_whole_con con | isInfixCon con = nlInfixConPat name nlWildPat nlWildPat
+ | otherwise = nlConPat name pats
+ where
+ name = getName con
+ pats = [nlWildPat | _ <- dataConOrigArgTys con]
+\end{code}
+
+------------------------------------------------------------------------
+ Tidying equations
+------------------------------------------------------------------------
+
+tidy_eqn does more or less the same thing as @tidy@ in @Match.lhs@;
+that is, it removes syntactic sugar, reducing the number of cases that
+must be handled by the main checking algorithm. One difference is
+that here we can do *all* the tidying at once (recursively), rather
+than doing it incrementally.
+
+\begin{code}
+-- | Tidy every pattern in the equation, and mark the RHS as able to fail
+-- if tidying discarded a might-fail pattern (see 'might_fail_pat').
+tidy_eqn :: EquationInfo -> EquationInfo
+tidy_eqn eqn = eqn { eqn_pats = map tidy_pat (eqn_pats eqn),
+ eqn_rhs = tidy_rhs (eqn_rhs eqn) }
+ where
+ -- Horrible hack. The tidy_pat stuff converts "might-fail" patterns to
+ -- WildPats which of course loses the info that they can fail to match.
+ -- So we stick in a CanFail as if it were a guard.
+ tidy_rhs (MatchResult can_fail body)
+ | any might_fail_pat (eqn_pats eqn) = MatchResult CanFail body
+ | otherwise = MatchResult can_fail body
+
+--------------
+might_fail_pat :: Pat Id -> Bool
+-- Returns True of patterns that might fail (i.e. fall through) in a way
+-- that is not covered by the checking algorithm. Specifically:
+-- NPlusKPat
+-- ViewPat (if refutable)
+
+-- First the two special cases
+might_fail_pat (NPlusKPat {}) = True
+might_fail_pat (ViewPat _ p _) = not (isIrrefutableHsPat p)
+
+-- Now the recursive stuff: a compound pattern might fail if any sub-pattern can
+might_fail_pat (ParPat p) = might_fail_lpat p
+might_fail_pat (AsPat _ p) = might_fail_lpat p
+might_fail_pat (SigPatOut p _ ) = might_fail_lpat p
+might_fail_pat (ListPat ps _) = any might_fail_lpat ps
+might_fail_pat (TuplePat ps _ _) = any might_fail_lpat ps
+might_fail_pat (PArrPat ps _) = any might_fail_lpat ps
+might_fail_pat (BangPat p) = might_fail_lpat p
+might_fail_pat (ConPatOut { pat_args = ps }) = any might_fail_lpat (hsConPatArgs ps)
+
+-- Finally the ones that are sure to succeed, or which are covered by the checking algorithm
+might_fail_pat (LazyPat _) = False -- Always succeeds
+might_fail_pat _ = False -- VarPat, WildPat, LitPat, NPat, TypePat
+
+--------------
+-- | 'might_fail_pat' lifted to located patterns.
+might_fail_lpat :: LPat Id -> Bool
+might_fail_lpat (L _ p) = might_fail_pat p
+
+--------------
+-- | 'tidy_pat' lifted to located patterns, preserving the location.
+tidy_lpat :: LPat Id -> LPat Id
+tidy_lpat p = fmap tidy_pat p
+
+--------------
+-- | Remove syntactic sugar, so the checker only ever sees wildcards,
+-- literals and prefix constructor patterns.
+tidy_pat :: Pat Id -> Pat Id
+tidy_pat pat@(WildPat _) = pat
+tidy_pat (VarPat id) = WildPat (idType id)
+tidy_pat (ParPat p) = tidy_pat (unLoc p)
+tidy_pat (LazyPat p) = WildPat (hsLPatType p) -- For overlap and exhaustiveness checking
+ -- purposes, a ~pat is like a wildcard
+tidy_pat (BangPat p) = tidy_pat (unLoc p)
+tidy_pat (AsPat _ p) = tidy_pat (unLoc p)
+tidy_pat (SigPatOut p _) = tidy_pat (unLoc p)
+tidy_pat (CoPat _ pat _) = tidy_pat pat
+
+-- These two are might_fail patterns, so we map them to
+-- WildPats. The might_fail_pat stuff arranges that the
+-- guard says "this equation might fall through".
+tidy_pat (NPlusKPat id _ _ _) = WildPat (idType (unLoc id))
+tidy_pat (ViewPat _ _ ty) = WildPat ty
+
+tidy_pat pat@(ConPatOut { pat_con = L _ id, pat_args = ps })
+ = pat { pat_args = tidy_con id ps }
+
+-- Explicit lists become nested applications of the cons constructor
+tidy_pat (ListPat ps ty)
+ = unLoc $ foldr (\ x y -> mkPrefixConPat consDataCon [x,y] list_ty)
+ (mkNilPat list_ty)
+ (map tidy_lpat ps)
+ where list_ty = mkListTy ty
+
+-- introduce fake parallel array constructors to be able to handle parallel
+-- arrays with the existing machinery for constructor pattern
+--
+tidy_pat (PArrPat ps ty)
+ = unLoc $ mkPrefixConPat (parrFakeCon (length ps))
+ (map tidy_lpat ps)
+ (mkPArrTy ty)
+
+tidy_pat (TuplePat ps boxity ty)
+ = unLoc $ mkPrefixConPat (tupleCon boxity arity)
+ (map tidy_lpat ps) ty
+ where
+ arity = length ps
+
+tidy_pat (NPat lit mb_neg eq) = tidyNPat lit mb_neg eq
+
+-- Unpack string patterns fully, so we can see when they overlap with
+-- each other, or even explicit lists of Chars.
+tidy_pat (LitPat lit)
+ | HsString s <- lit
+ = unLoc $ foldr (\c pat -> mkPrefixConPat consDataCon [mk_char_lit c, pat] stringTy)
+ (mkPrefixConPat nilDataCon [] stringTy) (unpackFS s)
+ | otherwise
+ = tidyLitPat lit
+ where
+ mk_char_lit c = mkPrefixConPat charDataCon [nlLitPat (HsCharPrim c)] charTy
+
+-----------------
+-- | Tidy constructor arguments: infix becomes prefix, and record syntax is
+-- expanded to positional arguments, padding missing fields with wildcards.
+tidy_con :: DataCon -> HsConPatDetails Id -> HsConPatDetails Id
+tidy_con _ (PrefixCon ps) = PrefixCon (map tidy_lpat ps)
+tidy_con _ (InfixCon p1 p2) = PrefixCon [tidy_lpat p1, tidy_lpat p2]
+tidy_con con (RecCon (HsRecFields fs _))
+ | null fs = PrefixCon [nlWildPat | _ <- dataConOrigArgTys con]
+ -- Special case for null patterns; maybe not a record at all
+ | otherwise = PrefixCon (map (tidy_lpat.snd) all_pats)
+ where
+ -- pad out all the missing fields with WildPats.
+ field_pats = map (\ f -> (f, nlWildPat)) (dataConFieldLabels con)
+ all_pats = foldr (\(HsRecField id p _) acc -> insertNm (getName (unLoc id)) p acc)
+ field_pats fs
+
+ -- Replace the wildcard for a named field with the pattern the user wrote
+ insertNm nm p [] = [(nm,p)]
+ insertNm nm p (x@(n,_):xs)
+ | nm == n = (nm,p):xs
+ | otherwise = x : insertNm nm p xs
diff --git a/compiler/deSugar/Coverage.lhs b/compiler/deSugar/Coverage.lhs index 30be2aa1f0..57455c4818 100644 --- a/compiler/deSugar/Coverage.lhs +++ b/compiler/deSugar/Coverage.lhs @@ -455,26 +455,18 @@ addTickStmt isGuard (ParStmt pairs mzipExpr bindExpr returnExpr) = do (addTickSyntaxExpr hpcSrcSpan bindExpr) (addTickSyntaxExpr hpcSrcSpan returnExpr) -addTickStmt isGuard (TransformStmt stmts ids usingExpr maybeByExpr returnExpr bindExpr) = do - t_s <- (addTickLStmts isGuard stmts) - t_u <- (addTickLHsExprAlways usingExpr) - t_m <- (addTickMaybeByLHsExpr maybeByExpr) - t_r <- (addTickSyntaxExpr hpcSrcSpan returnExpr) - t_b <- (addTickSyntaxExpr hpcSrcSpan bindExpr) - return $ TransformStmt t_s ids t_u t_m t_r t_b - -addTickStmt isGuard stmt@(GroupStmt { grpS_stmts = stmts - , grpS_by = by, grpS_using = using - , grpS_ret = returnExpr, grpS_bind = bindExpr - , grpS_fmap = liftMExpr }) = do +addTickStmt isGuard stmt@(TransStmt { trS_stmts = stmts + , trS_by = by, trS_using = using + , trS_ret = returnExpr, trS_bind = bindExpr + , trS_fmap = liftMExpr }) = do t_s <- addTickLStmts isGuard stmts t_y <- fmapMaybeM addTickLHsExprAlways by t_u <- addTickLHsExprAlways using t_f <- addTickSyntaxExpr hpcSrcSpan returnExpr t_b <- addTickSyntaxExpr hpcSrcSpan bindExpr t_m <- addTickSyntaxExpr hpcSrcSpan liftMExpr - return $ stmt { grpS_stmts = t_s, grpS_by = t_y, grpS_using = t_u - , grpS_ret = t_f, grpS_bind = t_b, grpS_fmap = t_m } + return $ stmt { trS_stmts = t_s, trS_by = t_y, trS_using = t_u + , trS_ret = t_f, trS_bind = t_b, trS_fmap = t_m } addTickStmt isGuard stmt@(RecStmt {}) = do { stmts' <- addTickLStmts isGuard (recS_stmts stmt) @@ -495,12 +487,6 @@ addTickStmtAndBinders isGuard (stmts, ids) = (addTickLStmts isGuard stmts) (return ids) -addTickMaybeByLHsExpr :: Maybe (LHsExpr Id) -> TM (Maybe (LHsExpr Id)) -addTickMaybeByLHsExpr maybeByExpr = - case maybeByExpr of - Nothing -> return Nothing - Just byExpr -> addTickLHsExprAlways byExpr >>= (return . 
Just) - addTickHsLocalBinds :: HsLocalBinds Id -> TM (HsLocalBinds Id) addTickHsLocalBinds (HsValBinds binds) = liftM HsValBinds diff --git a/compiler/deSugar/DsListComp.lhs b/compiler/deSugar/DsListComp.lhs index 63cae938d0..0d3adbc7c3 100644 --- a/compiler/deSugar/DsListComp.lhs +++ b/compiler/deSugar/DsListComp.lhs @@ -91,45 +91,19 @@ dsInnerListComp (stmts, bndrs) where bndrs_tuple_type = mkBigCoreVarTupTy bndrs --- This function factors out commonality between the desugaring strategies for TransformStmt. --- Given such a statement it gives you back an expression representing how to compute the transformed --- list and the tuple that you need to bind from that list in order to proceed with your desugaring -dsTransformStmt :: Stmt Id -> DsM (CoreExpr, LPat Id) -dsTransformStmt (TransformStmt stmts binders usingExpr maybeByExpr _ _) - = do { (expr, binders_tuple_type) <- dsInnerListComp (stmts, binders) - ; usingExpr' <- dsLExpr usingExpr - - ; using_args <- - case maybeByExpr of - Nothing -> return [expr] - Just byExpr -> do - byExpr' <- dsLExpr byExpr - - us <- newUniqueSupply - [tuple_binder] <- newSysLocalsDs [binders_tuple_type] - let byExprWrapper = mkTupleCase us binders byExpr' tuple_binder (Var tuple_binder) - - return [Lam tuple_binder byExprWrapper, expr] - - ; let inner_list_expr = mkApps usingExpr' ((Type binders_tuple_type) : using_args) - pat = mkBigLHsVarPatTup binders - ; return (inner_list_expr, pat) } - -- This function factors out commonality between the desugaring strategies for GroupStmt. 
-- Given such a statement it gives you back an expression representing how to compute the transformed -- list and the tuple that you need to bind from that list in order to proceed with your desugaring -dsGroupStmt :: Stmt Id -> DsM (CoreExpr, LPat Id) -dsGroupStmt (GroupStmt { grpS_stmts = stmts, grpS_bndrs = binderMap - , grpS_by = by, grpS_using = using }) = do - let (fromBinders, toBinders) = unzip binderMap - - fromBindersTypes = map idType fromBinders - toBindersTypes = map idType toBinders - - toBindersTupleType = mkBigCoreTupTy toBindersTypes +dsTransStmt :: Stmt Id -> DsM (CoreExpr, LPat Id) +dsTransStmt (TransStmt { trS_form = form, trS_stmts = stmts, trS_bndrs = binderMap + , trS_by = by, trS_using = using }) = do + let (from_bndrs, to_bndrs) = unzip binderMap + from_bndrs_tys = map idType from_bndrs + to_bndrs_tys = map idType to_bndrs + to_bndrs_tup_ty = mkBigCoreTupTy to_bndrs_tys -- Desugar an inner comprehension which outputs a list of tuples of the "from" binders - (expr, from_tup_ty) <- dsInnerListComp (stmts, fromBinders) + (expr, from_tup_ty) <- dsInnerListComp (stmts, from_bndrs) -- Work out what arguments should be supplied to that expression: i.e. is an extraction -- function required? 
If so, create that desugared function and add to arguments @@ -137,31 +111,34 @@ dsGroupStmt (GroupStmt { grpS_stmts = stmts, grpS_bndrs = binderMap usingArgs <- case by of Nothing -> return [expr] Just by_e -> do { by_e' <- dsLExpr by_e - ; us <- newUniqueSupply - ; [from_tup_id] <- newSysLocalsDs [from_tup_ty] - ; let by_wrap = mkTupleCase us fromBinders by_e' - from_tup_id (Var from_tup_id) - ; return [Lam from_tup_id by_wrap, expr] } + ; lam <- matchTuple from_bndrs by_e' + ; return [lam, expr] } -- Create an unzip function for the appropriate arity and element types and find "map" - (unzip_fn, unzip_rhs) <- mkUnzipBind fromBindersTypes + unzip_stuff <- mkUnzipBind form from_bndrs_tys map_id <- dsLookupGlobalId mapName -- Generate the expressions to build the grouped list let -- First we apply the grouping function to the inner list - inner_list_expr = mkApps usingExpr' ((Type from_tup_ty) : usingArgs) + inner_list_expr = mkApps usingExpr' (Type from_tup_ty : usingArgs) -- Then we map our "unzip" across it to turn the lists of tuples into tuples of lists -- We make sure we instantiate the type variable "a" to be a list of "from" tuples and -- the "b" to be a tuple of "to" lists! 
- unzipped_inner_list_expr = mkApps (Var map_id) - [Type (mkListTy from_tup_ty), Type toBindersTupleType, Var unzip_fn, inner_list_expr] -- Then finally we bind the unzip function around that expression - bound_unzipped_inner_list_expr = Let (Rec [(unzip_fn, unzip_rhs)]) unzipped_inner_list_expr - - -- Build a pattern that ensures the consumer binds into the NEW binders, which hold lists rather than single values - let pat = mkBigLHsVarPatTup toBinders + bound_unzipped_inner_list_expr + = case unzip_stuff of + Nothing -> inner_list_expr + Just (unzip_fn, unzip_rhs) -> Let (Rec [(unzip_fn, unzip_rhs)]) $ + mkApps (Var map_id) $ + [ Type (mkListTy from_tup_ty) + , Type to_bndrs_tup_ty + , Var unzip_fn + , inner_list_expr] + + -- Build a pattern that ensures the consumer binds into the NEW binders, + -- which hold lists rather than single values + let pat = mkBigLHsVarPatTup to_bndrs return (bound_unzipped_inner_list_expr, pat) - \end{code} %************************************************************************ @@ -251,12 +228,8 @@ deListComp (LetStmt binds : quals) list = do core_rest <- deListComp quals list dsLocalBinds binds core_rest -deListComp (stmt@(TransformStmt {}) : quals) list = do - (inner_list_expr, pat) <- dsTransformStmt stmt - deBindComp pat inner_list_expr quals list - -deListComp (stmt@(GroupStmt {}) : quals) list = do - (inner_list_expr, pat) <- dsGroupStmt stmt +deListComp (stmt@(TransStmt {}) : quals) list = do + (inner_list_expr, pat) <- dsTransStmt stmt deBindComp pat inner_list_expr quals list deListComp (BindStmt pat list1 _ _ : quals) core_list2 = do -- rule A' above @@ -264,16 +237,14 @@ deListComp (BindStmt pat list1 _ _ : quals) core_list2 = do -- rule A' above deBindComp pat core_list1 quals core_list2 deListComp (ParStmt stmtss_w_bndrs _ _ _ : quals) list - = do - exps_and_qual_tys <- mapM dsInnerListComp stmtss_w_bndrs - let (exps, qual_tys) = unzip exps_and_qual_tys + = do { exps_and_qual_tys <- mapM dsInnerListComp stmtss_w_bndrs + 
; let (exps, qual_tys) = unzip exps_and_qual_tys - (zip_fn, zip_rhs) <- mkZipBind qual_tys + ; (zip_fn, zip_rhs) <- mkZipBind qual_tys -- Deal with [e | pat <- zip l1 .. ln] in example above - deBindComp pat (Let (Rec [(zip_fn, zip_rhs)]) (mkApps (Var zip_fn) exps)) - quals list - + ; deBindComp pat (Let (Rec [(zip_fn, zip_rhs)]) (mkApps (Var zip_fn) exps)) + quals list } where bndrs_s = map snd stmtss_w_bndrs @@ -361,13 +332,8 @@ dfListComp c_id n_id (LetStmt binds : quals) = do core_rest <- dfListComp c_id n_id quals dsLocalBinds binds core_rest -dfListComp c_id n_id (stmt@(TransformStmt {}) : quals) = do - (inner_list_expr, pat) <- dsTransformStmt stmt - -- Anyway, we bind the newly transformed list via the generic binding function - dfBindComp c_id n_id (pat, inner_list_expr) quals - -dfListComp c_id n_id (stmt@(GroupStmt {}) : quals) = do - (inner_list_expr, pat) <- dsGroupStmt stmt +dfListComp c_id n_id (stmt@(TransStmt {}) : quals) = do + (inner_list_expr, pat) <- dsTransStmt stmt -- Anyway, we bind the newly grouped list via the generic binding function dfBindComp c_id n_id (pat, inner_list_expr) quals @@ -445,7 +411,7 @@ mkZipBind elt_tys = do -- Increasing order of tag -mkUnzipBind :: [Type] -> DsM (Id, CoreExpr) +mkUnzipBind :: TransForm -> [Type] -> DsM (Maybe (Id, CoreExpr)) -- mkUnzipBind [t1, t2] -- = (unzip, \ys :: [(t1, t2)] -> foldr (\ax :: (t1, t2) axs :: ([t1], [t2]) -- -> case ax of @@ -455,28 +421,29 @@ mkUnzipBind :: [Type] -> DsM (Id, CoreExpr) -- ys) -- -- We use foldr here in all cases, even if rules are turned off, because we may as well! 
-mkUnzipBind elt_tys = do - ax <- newSysLocalDs elt_tuple_ty - axs <- newSysLocalDs elt_list_tuple_ty - ys <- newSysLocalDs elt_tuple_list_ty - xs <- mapM newSysLocalDs elt_tys - xss <- mapM newSysLocalDs elt_list_tys +mkUnzipBind ThenForm _ + = return Nothing -- No unzipping for ThenForm +mkUnzipBind _ elt_tys + = do { ax <- newSysLocalDs elt_tuple_ty + ; axs <- newSysLocalDs elt_list_tuple_ty + ; ys <- newSysLocalDs elt_tuple_list_ty + ; xs <- mapM newSysLocalDs elt_tys + ; xss <- mapM newSysLocalDs elt_list_tys - unzip_fn <- newSysLocalDs unzip_fn_ty - - [us1, us2] <- sequence [newUniqueSupply, newUniqueSupply] - - let nil_tuple = mkBigCoreTup (map mkNilExpr elt_tys) - - concat_expressions = map mkConcatExpression (zip3 elt_tys (map Var xs) (map Var xss)) - tupled_concat_expression = mkBigCoreTup concat_expressions - - folder_body_inner_case = mkTupleCase us1 xss tupled_concat_expression axs (Var axs) - folder_body_outer_case = mkTupleCase us2 xs folder_body_inner_case ax (Var ax) - folder_body = mkLams [ax, axs] folder_body_outer_case - - unzip_body <- mkFoldrExpr elt_tuple_ty elt_list_tuple_ty folder_body nil_tuple (Var ys) - return (unzip_fn, mkLams [ys] unzip_body) + ; unzip_fn <- newSysLocalDs unzip_fn_ty + + ; [us1, us2] <- sequence [newUniqueSupply, newUniqueSupply] + + ; let nil_tuple = mkBigCoreTup (map mkNilExpr elt_tys) + concat_expressions = map mkConcatExpression (zip3 elt_tys (map Var xs) (map Var xss)) + tupled_concat_expression = mkBigCoreTup concat_expressions + + folder_body_inner_case = mkTupleCase us1 xss tupled_concat_expression axs (Var axs) + folder_body_outer_case = mkTupleCase us2 xs folder_body_inner_case ax (Var ax) + folder_body = mkLams [ax, axs] folder_body_outer_case + + ; unzip_body <- mkFoldrExpr elt_tuple_ty elt_list_tuple_ty folder_body nil_tuple (Var ys) + ; return (Just (unzip_fn, mkLams [ys] unzip_body)) } where elt_tuple_ty = mkBigCoreTupTy elt_tys elt_tuple_list_ty = mkListTy elt_tuple_ty @@ -730,30 +697,6 @@ dsMcStmt 
(ExprStmt exp then_exp guard_exp _) stmts ; return $ mkApps then_exp' [ mkApps guard_exp' [exp'] , rest ] } --- Transform statements desugar like this: --- --- [ .. | qs, then f by e ] -> f (\q_v -> e) [| qs |] --- --- where [| qs |] is the desugared inner monad comprehenion generated by the --- statements `qs`. -dsMcStmt (TransformStmt stmts binders usingExpr maybeByExpr return_op bind_op) stmts_rest - = do { expr <- dsInnerMonadComp stmts binders return_op - ; let binders_tup_type = mkBigCoreTupTy $ map idType binders - ; usingExpr' <- dsLExpr usingExpr - ; using_args <- case maybeByExpr of - Nothing -> return [expr] - Just byExpr -> do - byExpr' <- dsLExpr byExpr - us <- newUniqueSupply - tup_binder <- newSysLocalDs binders_tup_type - let byExprWrapper = mkTupleCase us binders byExpr' tup_binder (Var tup_binder) - return [Lam tup_binder byExprWrapper, expr] - - ; let pat = mkBigLHsVarPatTup binders - rhs = mkApps usingExpr' ((Type binders_tup_type) : using_args) - - ; dsMcBindStmt pat rhs bind_op noSyntaxExpr stmts_rest } - -- Group statements desugar like this: -- -- [| (q, then group by e using f); rest |] @@ -768,10 +711,10 @@ dsMcStmt (TransformStmt stmts binders usingExpr maybeByExpr return_op bind_op) s -- n_tup :: n qt -- unzip :: n qt -> (n t1, ..., n tk) (needs Functor n) -dsMcStmt (GroupStmt { grpS_stmts = stmts, grpS_bndrs = bndrs - , grpS_by = by, grpS_using = using - , grpS_ret = return_op, grpS_bind = bind_op - , grpS_fmap = fmap_op }) stmts_rest +dsMcStmt (TransStmt { trS_stmts = stmts, trS_bndrs = bndrs + , trS_by = by, trS_using = using + , trS_ret = return_op, trS_bind = bind_op + , trS_fmap = fmap_op, trS_form = form }) stmts_rest = do { let (from_bndrs, to_bndrs) = unzip bndrs from_bndr_tys = map idType from_bndrs -- Types ty @@ -790,16 +733,15 @@ dsMcStmt (GroupStmt { grpS_stmts = stmts, grpS_bndrs = bndrs -- Generate the expressions to build the grouped list -- Build a pattern that ensures the consumer binds into the NEW binders, -- which 
hold monads rather than single values - ; fmap_op' <- dsExpr fmap_op ; bind_op' <- dsExpr bind_op - ; let bind_ty = exprType bind_op' -- m2 (n (a,b,c)) -> (n (a,b,c) -> r1) -> r2 + ; let bind_ty = exprType bind_op' -- m2 (n (a,b,c)) -> (n (a,b,c) -> r1) -> r2 n_tup_ty = funArgTy $ funArgTy $ funResultTy bind_ty -- n (a,b,c) tup_n_ty = mkBigCoreVarTupTy to_bndrs ; body <- dsMcStmts stmts_rest ; n_tup_var <- newSysLocalDs n_tup_ty ; tup_n_var <- newSysLocalDs tup_n_ty - ; tup_n_expr <- mkMcUnzipM fmap_op' n_tup_var from_bndr_tys + ; tup_n_expr <- mkMcUnzipM form fmap_op n_tup_var from_bndr_tys ; us <- newUniqueSupply ; let rhs' = mkApps usingExpr' usingArgs body' = mkTupleCase us to_bndrs body tup_n_var tup_n_expr @@ -908,16 +850,21 @@ dsInnerMonadComp stmts bndrs ret_op -- = ( fmap (selN1 :: (t1, t2) -> t1) ys -- , fmap (selN2 :: (t1, t2) -> t2) ys ) -mkMcUnzipM :: CoreExpr -- fmap +mkMcUnzipM :: TransForm + -> SyntaxExpr TcId -- fmap -> Id -- Of type n (a,b,c) -> [Type] -- [a,b,c] -> DsM CoreExpr -- Of type (n a, n b, n c) -mkMcUnzipM fmap_op ys elt_tys - = do { xs <- mapM newSysLocalDs elt_tys - ; tup_xs <- newSysLocalDs (mkBigCoreTupTy elt_tys) +mkMcUnzipM ThenForm _ ys _ + = return (Var ys) -- No unzipping to do + +mkMcUnzipM _ fmap_op ys elt_tys + = do { fmap_op' <- dsExpr fmap_op + ; xs <- mapM newSysLocalDs elt_tys + ; tup_xs <- newSysLocalDs (mkBigCoreTupTy elt_tys) ; let arg_ty = idType ys - mk_elt i = mkApps fmap_op -- fmap :: forall a b. (a -> b) -> n a -> n b + mk_elt i = mkApps fmap_op' -- fmap :: forall a b. (a -> b) -> n a -> n b [ Type arg_ty, Type (elt_tys !! i) , mk_sel i, Var ys] |