diff --git a/compiler/Eta/Backpack/NameShape.hs b/compiler/Eta/Backpack/NameShape.hs index 05105cb8..1d6d9cae 100644 --- a/compiler/Eta/Backpack/NameShape.hs +++ b/compiler/Eta/Backpack/NameShape.hs @@ -150,7 +150,7 @@ ns_module = mkHoleModule . ns_mod_name -- | Substitution on @{A.T}@. We enforce the invariant that the -- 'nameModule' of keys of this map have 'moduleUnitId' @hole@ -- (meaning that if we have a hole substitution, the keys of the map --- are never affected.) Alternately, this is ismorphic to +-- are never affected.) Alternately, this is isomorphic to -- @Map ('ModuleName', 'OccName') 'Name'@. type ShNameSubst = NameEnv Name diff --git a/compiler/Eta/BasicTypes/BasicTypes.hs b/compiler/Eta/BasicTypes/BasicTypes.hs index 67f19e74..09249bf2 100644 --- a/compiler/Eta/BasicTypes/BasicTypes.hs +++ b/compiler/Eta/BasicTypes/BasicTypes.hs @@ -2,7 +2,7 @@ (c) The University of Glasgow 2006 (c) The GRASP/AQUA Project, Glasgow University, 1997-1998 -\section[BasicTypes]{Miscellanous types} +\section[BasicTypes]{Miscellaneous types} This module defines a miscellaneously collection of very simple types that @@ -343,7 +343,7 @@ Consider \begin{verbatim} a `op1` b `op2` c \end{verbatim} -@(compareFixity op1 op2)@ tells which way to arrange appication, or +@(compareFixity op1 op2)@ tells which way to arrange application, or whether there's an error. -} @@ -761,7 +761,7 @@ instance Outputable OccInfo where {- ************************************************************************ * * - Default method specfication + Default method specification * * ************************************************************************ diff --git a/compiler/Eta/BasicTypes/DataCon.hs b/compiler/Eta/BasicTypes/DataCon.hs index adee2c27..e049a8ab 100644 --- a/compiler/Eta/BasicTypes/DataCon.hs +++ b/compiler/Eta/BasicTypes/DataCon.hs @@ -181,7 +181,7 @@ Why might the wrapper have anything to do? 
Two reasons: The wrapper has the programmer-specified type: \$wMkT :: a -> T [a] \$wMkT a x = MkT [a] a [a] x - The third argument is a coerion + The third argument is a coercion [a] :: [a]~[a] INVARIANT: the dictionary constructor for a class @@ -222,7 +222,7 @@ It's a flaw in the language. it separately in the type checker on occurrences of a constructor, either in an expression or in a pattern. - [May 2003: actually I think this decision could evasily be + [May 2003: actually I think this decision could easily be reversed now, and probably should be. Generics could be disabled for types with a stupid context; record updates now (H98) needs the context too; etc. It's an unforced change, so @@ -524,7 +524,7 @@ Terminology: Note [Data con representation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The dcRepType field contains the type of the representation of a contructor +The dcRepType field contains the type of the representation of a constructor This may differ from the type of the constructor *Id* (built by MkId.mkDataConId) for two reasons: a) the constructor Id may be overloaded, but the dictionary isn't stored @@ -669,7 +669,7 @@ mkDataCon :: Name -> [TyVar] -- ^ Universally quantified type variables -> [TyVar] -- ^ Existentially quantified type variables -> [(TyVar,Type)] -- ^ GADT equalities - -> ThetaType -- ^ Theta-type occuring before the arguments proper + -> ThetaType -- ^ Theta-type occurring before the arguments proper -> [Type] -- ^ Original argument types -> Type -- ^ Original result type -> TyCon -- ^ Representation type constructor @@ -692,7 +692,7 @@ mkDataCon name declared_infix -- data T a where { MkT :: S } -- then it's possible that the univ_tvs may hit an assertion failure -- if you pull on univ_tvs. This case is checked by checkValidDataCon, --- so the error is detected properly... it's just that asaertions here +-- so the error is detected properly... it's just that assertions here -- are a little dodgy. 
= con @@ -741,7 +741,7 @@ eqSpecPreds spec = [ mkEqPred (mkTyVarTy tv) ty | (tv,ty) <- spec ] {- Note [Unpack equality predicates] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If we have a GADT with a contructor C :: (a~[b]) => b -> T a +If we have a GADT with a constructor C :: (a~[b]) => b -> T a we definitely want that equality predicate *unboxed* so that it takes no space at all. This is easily done: just give it an UNPACK pragma. The rest of the unpack/repack code does the @@ -789,7 +789,7 @@ dataConUnivTyVars = dcUnivTyVars dataConExTyVars :: DataCon -> [TyVar] dataConExTyVars = dcExTyVars --- | Both the universal and existentiatial type variables of the constructor +-- | Both the universal and existential type variables of the constructor dataConAllTyVars :: DataCon -> [TyVar] dataConAllTyVars (MkData { dcUnivTyVars = univ_tvs, dcExTyVars = ex_tvs }) = univ_tvs ++ ex_tvs @@ -1143,7 +1143,7 @@ promoteDataCon_maybe (MkData { dcPromoted = mb_tc }) = mb_tc {- Note [Promoting a Type to a Kind] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Suppsoe we have a data constructor D +Suppose we have a data constructor D D :: forall (a:*). Maybe a -> T a We promote this to be a type constructor 'D: 'D :: forall (k:BOX). 'Maybe k -> 'T k diff --git a/compiler/Eta/BasicTypes/Demand.hs b/compiler/Eta/BasicTypes/Demand.hs index 252bf0c4..a94593f6 100644 --- a/compiler/Eta/BasicTypes/Demand.hs +++ b/compiler/Eta/BasicTypes/Demand.hs @@ -736,7 +736,7 @@ cleanEvalProdDmd n = JD { sd = HeadStr, ud = UProd (replicate n useTop) } {- ************************************************************************ * * - Demand: combining stricness and usage + Demand: combining strictness and usage * * ************************************************************************ -} @@ -1246,7 +1246,7 @@ We 3. combine the termination results, but 4. take CPR info from the first argument. -3 and 4 are implementd in bothDmdResult. +3 and 4 are implemented in bothDmdResult. 
-} -- Equality needed for fixpoints in DmdAnal @@ -1544,7 +1544,7 @@ But the demand fed into f might be less than . There are a f - And finally termination information: If r says that f diverges for sure, then this holds when the demand guarantees that two arguments are going to be passed. If the demand is lower, we may just as well converge. - If we were tracking definite convegence, than that would still hold under + If we were tracking definite convergence, then that would still hold under a weaker demand than expected by the demand transformer. * Not enough demand from the usage side: The missing usage can be expanded using UCall Many, therefore this is subsumed by the third case: @@ -1581,7 +1581,7 @@ Note [Default demand on free variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the variable is not mentioned in the environment of a demand type, its demand is taken to be a result demand of the type. - For the stricness component, + For the strictness component, if the result demand is a Diverges, then we use HyperStr else we use Lazy For the usage component, we use Absent. @@ -1686,8 +1686,8 @@ a demand on the Id into a DmdType, which gives c) an indication of the result of applying the Id to its arguments -However, in fact we store in the Id an extremely emascuated demand -transfomer, namely +However, in fact we store in the Id an extremely emasculated demand +transformer, namely a single DmdType (Nevertheless we dignify StrictSig as a distinct type.) 
diff --git a/compiler/Eta/BasicTypes/Id.hs b/compiler/Eta/BasicTypes/Id.hs index 1b37bb28..4ecfc514 100644 --- a/compiler/Eta/BasicTypes/Id.hs +++ b/compiler/Eta/BasicTypes/Id.hs @@ -211,7 +211,7 @@ setIdInfo id info = seqIdInfo info `seq` (lazySetIdInfo id info) modifyIdInfo :: (IdInfo -> IdInfo) -> Id -> Id modifyIdInfo fn id = setIdInfo id (fn (idInfo id)) --- maybeModifyIdInfo tries to avoid unnecesary thrashing +-- maybeModifyIdInfo tries to avoid unnecessary thrashing maybeModifyIdInfo :: Maybe IdInfo -> Id -> Id maybeModifyIdInfo (Just new_info) id = lazySetIdInfo id new_info maybeModifyIdInfo Nothing id = id @@ -482,7 +482,7 @@ isImplicitId id PrimOpId {} -> True DataConWorkId {} -> True DataConWrapId {} -> True - -- These are are implied by their type or class decl; + -- These are implied by their type or class decl; -- remember that all type and class decls appear in the interface file. -- The dfun id is not an implicit Id; it must *not* be omitted, because -- it carries version info for the instance decl @@ -627,7 +627,7 @@ setIdCafInfo :: Id -> CafInfo -> Id setIdCafInfo id caf_info = modifyIdInfo (`setCafInfo` caf_info) id --------------------------------- - -- Occcurrence INFO + -- Occurrence INFO idOccInfo :: Id -> OccInfo idOccInfo id = occInfo (idInfo id) diff --git a/compiler/Eta/BasicTypes/IdInfo.hs b/compiler/Eta/BasicTypes/IdInfo.hs index f9de491d..b30be1fb 100644 --- a/compiler/Eta/BasicTypes/IdInfo.hs +++ b/compiler/Eta/BasicTypes/IdInfo.hs @@ -193,7 +193,7 @@ data IdInfo unfoldingInfo :: Unfolding, -- ^ The 'Id's unfolding cafInfo :: CafInfo, -- ^ 'Id' CAF info oneShotInfo :: OneShotInfo, -- ^ Info about a lambda-bound variable, if the 'Id' is one - inlinePragInfo :: InlinePragma, -- ^ Any inline pragma atached to the 'Id' + inlinePragInfo :: InlinePragma, -- ^ Any inline pragma attached to the 'Id' occInfo :: OccInfo, -- ^ How the 'Id' occurs in the program strictnessInfo :: StrictSig, -- ^ A strictness signature @@ -343,7 +343,7 @@ But we 
don't do that for instance declarations and so we just treat them all uniformly. The EXCEPTION is PrimOpIds, which do have rules in their IdInfo. That is -jsut for convenience really. +just for convenience really. However, LocalIds may have non-empty RuleInfo. We treat them differently because: @@ -364,7 +364,7 @@ data RuleInfo -- ru_fn though. -- Note [Rule dependency info] in OccurAnal --- | Assume that no specilizations exist: always safe +-- | Assume that no specializations exist: always safe emptyRuleInfo :: RuleInfo emptyRuleInfo = RuleInfo [] emptyDVarSet diff --git a/compiler/Eta/BasicTypes/Literal.hs b/compiler/Eta/BasicTypes/Literal.hs index 3c2a39ab..fe270ab9 100644 --- a/compiler/Eta/BasicTypes/Literal.hs +++ b/compiler/Eta/BasicTypes/Literal.hs @@ -137,8 +137,8 @@ They only get converted into real Core, during the CorePrep phase, although TidyPgm looks ahead at what the core will be, so that it can see whether it involves CAFs. -When we initally build an Integer literal, notably when -deserialising it from an interface file (see the Binary instance +When we initially build an Integer literal, notably when +deserializing it from an interface file (see the Binary instance below), we don't have convenient access to the mkInteger Id. So we just use an error thunk, and fill in the real Id when we do tcIfaceLit in TcIface. @@ -475,7 +475,7 @@ literalType (MachLabel _ _ _) = addrPrimTy literalType (LitInteger _ t) = t absentLiteralOf :: TyCon -> Maybe Literal --- Return a literal of the appropriate primtive +-- Return a literal of the appropriate primitive -- TyCon, to use as a placeholder when it doesn't matter absentLiteralOf tc = lookupUFM absent_lits (tyConName tc) @@ -527,7 +527,7 @@ litTag (MachNull) = _ILIT(12) {- Printing ~~~~~~~~ -* MachX (i.e. unboxed) things are printed unadornded (e.g. 3, 'a', "foo") +* MachX (i.e. unboxed) things are printed unadorned (e.g. 3, 'a', "foo") exceptions: MachFloat gets an initial keyword prefix. 
-} diff --git a/compiler/Eta/BasicTypes/MkId.hs b/compiler/Eta/BasicTypes/MkId.hs index 3a4e1114..188d08b4 100644 --- a/compiler/Eta/BasicTypes/MkId.hs +++ b/compiler/Eta/BasicTypes/MkId.hs @@ -108,7 +108,7 @@ There are several reasons why an Id might appear in the wiredInIds: result type. -- sof 1/99] (3) Other error functions (rUNTIME_ERROR_ID) are wired in (a) because - the desugarer generates code that mentiones them directly, and + the desugarer generates code that mentions them directly, and (b) for the same reason as eRROR_ID (4) lazyId is wired in because the wired-in version overrides the @@ -395,13 +395,13 @@ mkDataConWorkId wkr_name data_con -- even if the data constructor is declared strict -- e.g. data T = MkT !(Int,Int) -- Why? Because the *wrapper* is strict (and its unfolding has case - -- expresssions that do the evals) but the *worker* itself is not. + -- expressions that do the evals) but the *worker* itself is not. -- If we pretend it is strict then when we see -- case x of y -> $wMkT y -- the simplifier thinks that y is "sure to be evaluated" (because -- $wMkT is strict) and drops the case. No, $wMkT is not strict. -- - -- When the simplifer sees a pattern + -- When the simplifier sees a pattern -- case e of MkT x -> ... -- it uses the dataConRepStrictness of MkT to mark x as evaluated; -- but that's fine... dataConRepStrictness comes from the data con @@ -504,7 +504,7 @@ mkDataConRep dflags fam_envs wrap_name mb_bangs data_con -- The Cpr info can be important inside INLINE rhss, where the -- wrapper constructor isn't inlined. -- And the argument strictness can be important too; we - -- may not inline a contructor when it is partially applied. + -- may not inline a constructor when it is partially applied. -- For example: -- data W = C !Int !Int !Int -- ...(let w = C x in ...(w p q)...)... @@ -821,7 +821,7 @@ Because then we'd get an infinite number of arguments. 
Here is a more complicated case: data S = MkS {-# UNPACK #-} !T Int data T = MkT {-# UNPACK #-} !S Int -Each of S and T must decide independendently whether to unpack +Each of S and T must decide independently whether to unpack and they had better not both say yes. So they must both say no. Also behave conservatively when there is no UNPACK pragma @@ -836,7 +836,7 @@ because Int is non-recursive. Note [Unpack equality predicates] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If we have a GADT with a contructor C :: (a~[b]) => b -> T a +If we have a GADT with a constructor C :: (a~[b]) => b -> T a we definitely want that equality predicate *unboxed* so that it takes no space at all. This is easily done: just give it an UNPACK pragma. The rest of the unpack/repack code does the @@ -863,7 +863,7 @@ wrapNewTypeBody :: TyCon -> [Type] -> CoreExpr -> CoreExpr -- newtype T a = MkT (a,Int) -- MkT :: forall a. (a,Int) -> T a -- MkT = /\a. \(x:(a,Int)). x `cast` sym (CoT a) --- where CoT is the coercion TyCon assoicated with the newtype +-- where CoT is the coercion TyCon associated with the newtype -- -- The call (wrapNewTypeBody T [a] e) returns the -- body of the wrapper, namely @@ -887,7 +887,7 @@ wrapNewTypeBody tycon args result_expr -- When unwrapping, we do *not* apply any family coercion, because this will -- be done via a CoPat by the type checker. We have to do it this way as -- computing the right type arguments for the coercion requires more than just --- a spliting operation (cf, TcPat.tcConPat). +-- a splitting operation (cf, TcPat.tcConPat). 
unwrapNewTypeBody :: TyCon -> [Type] -> CoreExpr -> CoreExpr unwrapNewTypeBody tycon args result_expr @@ -992,7 +992,7 @@ mkFCallId dflags uniq fcall ty strict_sig = mkClosedStrictSig (replicate arity topDmd) topRes -- the call does not claim to be strict in its arguments, since they - -- may be lifted (foreign import prim) and the called code doen't + -- may be lifted (foreign import prim) and the called code doesn't -- necessarily force them. See Trac #11076. {- ************************************************************************ @@ -1415,7 +1415,7 @@ and Note [Left folds via right fold]) it was determined that it would be useful if library authors could explicitly tell the compiler that a certain lambda is called at most once. The oneShot function allows that. -Like most magic functions it has a compulsary unfolding, so there is no need +Like most magic functions it has a compulsory unfolding, so there is no need for a real definition somewhere. We have one in GHC.Magic for the convenience of putting the documentation there. @@ -1438,7 +1438,7 @@ Note [magicDictId magic] ~~~~~~~~~~~~~~~~~~~~~~~~~ The identifier `magicDict` is just a place-holder, which is used to -implement a primitve that we cannot define in Haskell but we can write +implement a primitive that we cannot define in Haskell but we can write in Core. It is declared with a place-holder type: magicDict :: forall a. a diff --git a/compiler/Eta/BasicTypes/Module.hs b/compiler/Eta/BasicTypes/Module.hs index 29edc275..8d2847c9 100644 --- a/compiler/Eta/BasicTypes/Module.hs +++ b/compiler/Eta/BasicTypes/Module.hs @@ -1173,7 +1173,7 @@ To be on the safe side and not pessimize ModuleEnv uses nondeterministic ordering on Module and normalizes by doing the lexicographic sort when turning the env to a list. 
See Note [Unique Determinism] for more information about the source of -nondeterminism and and Note [Deterministic UniqFM] for explanation of why +nondeterminism and Note [Deterministic UniqFM] for explanation of why it matters for maps. -} diff --git a/compiler/Eta/BasicTypes/Name.hs b/compiler/Eta/BasicTypes/Name.hs index 1c5c7223..29f77981 100644 --- a/compiler/Eta/BasicTypes/Name.hs +++ b/compiler/Eta/BasicTypes/Name.hs @@ -98,7 +98,7 @@ import Data.Data ************************************************************************ -} --- | A unique, unambigious name for something, containing information about where +-- | A unique, unambiguous name for something, containing information about where -- that thing originated. data Name = Name { n_sort :: NameSort, -- What sort of name it is diff --git a/compiler/Eta/BasicTypes/NameEnv.hs b/compiler/Eta/BasicTypes/NameEnv.hs index 6a4aee62..87d76212 100644 --- a/compiler/Eta/BasicTypes/NameEnv.hs +++ b/compiler/Eta/BasicTypes/NameEnv.hs @@ -43,7 +43,7 @@ depAnal :: (node -> [Name]) -- Defs -> (node -> [Name]) -- Uses -> [node] -> [SCC node] --- Peform dependency analysis on a group of definitions, +-- Perform dependency analysis on a group of definitions, -- where each definition may define more than one Name -- -- The get_defs and get_uses functions are called only once per node diff --git a/compiler/Eta/BasicTypes/OccName.hs b/compiler/Eta/BasicTypes/OccName.hs index e7a5a4a0..5bca3951 100644 --- a/compiler/Eta/BasicTypes/OccName.hs +++ b/compiler/Eta/BasicTypes/OccName.hs @@ -564,12 +564,12 @@ a user-written type or function name $f... Dict-fun identifiers (from inst decls) $dmop Default method for 'op' $pnC n'th superclass selector for class C - $wf Worker for functtoin 'f' + $wf Worker for function 'f' $sf.. 
Specialised version of f T:C Tycon for dictionary for class C D:C Data constructor for dictionary for class C NTCo:T Coercion connecting newtype T with its representation type - TFCo:R Coercion connecting a data family to its respresentation type R + TFCo:R Coercion connecting a data family to its representation type R In encoded form these appear as Zdfxxx etc @@ -851,7 +851,7 @@ tidyOccName env occ@(OccName occ_sp fs) find !k !n = case lookupUFM env new_fs of Just {} -> find (k+1 :: Int) (n+k) - -- By using n+k, the n arguemt to find goes + -- By using n+k, the n argument to find goes -- 1, add 1, add 2, add 3, etc which -- moves at quadratic speed through a dense patch @@ -859,7 +859,7 @@ tidyOccName env occ@(OccName occ_sp fs) where new_fs = mkFastString (base ++ show n) new_env = addToUFM (addToUFM env new_fs 1) base1 (n+1) - -- Update: base_fs, so that next time we'll start whwere we left off + -- Update: base_fs, so that next time we'll start where we left off -- new_fs, so that we know it is taken -- If they are the same (n==1), the former wins -- See Note [TidyOccEnv] diff --git a/compiler/Eta/BasicTypes/PatSyn.hs b/compiler/Eta/BasicTypes/PatSyn.hs index 46a6bf82..701f7a4e 100644 --- a/compiler/Eta/BasicTypes/PatSyn.hs +++ b/compiler/Eta/BasicTypes/PatSyn.hs @@ -56,7 +56,7 @@ data PatSyn psArity :: Arity, -- == length psArgs psInfix :: Bool, -- True <=> declared infix - psUnivTyVars :: [TyVar], -- Universially-quantified type variables + psUnivTyVars :: [TyVar], -- Universally-quantified type variables psReqTheta :: ThetaType, -- Required dictionaries psExTyVars :: [TyVar], -- Existentially-quantified type vars psProvTheta :: ThetaType, -- Provided dictionaries @@ -237,7 +237,7 @@ instance Data.Data PatSyn where -- | Build a new pattern synonym mkPatSyn :: Name -> Bool -- ^ Is the pattern synonym declared infix? 
- -> ([TyVar], ThetaType) -- ^ Universially-quantified type variables + -> ([TyVar], ThetaType) -- ^ Universally-quantified type variables -- and required dicts -> ([TyVar], ThetaType) -- ^ Existentially-quantified type variables -- and provided dicts diff --git a/compiler/Eta/BasicTypes/RdrName.hs b/compiler/Eta/BasicTypes/RdrName.hs index eb88f7a4..75ec2a2b 100644 --- a/compiler/Eta/BasicTypes/RdrName.hs +++ b/compiler/Eta/BasicTypes/RdrName.hs @@ -594,14 +594,14 @@ isLocalGRE (GRE {gre_prov = LocalDef}) = True isLocalGRE (GRE {gre_prov = Imported is}) = and $ map (is_java . is_decl) is unQualOK :: GlobalRdrElt -> Bool --- ^ Test if an unqualifed version of this thing would be in scope +-- ^ Test if an unqualified version of this thing would be in scope unQualOK (GRE {gre_prov = LocalDef}) = True unQualOK (GRE {gre_prov = Imported is}) = any unQualSpecOK is pickGREs :: RdrName -> [GlobalRdrElt] -> [GlobalRdrElt] -- ^ Take a list of GREs which have the right OccName -- Pick those GREs that are suitable for this RdrName --- And for those, keep only only the Provenances that are suitable +-- And for those, keep only the Provenances that are suitable -- Only used for Qual and Unqual, not Orig or Exact -- -- Consider: diff --git a/compiler/Eta/BasicTypes/SrcLoc.hs b/compiler/Eta/BasicTypes/SrcLoc.hs index f966f814..a65643ff 100644 --- a/compiler/Eta/BasicTypes/SrcLoc.hs +++ b/compiler/Eta/BasicTypes/SrcLoc.hs @@ -8,7 +8,7 @@ {-# LANGUAGE FlexibleInstances #-} {-# OPTIONS_GHC -fno-omit-interface-pragmas #-} -- Workaround for Trac #5252 crashes the bootstrap compiler without -O - -- When the earliest compiler we want to boostrap with is + -- When the earliest compiler we want to bootstrap with is -- GHC 7.2, we can make RealSrcLoc properly abstract -- | This module contains types that relate to the positions of things diff --git a/compiler/Eta/BasicTypes/Unique.hs b/compiler/Eta/BasicTypes/Unique.hs index 9cba4361..9ffa26b6 100644 --- a/compiler/Eta/BasicTypes/Unique.hs 
+++ b/compiler/Eta/BasicTypes/Unique.hs @@ -23,7 +23,7 @@ module Eta.BasicTypes.Unique ( Unique, Uniquable(..), uNIQUE_BITS, - -- ** Constructors, desctructors and operations on 'Unique's + -- ** Constructors, destructors and operations on 'Unique's hasKey, pprUniqueAlways, diff --git a/compiler/Eta/BasicTypes/VarSet.hs b/compiler/Eta/BasicTypes/VarSet.hs index fc1955ca..d893f26c 100644 --- a/compiler/Eta/BasicTypes/VarSet.hs +++ b/compiler/Eta/BasicTypes/VarSet.hs @@ -317,7 +317,7 @@ seqDVarSet s = sizeDVarSet s `seq` () extendDVarSetList :: DVarSet -> [Var] -> DVarSet extendDVarSetList = addListToUniqDSet --- | Convert a DVarSet to a VarSet by forgeting the order of insertion +-- | Convert a DVarSet to a VarSet by forgetting the order of insertion dVarSetToVarSet :: DVarSet -> VarSet dVarSetToVarSet = unsafeUFMToUniqSet . udfmToUfm diff --git a/compiler/Eta/CodeGen/Foreign.hs b/compiler/Eta/CodeGen/Foreign.hs index 9f5ebffc..c68f1b8e 100644 --- a/compiler/Eta/CodeGen/Foreign.hs +++ b/compiler/Eta/CodeGen/Foreign.hs @@ -61,7 +61,7 @@ cgForeignCall (CCall (CCallSpec target _cconv safety)) args resType argFtCodes <- getNonVoidArgFtCodes shuffledArgs let (argFts, callArgs') = unzip argFtCodes argFtCodes' = if hasObj && isStatic then drop 1 argFtCodes else argFtCodes - mbObj = if hasObj then Just (expectHead "cgForiegnCall: empty callArgs'" + mbObj = if hasObj then Just (expectHead "cgForeignCall: empty callArgs'" callArgs') else Nothing mbObjFt = safeHead argFts case sequel of diff --git a/compiler/Eta/Core/CoreArity.hs b/compiler/Eta/Core/CoreArity.hs index b7f713a7..1897dce0 100644 --- a/compiler/Eta/Core/CoreArity.hs +++ b/compiler/Eta/Core/CoreArity.hs @@ -257,7 +257,7 @@ Or, to put it another way there is no work lost in duplicating the partial application (e x1 .. 
x(n-1)) -In the divegent case, no work is lost by duplicating because if the thing +In the divergent case, no work is lost by duplicating because if the thing is evaluated once, that's the end of the program. Or, to put it another way, in any context C @@ -307,7 +307,7 @@ do so; it improves some programs significantly, and increasing convergence isn't a bad thing. Hence the ABot/ATop in ArityType. So these two transformations aren't always the Right Thing, and we -have several tickets reporting unexpected bahaviour resulting from +have several tickets reporting unexpected behaviour resulting from this transformation. So we try to limit it as much as possible: (1) Do NOT move a lambda outside a known-bottom case expression @@ -346,7 +346,7 @@ we want to get: coerce T (\x::[T] -> (coerce ([T]->Int) e) x) HOWEVER, note that if you use coerce bogusly you can ge coerce Int negate And since negate has arity 2, you might try to eta expand. But you can't - decopose Int to a function type. Hence the final case in eta_expand. + decompose Int to a function type. Hence the final case in eta_expand. Note [The state-transformer hack] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -387,7 +387,7 @@ Extrude the g2 f' = \p. \s. ((error "...") |> g1) s f = f' |> (String -> g2) -Discard args for bottomming function +Discard args for bottoming function f' = \p. \s. ((error "...") |> g1 |> g3 g3 :: (S -> (S,T)) ~ (S,T) @@ -442,7 +442,7 @@ ArityType 'at', then assuming the calls of f respect the one-shot-ness of of its definition. - NB 'f' is an arbitary expression, eg (f = g e1 e2). This 'f' + NB 'f' is an arbitrary expression, eg (f = g e1 e2). This 'f' can have ArityType as ATop, with length as > 0, only if e1 e2 are themselves. @@ -674,7 +674,7 @@ Consider False -> \s(one-shot). e1 in go2 x We *really* want to eta-expand go and go2. 
-When combining the barnches of the case we have +When combining the branches of the case we have ATop [] `andAT` ATop [OneShotLam] and we want to get ATop [OneShotLam]. But if the inner lambda wasn't one-shot we don't want to do this. @@ -749,7 +749,7 @@ arityType env (Case scrut _ _ alts) | otherwise = case alts_type of ABot n | n>0 -> ATop [] -- Don't eta expand - | otherwise -> ABot 0 -- if RHS is bottomming + | otherwise -> ABot 0 -- if RHS is bottoming -- See Note [Dealing with bottom (2)] ATop as | not (ae_ped_bot env) -- See Note [Dealing with bottom (3)] @@ -814,7 +814,7 @@ returns a CoreExpr satisfying the same invariant. See Note [Eta expansion and the CorePrep invariants] in CorePrep. This means the eta-expander has to do a bit of on-the-fly -simplification but it's not too hard. The alernative, of relying on +simplification but it's not too hard. The alternative, of relying on a subsequent clean-up phase of the Simplifier to de-crapify the result, means you can't really use it in CorePrep, which is painful. @@ -995,7 +995,7 @@ mkEtaWW orig_n orig_expr in_scope orig_ty -- but its type isn't a function. = WARN( True, (ppr orig_n <+> ppr orig_ty) $$ ppr orig_expr ) (getTvInScope subst, reverse eis) - -- This *can* legitmately happen: + -- This *can* legitimately happen: -- e.g. coerce Int (\x. x) Essentially the programmer is -- playing fast and loose with types (Happy does this a lot). -- So we simply decline to eta-expand. Otherwise we'd end up diff --git a/compiler/Eta/Core/CoreFVs.hs b/compiler/Eta/Core/CoreFVs.hs index bf31b138..41013415 100644 --- a/compiler/Eta/Core/CoreFVs.hs +++ b/compiler/Eta/Core/CoreFVs.hs @@ -140,7 +140,7 @@ exprsSomeFreeVars :: InterestingVarFun -- Says which 'Var's are interesting exprsSomeFreeVars fv_cand es = fvVarSet $ filterFV fv_cand $ foldr (unionFV . 
expr_fvs) emptyFV es --- Comment about obselete code -- We used to gather the free variables the RULES at a variable occurrence -- with the following cryptic comment: -- "At a variable occurrence, add in any free variables of its rule rhss @@ -175,7 +175,7 @@ someVars vars = foldr (unionFV . oneVar) emptyFV vars addBndr :: CoreBndr -> FV -> FV addBndr bndr fv fv_cand in_scope acc = (varTypeTyFVs bndr `unionFV` - -- Include type varibles in the binder's type + -- Include type variables in the binder's type -- (not just Ids; coercion variables too!) FV.delFV bndr fv) fv_cand in_scope acc diff --git a/compiler/Eta/Core/CoreLint.hs b/compiler/Eta/Core/CoreLint.hs index cfccfb52..be891b99 100644 --- a/compiler/Eta/Core/CoreLint.hs +++ b/compiler/Eta/Core/CoreLint.hs @@ -115,7 +115,7 @@ That is, use a type let. See Note [Type let] in CoreSyn. However, when linting we need to remember that a=Int, else we might reject a correct program. So we carry a type substitution (in this example -[a -> Int]) and apply this substitution before comparing types. The functin +[a -> Int]) and apply this substitution before comparing types. The function lintInTy :: Type -> LintM Type returns a substituted type; that's the only reason it returns anything. @@ -133,7 +133,7 @@ find an occurrence of an Id, we fetch it from the in-scope set. ************************************************************************ These functions are not CoreM monad stuff, but they probably ought to -be, and it makes a conveneint place. place for them. They print out +be, and it makes a convenient place for them. They print out stuff before and after core passes, and do Core Lint when necessary. -} @@ -355,7 +355,7 @@ lintCoreBindings pass local_in_scope binds (_, dups) = removeDups compare binders -- dups_ext checks for names with different uniques - -- but but the same External name M.n. We don't + -- but the same External name M.n. 
We don't -- allow this at top level: -- M.n{r3} = ... -- M.n{r29} = ... @@ -793,7 +793,7 @@ checkCaseAlts :: CoreExpr -> OutType -> [CoreAlt] -> LintM () -- b2) Check that the others are in increasing order -- c) Check that there's a default for infinite types -- NB: Algebraic cases are not necessarily exhaustive, because --- the simplifer correctly eliminates case that can't +-- the simplifier correctly eliminates case that can't -- possibly match. checkCaseAlts e ty alts = @@ -1506,7 +1506,7 @@ checkInScope loc_msg var = checkTys :: OutType -> OutType -> MsgDoc -> LintM () -- check ty2 is subtype of ty1 (ie, has same structure but usage -- annotations need only be consistent, not equal) --- Assumes ty1,ty2 are have alrady had the substitution applied +-- Assumes ty1,ty2 have already had the substitution applied checkTys ty1 ty2 msg = checkL (ty1 `eqType` ty2) msg checkRole :: Coercion @@ -1588,7 +1588,7 @@ mkScrutMsg var var_ty scrut_ty subst mkNonDefltMsg, mkNonIncreasingAltsMsg :: CoreExpr -> MsgDoc mkNonDefltMsg e - = hang (text "Case expression with DEFAULT not at the beginnning") 4 (ppr e) + = hang (text "Case expression with DEFAULT not at the beginning") 4 (ppr e) mkNonIncreasingAltsMsg e = hang (text "Case expression with badly-ordered alternatives") 4 (ppr e) diff --git a/compiler/Eta/Core/CoreSubst.hs b/compiler/Eta/Core/CoreSubst.hs index eedb4cbf..4db1c986 100644 --- a/compiler/Eta/Core/CoreSubst.hs +++ b/compiler/Eta/Core/CoreSubst.hs @@ -200,7 +200,7 @@ never modified during substitution. Rather: of Ids gis_del that must *not* be looked up in the gbl envt. All this is needed to support SimplEnv.substExpr, which starts off -with a SimplIdSubst, which provides the ambient subsitution. +with a SimplIdSubst, which provides the ambient substitution. 
-} -- | An environment for substituting for 'Id's @@ -825,11 +825,11 @@ substTickish _subst other = other {- Note [Substitute lazily] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The functions that substitute over IdInfo must be pretty lazy, becuause +The functions that substitute over IdInfo must be pretty lazy, because they are knot-tied by substRecBndrs. One case in point was Trac #10627 in which a rule for a function 'f' -referred to 'f' (at a differnet type) on the RHS. But instead of just +referred to 'f' (at a different type) on the RHS. But instead of just substituting in the rhs of the rule, we were calling simpleOptExpr, which looked at the idInfo for 'f'; result <>. @@ -856,13 +856,13 @@ Breakpoints can't handle free variables with unlifted types anyway. {- Note [Worker inlining] ~~~~~~~~~~~~~~~~~~~~~~ -A worker can get sustituted away entirely. +A worker can get substituted away entirely. - it might be trivial - it might simply be very small We do not treat an InlWrapper as an 'occurrence' in the occurrence analyser, so it's possible that the worker is not even in scope any more. -In all all these cases we simply drop the special case, returning to +In all these cases we simply drop the special case, returning to InlVanilla. The WARN is just so I can see if it happens a lot. @@ -872,7 +872,7 @@ InlVanilla. The WARN is just so I can see if it happens a lot. * * ************************************************************************ -Note [Optimise coercion boxes agressively] +Note [Optimise coercion boxes aggressively] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The simple expression optimiser needs to deal with Eq# boxes as follows: @@ -893,7 +893,7 @@ We do this for two reasons: 2. The test T4356 fails Lint because it creates a coercion between types of kind (* -> * -> *) and (?? -> ? -> *), which differ. 
If we do this - inlining agressively we can collapse away the intermediate coercion between + inlining aggressively we can collapse away the intermediate coercion between these two types and hence pass Lint again. (This is a sort of a hack.) In fact, our implementation uses slightly liberalised versions of the second rule @@ -931,7 +931,7 @@ simpleOptExpr :: CoreExpr -> CoreExpr -- or where the RHS is trivial -- -- We also inline bindings that bind a Eq# box: see --- See Note [Optimise coercion boxes agressively]. +-- See Note [Optimise coercion boxes aggressively]. -- -- The result is NOT guaranteed occurrence-analysed, because -- in (let x = y in ....) we substitute for x; so y's occ-info @@ -1008,7 +1008,7 @@ simple_opt_expr subst expr go lam@(Lam {}) = go_lam [] subst lam go (Case e b ty as) - -- See Note [Optimise coercion boxes agressively] + -- See Note [Optimise coercion boxes aggressively] | isDeadBinder b , Just (con, _tys, es) <- exprIsConApp_maybe in_scope_env e' , Just (altcon, bs, rhs) <- findAlt (DataAlt con) as @@ -1139,7 +1139,7 @@ maybe_substitute subst b r | (Var fun, args) <- collectArgs r , Just dc <- isDataConWorkId_maybe fun , dc `hasKey` eqBoxDataConKey || dc `hasKey` coercibleDataConKey - , all exprIsTrivial args = True -- See Note [Optimise coercion boxes agressively] + , all exprIsTrivial args = True -- See Note [Optimise coercion boxes aggressively] | otherwise = False ---------------------- @@ -1433,7 +1433,7 @@ to compute the type arguments to the dictionary constructor. Note [DFun arity check] ~~~~~~~~~~~~~~~~~~~~~~~ -Here we check that the total number of supplied arguments (inclding +Here we check that the total number of supplied arguments (including type args) matches what the dfun is expecting. 
This may be *less* than the ordinary arity of the dfun: see Note [DFun unfoldings] in CoreSyn -} diff --git a/compiler/Eta/Core/CoreSyn.hs b/compiler/Eta/Core/CoreSyn.hs index 3428951e..dd15294e 100644 --- a/compiler/Eta/Core/CoreSyn.hs +++ b/compiler/Eta/Core/CoreSyn.hs @@ -405,7 +405,7 @@ Here's another example: Since T has no data constructors, the case alternatives are of course empty. However note that 'x' is not bound to a visibly-bottom value; it's the *type* that tells us it's going to diverge. Its a bit of a -degnerate situation but we do NOT want to replace +degenerate situation but we do NOT want to replace case x of Bool {} --> error Bool "Inaccessible case" because x might raise an exception, and *that*'s what we want to see! (Trac #6067 is an example.) To preserve semantics we'd have to say @@ -495,7 +495,7 @@ data Tickish id = -- valid. Note that it is still undesirable though, as this reduces -- their usefulness for debugging and profiling. Therefore we will -- generally try only to make use of this property where it is - -- neccessary to enable optimizations. + -- necessary to enable optimizations. | SourceNote { sourceSpan :: RealSrcSpan -- ^ Source covered , sourceName :: String -- ^ Name for source location @@ -554,7 +554,7 @@ data TickishScoping = -- ==> -- tick<...> case foo of x -> bar -- - -- While this is always leagl, we want to make a best effort to + -- While this is always legal, we want to make a best effort to -- only make us of this where it exposes transformation -- opportunities. | SoftScope @@ -738,7 +738,7 @@ notOrphan _ = False chooseOrphanAnchor :: NameSet -> IsOrphan -- Something (rule, instance) is relate to all the Names in this -- list. Choose one of them to be an "anchor" for the orphan. We make --- the choice deterministic to avoid gratuitious changes in the ABI +-- the choice deterministic to avoid gratuitous changes in the ABI -- hash (Trac #4012). 
Specifically, use lexicographic comparison of -- OccName rather than comparing Uniques -- @@ -773,8 +773,8 @@ its left hand side mentions nothing defined in this module. Orphan-hood has two major consequences * A module that contains orphans is called an "orphan module". If - the module being compiled depends (transitively) on an oprhan - module M, then M.hi is read in regardless of whether M is oherwise + the module being compiled depends (transitively) on an orphan + module M, then M.hi is read in regardless of whether M is otherwise needed. This is to ensure that we don't miss any instance decls in M. But it's painful, because it means we need to keep track of all the orphan modules below us. @@ -784,9 +784,9 @@ has two major consequences mentions on the LHS. For example data T = T1 | T2 instance Eq T where .... - The instance (Eq T) is incorprated as part of T's fingerprint. + The instance (Eq T) is incorporated as part of T's fingerprint. - In constrast, orphans are all fingerprinted together in the + In contrast, orphans are all fingerprinted together in the mi_orph_hash field of the ModIface. See MkIface.addFingerprints. @@ -1170,7 +1170,7 @@ unfoldingTemplate :: Unfolding -> CoreExpr unfoldingTemplate = uf_tmpl -- | Retrieves the template of an unfolding if possible --- maybeUnfoldingTemplate is used mainly wnen specialising, and we do +-- maybeUnfoldingTemplate is used mainly when specialising, and we do -- want to specialise DFuns, so it's important to return a template -- for DFunUnfoldings maybeUnfoldingTemplate :: Unfolding -> Maybe CoreExpr @@ -1305,7 +1305,7 @@ In unfoldings and rules, we guarantee that the template is occ-analysed, so that the occurrence info on the binders is correct. This is important, because the Simplifier does not re-analyse the template when using it. 
If the occurrence info is wrong - - We may get more simpifier iterations than necessary, because + - We may get more simplifier iterations than necessary, because once-occ info isn't there - More seriously, we may get an infinite loop if there's a Rec without a loop breaker marked @@ -1635,7 +1635,7 @@ collectNBinders orig_n orig_expr go _ _ _ = pprPanic "collectNBinders" $ int orig_n --- | Takes a nested application expression and returns the the function +-- | Takes a nested application expression and returns the function -- being applied and the arguments to which it is applied collectArgs :: Expr b -> (Expr b, [Arg b]) collectArgs expr @@ -1738,7 +1738,7 @@ data AnnBind bndr annot = AnnNonRec bndr (AnnExpr bndr annot) | AnnRec [(bndr, AnnExpr bndr annot)] --- | Takes a nested application expression and returns the the function +-- | Takes a nested application expression and returns the function -- being applied and the arguments to which it is applied collectAnnArgs :: AnnExpr b a -> (AnnExpr b a, [AnnExpr b a]) collectAnnArgs expr diff --git a/compiler/Eta/Core/CoreTidy.hs b/compiler/Eta/Core/CoreTidy.hs index d11eb999..e8015937 100644 --- a/compiler/Eta/Core/CoreTidy.hs +++ b/compiler/Eta/Core/CoreTidy.hs @@ -189,7 +189,7 @@ tidyLetBndr rec_tidy_env env@(tidy_env, var_env) (id,rhs) -- -- Similarly arity info for eta expansion in CorePrep -- - -- Set inline-prag info so that we preseve it across + -- Set inline-prag info so that we preserve it across -- separate compilation boundaries old_info = idInfo id new_info = vanillaIdInfo diff --git a/compiler/Eta/Core/CoreUnfold.hs b/compiler/Eta/Core/CoreUnfold.hs index 28848818..0b1540e6 100644 --- a/compiler/Eta/Core/CoreUnfold.hs +++ b/compiler/Eta/Core/CoreUnfold.hs @@ -97,7 +97,7 @@ mkDFunUnfolding bndrs con ops = DFunUnfolding { df_bndrs = bndrs , df_con = con , df_args = map occurAnalyseExpr ops } - -- See Note [Occurrrence analysis of unfoldings] + -- See Note [Occurrence analysis of unfoldings] 
mkWwInlineRule :: CoreExpr -> Arity -> Unfolding mkWwInlineRule expr arity @@ -220,7 +220,7 @@ mkCoreUnfolding :: UnfoldingSource -> Bool -> CoreExpr -- Occurrence-analyses the expression before capturing it mkCoreUnfolding src top_lvl expr guidance = CoreUnfolding { uf_tmpl = occurAnalyseExpr expr, - -- See Note [Occurrrence analysis of unfoldings] + -- See Note [Occurrence analysis of unfoldings] uf_src = src, uf_is_top = top_lvl, uf_is_value = exprIsHNF expr, @@ -239,7 +239,7 @@ mkUnfolding dflags src top_lvl is_bottoming expr = NoUnfolding -- See Note [Do not inline top-level bottoming functions] | otherwise = CoreUnfolding { uf_tmpl = occurAnalyseExpr expr, - -- See Note [Occurrrence analysis of unfoldings] + -- See Note [Occurrence analysis of unfoldings] uf_src = src, uf_is_top = top_lvl, uf_is_value = exprIsHNF expr, @@ -269,7 +269,7 @@ the unfolding in question was a DFun unfolding. But more generally, the simplifier is designed on the basis that it is looking at occurrence-analysed expressions, so better -ensure that they acutally are. +ensure that they actually are. Note [Calculate unfolding guidance on the non-occ-anal'd expression] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -397,7 +397,7 @@ are very cheap, because exposing them to a caller is so valuable. [25/5/11] All sizes are now multiplied by 10, except for primops (which have sizes like 1 or 4. This makes primops look fantastically -cheap, and seems to be almost unversally beneficial. Done partly as a +cheap, and seems to be almost universally beneficial. Done partly as a result of #4978. 
Note [Do not inline top-level bottoming functions] @@ -597,9 +597,9 @@ sizeExpr dflags bOMB_OUT_SIZE top_args expr -- Don't charge for args, so that wrappers look cheap -- (See comments about wrappers with Case) -- - -- IMPORATANT: *do* charge 1 for the alternative, else we + -- IMPORTANT: *do* charge 1 for the alternative, else we -- find that giant case nests are treated as practically free - -- A good example is Foreign.C.Error.errrnoToIOError + -- A good example is Foreign.C.Error.errnoToIOError ------------ -- These addSize things have to be here because @@ -715,7 +715,7 @@ of extra incentive we give a discount of 10*(1 + n_val_args). Simon M tried a MUCH bigger discount: (10 * (10 + n_val_args)), and said it was an "unambiguous win", but its terribly dangerous -because a fuction with many many case branches, each finishing with +because a function with many many case branches, each finishing with a constructor, can have an arbitrarily large discount. This led to terrible code bloat: see Trac #6099. @@ -761,7 +761,7 @@ Conclusion: Note [Literal integer size] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Literal integers *can* be big (mkInteger [...coefficients...]), but -need not be (S# n). We just use an aribitrary big-ish constant here +need not be (S# n). We just use an arbitrary big-ish constant here so that, in particular, we don't inline top-level defns like n = S# 5 There's no point in doing so -- any optimisations will see the S# @@ -783,7 +783,7 @@ buildSize :: ExprSize buildSize = SizeIs (_ILIT(0)) emptyBag (_ILIT(40)) -- We really want to inline applications of build -- build t (\cn -> e) should cost only the cost of e (because build will be inlined later) - -- Indeed, we should add a result_discount becuause build is + -- Indeed, we should add a result_discount because build is -- very like a constructor. We don't bother to check that the -- build is saturated (it usually is). The "-2" discounts for the \c n, -- The "4" is rather arbitrary. 
@@ -810,7 +810,7 @@ binary sizes shrink significantly either. Note [Discounts and thresholds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Constants for discounts and thesholds are defined in main/DynFlags, +Constants for discounts and thresholds are defined in main/DynFlags, all of form ufXxxx. They are: ufCreationThreshold @@ -981,7 +981,7 @@ StrictAnal.addStrictnessInfoToTopId callSiteInline :: DynFlags -> Id -- The Id -> Bool -- True <=> unfolding is active - -> Bool -- True if there are are no arguments at all (incl type args) + -> Bool -- True if there are no arguments at all (incl type args) -> [ArgSummary] -- One for each value arg; True if it is interesting -> CallCtxt -- True <=> continuation is interesting -> Maybe CoreExpr -- Unfolding, if any @@ -1003,7 +1003,7 @@ nonTriv _ = True data CallCtxt = BoringCtxt | RhsCtxt -- Rhs of a let-binding; see Note [RHS of lets] - | DiscArgCtxt -- Argument of a fuction with non-zero arg discount + | DiscArgCtxt -- Argument of a function with non-zero arg discount | RuleArgCtxt -- We are somewhere in the argument of a function with rules | ValAppCtxt -- We're applied to at least one value arg @@ -1235,7 +1235,7 @@ CONLIKE thing (modulo lets). Note [Lone variables] See also Note [Interaction of exprIsWorkFree and lone variables] ~~~~~~~~~~~~~~~~~~~~~ which appears below The "lone-variable" case is important. I spent ages messing about -with unsatisfactory varaints, but this is nice. The idea is that if a +with unsatisfactory variants, but this is nice. The idea is that if a variable appears all alone as an arg of lazy fn, or rhs BoringCtxt @@ -1287,7 +1287,7 @@ However, watch out: case $fMonadST @ RealWorld of { :DMonad a b c -> c } We had better inline that sucker! The case won't see through it. - For now, I'm treating treating a variable applied to types + For now, I'm treating a variable applied to types in a *lazy* context "lone". The motivating example was f = /\a. \x. BIG g = /\a. \y. 
h (f a) @@ -1364,6 +1364,6 @@ computeDiscount dflags arg_discounts res_discount arg_infos cont_info -- constructors; but we only want to invoke that large discount -- when there's a case continuation. -- Otherwise we, rather arbitrarily, threshold it. Yuk. - -- But we want to aovid inlining large functions that return + -- But we want to avoid inlining large functions that return -- constructors into contexts that are simply "interesting" diff --git a/compiler/Eta/Core/CoreUtils.hs b/compiler/Eta/Core/CoreUtils.hs index 3f18ba4a..9f4383df 100644 --- a/compiler/Eta/Core/CoreUtils.hs +++ b/compiler/Eta/Core/CoreUtils.hs @@ -8,7 +8,7 @@ Utility functions on @Core@ syntax {-# LANGUAGE CPP #-} --- | Commonly useful utilites for manipulating the Core language +-- | Commonly useful utilities for manipulating the Core language module Eta.Core.CoreUtils ( -- * Constructing expressions mkCast, @@ -132,7 +132,7 @@ coreAltsType [] = panic "corAltsType" Note [Type bindings] ~~~~~~~~~~~~~~~~~~~~ Core does allow type bindings, although such bindings are -not much used, except in the output of the desuguarer. +not much used, except in the output of the desugarer. Example: let a = Int in (\x:a. 
x) Given this, exprType must be careful to substitute 'a' in the @@ -352,7 +352,7 @@ stripTicksTop p = go [] go ts other = (reverse ts, other) -- | Strip ticks satisfying a predicate from top of an expression, --- returning the remaining expresion +-- returning the remaining expression stripTicksTopE :: (Tickish Id -> Bool) -> Expr b -> Expr b stripTicksTopE p = go where go (Tick t e) | p t = go e @@ -475,7 +475,7 @@ isDefaultAlt _ = False -- | Find the case alternative corresponding to a particular -- constructor: panics if no such constructor exists findAlt :: AltCon -> [(AltCon, a, b)] -> Maybe (AltCon, a, b) - -- A "Nothing" result *is* legitmiate + -- A "Nothing" result *is* legitimate -- See Note [Unreachable code] findAlt con alts = case alts of @@ -510,7 +510,7 @@ trimConArgs :: AltCon -> [CoreArg] -> [CoreArg] -- > C b x y -> ... -- -- We want to drop the leading type argument of the scrutinee --- leaving the arguments to match agains the pattern +-- leaving the arguments to match against the pattern trimConArgs DEFAULT args = ASSERT( null args ) [] trimConArgs (LitAlt _) args = ASSERT( null args ) [] @@ -613,7 +613,7 @@ that cannot match. For example: Suppose that for some silly reason, x isn't substituted in the case expression. (Perhaps there's a NOINLINE on it, or profiling SCC stuff -gets in the way; cf Trac #3118.) Then the full-lazines pass might produce +gets in the way; cf Trac #3118.) 
Then the full-laziness pass might produce this x = Red @@ -924,7 +924,7 @@ exprIsCheap' good_app other_expr -- Applications and variables go (Var _) [] = True -- Just a type application of a variable -- (f t1 t2 t3) counts as WHNF - -- This case is probably handeld by the good_app case + -- This case is probably handled by the good_app case -- below, which should have a case for n=0, but putting -- it here too is belt and braces; and it's such a common -- case that checking for null directly seems like a @@ -1137,7 +1137,7 @@ app_ok primop_ok fun args ----------------------------- altsAreExhaustive :: [Alt b] -> Bool --- True <=> the case alternatives are definiely exhaustive +-- True <=> the case alternatives are definitely exhaustive -- False <=> they may or may not be altsAreExhaustive [] = False -- Should not happen @@ -1367,7 +1367,7 @@ dataConInstPat :: [FastString] -- A long enough list of FSs to use for -- -- ex_tvs are intended to be used as binders for existential type args -- --- arg_ids are indended to be used as binders for value arguments, +-- arg_ids are intended to be used as binders for value arguments, -- and their types have been instantiated with inst_tys and ex_tys -- The arg_ids include both evidence and -- programmer-specified arguments (both after rep-ing) @@ -1406,7 +1406,7 @@ dataConInstPat fss uniqs con inst_tys -- Make the instantiating substitution for universals univ_subst = zipOpenTvSubst univ_tvs inst_tys - -- Make existential type variables, applyingn and extending the substitution + -- Make existential type variables, applying and extending the substitution (full_subst, ex_bndrs) = mapAccumL mk_ex_var univ_subst (zip3 ex_tvs ex_fss ex_uniqs) @@ -1590,7 +1590,7 @@ diffExpr _ _ e1 e2 -- all possible mappings, which would be seriously expensive. So -- instead we simply match single bindings as far as we can. 
This -- leaves us just with mutually recursive and/or mismatching bindings, --- which we then specuatively match by ordering them. It's by no means +-- which we then speculatively match by ordering them. It's by no means -- perfect, but gets the job done well enough. diffBinds :: Bool -> RnEnv2 -> [(Var, CoreExpr)] -> [(Var, CoreExpr)] -> ([SDoc], RnEnv2) @@ -1828,7 +1828,7 @@ There are some particularly delicate points here: says f=bottom, and replaces the (f `seq` True) with just (f `cast` unsafe-co). BUT, as thing stand, 'f' got arity 1, and it *keeps* arity 1 (perhaps also wrongly). So CorePrep eta-expands - the definition again, so that it does not termninate after all. + the definition again, so that it does not terminate after all. Result: seg-fault because the boolean case actually gets a function value. See Trac #1947. @@ -2029,7 +2029,7 @@ rhsIsStatic :: Platform -- -- (ii) We treat partial applications as redexes, because in fact we -- make a thunk for them that runs and builds a PAP --- at run-time. The only appliations that are treated as +-- at run-time. The only applications that are treated as -- static are *saturated* applications of constructors. -- We used to try to be clever with nested structures like this: @@ -2061,7 +2061,7 @@ rhsIsStatic :: Platform -- This is a bit like CoreUtils.exprIsHNF, with the following differences: -- a) scc "foo" (\x -> ...) is updatable (so we catch the right SCC) -- --- b) (C x xs), where C is a contructor is updatable if the application is +-- b) (C x xs), where C is a constructor is updatable if the application is -- dynamic -- -- c) don't look through unfolding of f in (f x). 
diff --git a/compiler/Eta/Core/MkCore.hs b/compiler/Eta/Core/MkCore.hs index 831bfb4b..c55c1f9f 100644 --- a/compiler/Eta/Core/MkCore.hs +++ b/compiler/Eta/Core/MkCore.hs @@ -100,7 +100,7 @@ infixl 4 `mkCoreApp`, `mkCoreApps` sortQuantVars :: [Var] -> [Var] -- Sort the variables (KindVars, TypeVars, and Ids) -- into order: Kind, then Type, then Id --- It is a deterministic sort, meaining it doesn't look at the values of +-- It is a deterministic sort, meaning it doesn't look at the values of -- Uniques. For explanation why it's important See Note [Unique Determinism] -- in Unique. sortQuantVars = sortBy (comparing category) @@ -182,7 +182,7 @@ mk_val_app fun arg arg_ty res_ty -- game, mk_val_app returns an expression that does not have -- have a free wild-id. So the only thing that can go wrong -- is if you take apart this case expression, and pass a - -- fragmet of it as the fun part of a 'mk_val_app'. + -- fragment of it as the fun part of a 'mk_val_app'. ----------- mkWildEvBinder :: PredType -> EvVar @@ -329,7 +329,7 @@ Note [Flattening one-tuples] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This family of functions creates a tuple of variables/expressions/types. mkCoreTup [e1,e2,e3] = (e1,e2,e3) -What if there is just one variable/expression/type in the agument? +What if there is just one variable/expression/type in the argument? We could do one of two things: * Flatten it out, so that @@ -410,7 +410,7 @@ mkBigCoreTupTy = mkChunkified mkBoxedTupleTy ************************************************************************ -} --- | Builds a selector which scrutises the given +-- | Builds a selector which scrutinises the given -- expression and extracts the one name from the list given. 
-- If you want the no-shadowing rule to apply, the caller -- is responsible for making sure that none of these names @@ -587,7 +587,7 @@ mkFoldrExpr :: MonadThings m -> Type -- ^ Fold result type -> CoreExpr -- ^ "Cons" function expression for the fold -> CoreExpr -- ^ "Nil" expression for the fold - -> CoreExpr -- ^ List expression being folded acress + -> CoreExpr -- ^ List expression being folded across -> m CoreExpr mkFoldrExpr elt_ty result_ty c n list = do foldr_id <- lookupId foldrName diff --git a/compiler/Eta/Core/TrieMap.hs b/compiler/Eta/Core/TrieMap.hs index 60feff5a..18567b2e 100644 --- a/compiler/Eta/Core/TrieMap.hs +++ b/compiler/Eta/Core/TrieMap.hs @@ -49,7 +49,7 @@ numbered on the fly. ************************************************************************ -} -type XT a = Maybe a -> Maybe a -- How to alter a non-existent elt (Nothing) +type XT a = Maybe a -> Maybe a -- How to alter a nonexistent elt (Nothing) -- or an existing elt (Just) class TrieMap m where @@ -245,7 +245,7 @@ Note [Binders] rather than cm_lam :: TypeMap (CoreMap a) - * We don't need to look at the type of some binders, notalby + * We don't need to look at the type of some binders, notably - the case binder in (Case _ b _ _) - the binders in an alternative because they are totally fixed by the context @@ -261,7 +261,7 @@ Note [Empty case alternatives] which is utterly wrong (Trac #6097) We could compare the return type regardless, but the wildly common case -is that it's unnecesary, so we have two fields (cm_case and cm_ecase) +is that it's unnecessary, so we have two fields (cm_case and cm_ecase) for the two possibilities. Only cm_ecase looks at the type. See also Note [Empty case alternatives] in CoreSyn. 
diff --git a/compiler/Eta/DeSugar/Check.hs b/compiler/Eta/DeSugar/Check.hs index f903c68c..830c3ddf 100644 --- a/compiler/Eta/DeSugar/Check.hs +++ b/compiler/Eta/DeSugar/Check.hs @@ -535,7 +535,7 @@ translateGuards fam_insts guards = do -- It should have been (return all_guards) but it is too expressive. -- Since the term oracle does not handle all constraints we generate, -- we (hackily) replace all constraints the oracle cannot handle with a - -- single one (we need to know if there is a possibility of falure). + -- single one (we need to know if there is a possibility of failure). -- See Note [Guards and Approximation] for all guard-related approximations -- we implement. where @@ -710,7 +710,7 @@ pmPatType (PmGrd { pm_grd_pv = pv }) -- fresh variables of the appropriate type for arguments) mkOneConFull :: Id -> ConLike -> PmM (ValAbs, ComplexEq, Bag EvVar) -- * x :: T tys, where T is an algebraic data type --- NB: in the case of a data familiy, T is the *representation* TyCon +-- NB: in the case of a data family, T is the *representation* TyCon -- e.g. data instance T (a,b) = T1 a b -- leads to -- data TPair a b = T1 a b -- The "representation" type @@ -904,7 +904,7 @@ Generates the initial uncovered set. Term and type constraints in scope set contains only a vector of variables with the constraints in scope. * pmcheck :: PatVec -> [PatVec] -> ValVec -> PmM Triple - Checks redundancy, coverage and inaccessibility, using auxilary functions + Checks redundancy, coverage and inaccessibility, using auxiliary functions `pmcheckGuards` and `pmcheckHd`. Mainly handles the guard case which is common in all three checks (see paper) and calls `pmcheckGuards` when the whole clause is checked, or `pmcheckHd` when the pattern vector does not @@ -923,7 +923,7 @@ it does not even produce the covered and uncovered sets. 
Since we only care about whether a clause covers SOMETHING or if it may forces ANY argument, we -} --- | Lift a pattern matching action from a single value vector abstration to a +-- | Lift a pattern matching action from a single value vector abstraction to a -- value set abstraction, but calling it on every vector and the combining the -- results. runMany :: (ValVec -> PmM Triple) -> (Uncovered -> PmM Triple) @@ -1335,7 +1335,7 @@ warnPmIters dflags (DsMatchContext kind loc) msg is = fsep [ text "Pattern match checker exceeded" , parens (ppr is), text "iterations in", ctxt <> dot , text "(Use fmax-pmcheck-iterations=n" - , text "to set the maximun number of iterations to n)" ] + , text "to set the maximum number of iterations to n)" ] flag_i = wopt Opt_WarnOverlappingPatterns dflags flag_u = exhaustive dflags kind diff --git a/compiler/Eta/DeSugar/Coverage.hs b/compiler/Eta/DeSugar/Coverage.hs index 6864cc7b..48dbe929 100644 --- a/compiler/Eta/DeSugar/Coverage.hs +++ b/compiler/Eta/DeSugar/Coverage.hs @@ -448,7 +448,7 @@ addBinTickLHsExpr boxLabel (L pos e0) -- ----------------------------------------------------------------------------- --- Decoarate an HsExpr with ticks +-- Decorate an HsExpr with ticks addTickHsExpr :: HsExpr Id -> TM (HsExpr Id) addTickHsExpr e@(HsVar id) = do freeVar id; return e @@ -592,7 +592,7 @@ addTickHsExpr (HsWrap w e) = addTickHsExpr e@(HsType _) = return e addTickHsExpr (HsUnboundVar {}) = panic "addTickHsExpr.HsUnboundVar" --- Others dhould never happen in expression content. +-- Others should never happen in expression content. addTickHsExpr e = pprPanic "addTickHsExpr" (ppr e) addTickTupArg :: LHsTupArg Id -> TM (LHsTupArg Id) @@ -1138,7 +1138,7 @@ mkTickish boxLabel countEntries topOnly pos fvs decl_path = do let ids = filter (not . isUnLiftedType . 
idType) $ occEnvElts fvs -- unlifted types cause two problems here: -- * we can't bind them at the GHCi prompt - -- (bindLocalsAtBreakpoint already fliters them out), + -- (bindLocalsAtBreakpoint already filters them out), -- * the simplifier might try to substitute a literal for -- the Id, and we can't handle that. diff --git a/compiler/Eta/DeSugar/DeSugar.hs b/compiler/Eta/DeSugar/DeSugar.hs index 4f63ed9c..4af7fa1c 100644 --- a/compiler/Eta/DeSugar/DeSugar.hs +++ b/compiler/Eta/DeSugar/DeSugar.hs @@ -448,7 +448,7 @@ That keeps the desugaring of list comprehensions simple too. Nor do we want to warn of conversion identities on the LHS; -the rule is precisly to optimise them: +the rule is precisely to optimise them: {-# RULES "fromRational/id" fromRational = id :: Rational -> Rational #-} diff --git a/compiler/Eta/DeSugar/DsBinds.hs b/compiler/Eta/DeSugar/DsBinds.hs index 643bd145..84f6c2e1 100644 --- a/compiler/Eta/DeSugar/DsBinds.hs +++ b/compiler/Eta/DeSugar/DsBinds.hs @@ -354,7 +354,7 @@ Note [Rules and inlining] ~~~~~~~~~~~~~~~~~~~~~~~~~ Common special case: no type or dictionary abstraction This is a bit less trivial than you might suppose -The naive way woudl be to desguar to something like +The naive way would be to desugar to something like f_lcl = ...f_lcl... -- The "binds" from AbsBinds M.f = f_lcl -- Generated from "exports" But we don't want that, because if M.f isn't exported, @@ -445,7 +445,7 @@ happen as a result of method sharing), there's a danger that we never get to do the inlining, which is a Terribly Bad thing given that the user said "inline"! -To avoid this we pre-emptively eta-expand the definition, so that foo +To avoid this we preemptively eta-expand the definition, so that foo has the arity with which it is declared in the source code. In this example it has arity 2 (one for the Eq and one for x). Doing this should mean that (foo d) is a PAP and we don't share it.
@@ -522,7 +522,7 @@ if there is no variable in the pattern desugaring looks like With `Strict`, we want to force `tm`, but NOT `fm` or `gm`. Alas, `tm` isn't in scope in the `in ` part. - The simplest thing is to return it in the polymoprhic + The simplest thing is to return it in the polymorphic tuple `t`, thus: let t = /\a. letrec tm = rhs[fm,gm] @@ -608,7 +608,7 @@ dsSpec mb_poly_rhs (L loc (SpecPrag poly_id spec_co spec_inl)) = putSrcSpanDs loc $ do { warnDs NoReason (text "Ignoring useless SPECIALISE pragma for NOINLINE function:" <+> quotes (ppr poly_id)) - ; return Nothing } -- Function is NOINLINE, and the specialiation inherits that + ; return Nothing } -- Function is NOINLINE, and the specialisation inherits that -- See Note [Activation pragmas for SPECIALISE] | otherwise @@ -700,7 +700,7 @@ From a user SPECIALISE pragma for f, we generate We need two pragma-like things: * spec_fn's inline pragma: inherited from f's inline pragma (ignoring - activation on SPEC), unless overriden by SPEC INLINE + activation on SPEC), unless overridden by SPEC INLINE * Activation of RULE: from SPECIALISE pragma (if activation given) otherwise from f's inline pragma @@ -753,7 +753,7 @@ decomposeRuleLhs orig_bndrs orig_lhs | Just (fn_id, args) <- decompose fun2 args2 , let extra_dict_bndrs = mk_extra_dict_bndrs fn_id args - = -- pprTrace "decmposeRuleLhs" (vcat [ ptext (sLit "orig_bndrs:") <+> ppr orig_bndrs + = -- pprTrace "decomposeRuleLhs" (vcat [ ptext (sLit "orig_bndrs:") <+> ppr orig_bndrs -- , ptext (sLit "orig_lhs:") <+> ppr orig_lhs -- , ptext (sLit "lhs1:") <+> ppr lhs1 -- , ptext (sLit "extra_dict_bndrs:") <+> ppr extra_dict_bndrs @@ -872,7 +872,7 @@ drop_dicts drops dictionary bindings on the LHS where possible. RULE forall s (d :: MonadBstractIOST (ReaderT s)). 
useAbstractMonad (ReaderT s) d = $suseAbstractMonad s - Trac #8848 is a good example of where there are some intersting + Trac #8848 is a good example of where there are some interesting dictionary bindings to discard. The drop_dicts algorithm is based on these observations: diff --git a/compiler/Eta/DeSugar/DsExpr.hs b/compiler/Eta/DeSugar/DsExpr.hs index e7cb6a02..a1d352f9 100644 --- a/compiler/Eta/DeSugar/DsExpr.hs +++ b/compiler/Eta/DeSugar/DsExpr.hs @@ -3,7 +3,7 @@ (c) The GRASP/AQUA Project, Glasgow University, 1992-1998 -Desugaring exporessions. +Desugaring expressions. -} {-# LANGUAGE CPP #-} @@ -493,7 +493,7 @@ For record construction we do this (assuming T has three arguments) e (recConErr t1 "M.lhs/230/op3") \end{verbatim} -@recConErr@ then converts its arugment string into a proper message +@recConErr@ then converts its argument string into a proper message before printing it as \begin{verbatim} M.lhs, line 230: missing field op1 was evaluated @@ -576,7 +576,7 @@ dsExpr expr@(RecordUpd record_expr (HsRecFields { rec_flds = fields }) -- It's important to generate the match with matchWrapper, -- and the right hand sides with applications of the wrapper Id -- so that everything works when we are doing fancy unboxing on the - -- constructor aguments. + -- constructor arguments. ; alts <- mapM (mk_alt upd_fld_env) cons_to_upd ; ([discrim_var], matching_code) <- matchWrapper RecUpd Nothing @@ -592,7 +592,7 @@ dsExpr expr@(RecordUpd record_expr (HsRecFields { rec_flds = fields }) where ds_field :: LHsRecField Id (LHsExpr Id) -> DsM (Name, Id, CoreExpr) -- Clone the Id in the HsRecField, because its Name is that - -- of the record selector, and we must not make that a lcoal binder + -- of the record selector, and we must not make that a local binder -- else we shadow other uses of the record selector -- Hence 'lcl_id'. 
Cf Trac #2735 ds_field (L _ rec_field) = do { rhs <- dsLExpr (hsRecFieldArg rec_field) diff --git a/compiler/Eta/DeSugar/DsGRHSs.hs b/compiler/Eta/DeSugar/DsGRHSs.hs index a5b2661e..46134b75 100644 --- a/compiler/Eta/DeSugar/DsGRHSs.hs +++ b/compiler/Eta/DeSugar/DsGRHSs.hs @@ -133,7 +133,7 @@ isTrueLHsExpr :: LHsExpr Id -> Maybe (CoreExpr -> DsM CoreExpr) -- * 'otherwise' Id -- * Trivial wappings of these -- The arguments to Just are any HsTicks that we have found, --- because we still want to tick then, even it they are aways evaluted. +-- because we still want to tick them, even if they are always evaluated. isTrueLHsExpr (L _ (HsVar v)) | v `hasKey` otherwiseIdKey || v `hasKey` getUnique trueDataConId = Just return diff --git a/compiler/Eta/DeSugar/DsListComp.hs b/compiler/Eta/DeSugar/DsListComp.hs index 0a3d9812..27d515d2 100644 --- a/compiler/Eta/DeSugar/DsListComp.hs +++ b/compiler/Eta/DeSugar/DsListComp.hs @@ -197,7 +197,7 @@ where (x1, .., xn) are the variables bound in p1, v1, p2 In the translation below, the ParStmt branch translates each parallel branch into a sub-comprehension, and desugars each independently. The resulting lists are fed to a zip function, we create a binding for all the variables bound in all -the comprehensions, and then we hand things off the the desugarer for bindings. +the comprehensions, and then we hand things off to the desugarer for bindings. The zip function is generated here a) because it's small, and b) because then we don't have to deal with arbitrary limits on the number of zip functions in the prelude, nor which library the zip function came from.
@@ -361,7 +361,7 @@ dfBindComp c_id n_id (pat, core_list1) quals = do -- create some new local id's [b, x] <- newSysLocalsDs [b_ty, x_ty] - -- build rest of the comprehesion + -- build rest of the comprehension core_rest <- dfListComp c_id b quals -- build the pattern match diff --git a/compiler/Eta/DeSugar/DsMeta.hs b/compiler/Eta/DeSugar/DsMeta.hs index 30c77f7b..40084f3a 100644 --- a/compiler/Eta/DeSugar/DsMeta.hs +++ b/compiler/Eta/DeSugar/DsMeta.hs @@ -284,7 +284,7 @@ in repTyClD and repC. Note [Don't quantify implicit type variables in quotes] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If you're not careful, it's suprisingly easy to take this quoted declaration: +If you're not careful, it's surprisingly easy to take this quoted declaration: [d| idProxy :: forall proxy (b :: k). proxy b -> proxy b idProxy x = x @@ -1463,7 +1463,7 @@ repFields (HsRecFields { rec_flds = flds }) -- shadow each other. Consider: [| do { x <- f 1; x <- f x; g x } |] -- First gensym new names for every variable in any of the patterns. -- both static (x'1 and x'2), and dynamic ((gensym "x") and (gensym "y")) --- if variables didn't shaddow, the static gensym wouldn't be necessary +-- if variables didn't shadow, the static gensym wouldn't be necessary -- and we could reuse the original names (x and x). 
-- -- do { x'1 <- gensym "x" @@ -1724,7 +1724,7 @@ repLambda (L _ (Match { m_pats = ps do { xs <- repLPs ps; body <- repLE e; repLam xs body }) ; wrapGenSyms ss lam } -repLambda (L _ m) = notHandled "Guarded labmdas" (pprMatch (LambdaExpr :: HsMatchContext Name) m) +repLambda (L _ m) = notHandled "Guarded lambdas" (pprMatch (LambdaExpr :: HsMatchContext Name) m) ----------------------------------------------------------------------------- diff --git a/compiler/Eta/DeSugar/DsMonad.hs b/compiler/Eta/DeSugar/DsMonad.hs index a3081c46..33a256b6 100644 --- a/compiler/Eta/DeSugar/DsMonad.hs +++ b/compiler/Eta/DeSugar/DsMonad.hs @@ -241,7 +241,7 @@ initDsTc thing_inside initTcDsForSolver :: TcM a -> DsM (Messages, Maybe a) -- Spin up a TcM context so that we can run the constraint solver -- Returns any error messages generated by the constraint solver --- and (Just res) if no error happened; Nothing if an errror happened +-- and (Just res) if no error happened; Nothing if an error happened -- -- Simon says: I'm not very happy about this. We spin up a complete TcM monad -- only to immediately refine it to a TcS monad. @@ -362,7 +362,7 @@ addTmCsDs tm_cs = updLclEnv (\env -> env { dsl_tm_cs = unionBags tm_cs (dsl_tm_cs env) }) -- | Check that we have not done more iterations --- than we are supposed to and inrease the counter +-- than we are supposed to and increase the counter -- | Increase the counter for elapsed pattern match check iterations. -- If the current counter is already over the limit, fail incrCheckPmIterDs :: DsM () @@ -426,7 +426,7 @@ dsLookupGlobalId name = tyThingId <$> dsLookupGlobal name -- |Get a name from "Data.Array.Parallel" for the desugarer, from the 'ds_parr_bi' component of the --- global desugerar environment. +-- global desugarer environment. -- dsDPHBuiltin :: (PArrBuiltin -> a) -> DsM a dsDPHBuiltin sel = (sel . 
ds_parr_bi) <$> getGblEnv @@ -525,7 +525,7 @@ dsGetStaticBindsVar = fmap ds_static_binds getGblEnv discardWarningsDs :: DsM a -> DsM a -- Ignore warnings inside the thing inside; --- used to ignore inaccessable cases etc. inside generated code +-- used to ignore inaccessible cases etc. inside generated code discardWarningsDs thing_inside = do { env <- getGblEnv ; old_msgs <- readTcRef (ds_msgs env) diff --git a/compiler/Eta/DeSugar/Match.hs b/compiler/Eta/DeSugar/Match.hs index da717794..31f04b87 100644 --- a/compiler/Eta/DeSugar/Match.hs +++ b/compiler/Eta/DeSugar/Match.hs @@ -154,7 +154,7 @@ corresponds roughly to @matchVarCon@. Note [Match Ids] ~~~~~~~~~~~~~~~~ -Most of the matching fuctions take an Id or [Id] as argument. This Id +Most of the matching functions take an Id or [Id] as argument. This Id is the scrutinee(s) of the match. The desugared expression may sometimes use that Id in a local binding or as a case binder. So it should not have an External name; Lint rejects non-top-level binders @@ -621,7 +621,7 @@ is collected here, in @matchWrapper@. This function takes as arguments: \begin{itemize} \item -Typchecked @Matches@ (of a function definition, or a case or lambda +Typechecked @Matches@ (of a function definition, or a case or lambda expression)---the main input; \item An error message to be inserted into any (runtime) pattern-matching diff --git a/compiler/Eta/DeSugar/MatchCon.hs b/compiler/Eta/DeSugar/MatchCon.hs index 3e432b92..c45c061d 100644 --- a/compiler/Eta/DeSugar/MatchCon.hs +++ b/compiler/Eta/DeSugar/MatchCon.hs @@ -247,7 +247,7 @@ Now consider: In the first we must test y first; in the second we must test x first. So we must divide even the equations for a single constructor -T into sub-goups, based on whether they match the same field in the +T into sub-groups, based on whether they match the same field in the same order. That's what the (runs compatible_pats) grouping. 
All non-record patterns are "compatible" in this sense, because the diff --git a/compiler/Eta/DeSugar/PmExpr.hs b/compiler/Eta/DeSugar/PmExpr.hs index dab006bb..a97ecc90 100644 --- a/compiler/Eta/DeSugar/PmExpr.hs +++ b/compiler/Eta/DeSugar/PmExpr.hs @@ -219,7 +219,7 @@ hsExprToPmExpr e@(ExplicitList _ mb_ol elems) -- hsExprToPmExpr (ExplicitPArr _elem_ty elems) -- = PmExprCon (parrFakeCon (length elems)) (map lhsExprToPmExpr elems) --- we want this but we would have to make evrything monadic :/ +-- we want this but we would have to make everything monadic :/ -- ./compiler/deSugar/DsMonad.hs:397:dsLookupDataCon :: Name -> DsM DataCon -- -- hsExprToPmExpr (RecordCon c _ binds) = do diff --git a/compiler/Eta/DeSugar/TmOracle.hs b/compiler/Eta/DeSugar/TmOracle.hs index 020416b4..3babd477 100644 --- a/compiler/Eta/DeSugar/TmOracle.hs +++ b/compiler/Eta/DeSugar/TmOracle.hs @@ -171,7 +171,7 @@ simplifyPmExpr e = case e of simplifyEqExpr :: PmExpr -> PmExpr -> (PmExpr, Bool) -- See Note [Deep equalities] simplifyEqExpr e1 e2 = case (e1, e2) of - -- Varables + -- Variables (PmExprVar x, PmExprVar y) | x == y -> (truePmExpr, True) diff --git a/compiler/Eta/HsSyn/Convert.hs b/compiler/Eta/HsSyn/Convert.hs index aa8ab5c7..3cedee41 100644 --- a/compiler/Eta/HsSyn/Convert.hs +++ b/compiler/Eta/HsSyn/Convert.hs @@ -869,7 +869,7 @@ cvtDD (FromThenR x y) = do { x' <- cvtl x; y' <- cvtl y; return $ FromThen x cvtDD (FromToR x y) = do { x' <- cvtl x; y' <- cvtl y; return $ FromTo x' y' } cvtDD (FromThenToR x y z) = do { x' <- cvtl x; y' <- cvtl y; z' <- cvtl z; return $ FromThenTo x' y' z' } -{- Note [Operator assocation] +{- Note [Operator association] We must be quite careful about adding parens: * Infix (UInfix ...) op arg Needs parens round the first arg * Infix (Infix ...) 
op arg Needs parens round the first arg diff --git a/compiler/Eta/HsSyn/HsBinds.hs b/compiler/Eta/HsSyn/HsBinds.hs index 6845ede6..b92bcddf 100644 --- a/compiler/Eta/HsSyn/HsBinds.hs +++ b/compiler/Eta/HsSyn/HsBinds.hs @@ -61,7 +61,7 @@ Global bindings (where clauses) -} -- During renaming, we need bindings where the left-hand sides --- have been renamed but the the right-hand sides have not. +-- have been renamed but the right-hand sides have not. -- the ...LR datatypes are parametrized by two id types, -- one for the left and one for the right. -- Other than during renaming, these will be the same. @@ -329,7 +329,7 @@ This ultimately desugars to something like this: (fm::a->a,gm:Any->Any) -> fm ...similarly for g... -The abe_wrap field deals with impedence-matching between +The abe_wrap field deals with impedance-matching between (/\a b. case tup a b of { (f,g) -> f }) and the thing we really want, which may have fewer type variables. The action happens in TcBinds.mkExport. diff --git a/compiler/Eta/HsSyn/HsDecls.hs b/compiler/Eta/HsSyn/HsDecls.hs index 0b777dc9..8a2f6dd3 100644 --- a/compiler/Eta/HsSyn/HsDecls.hs +++ b/compiler/Eta/HsSyn/HsDecls.hs @@ -492,7 +492,7 @@ data TyClDecl name -- For details on above see note [Api annotations] in ApiAnnotation DataDecl { tcdLName :: Located name -- ^ Type constructor - , tcdTyVars :: LHsTyVarBndrs name -- ^ Type variables; for an assoicated type + , tcdTyVars :: LHsTyVarBndrs name -- ^ Type variables; for an associated type -- these include outer binders -- Eg class T a where -- type F a :: * @@ -528,7 +528,7 @@ deriving instance (DataId id) => Data (TyClDecl id) -- This is used in TcTyClsDecls to represent -- strongly connected components of decls - -- No familiy instances in here + -- No family instances in here -- The role annotations must be grouped with their decls for the -- type-checker to infer roles correctly data TyClGroup name @@ -907,7 +907,7 @@ data ConDecl name -- ^ TEMPORARY field; True <=> user has 
employed now-deprecated syntax for -- GADT-style record decl C { blah } :: T a b -- Remove this when we no longer parse this stuff, and hence do not - -- need to report decprecated use + -- need to report deprecated use } deriving (Typeable) deriving instance (DataId name) => Data (ConDecl name) diff --git a/compiler/Eta/HsSyn/HsExpr.hs b/compiler/Eta/HsSyn/HsExpr.hs index 9c811f67..54e803c7 100644 --- a/compiler/Eta/HsSyn/HsExpr.hs +++ b/compiler/Eta/HsSyn/HsExpr.hs @@ -525,7 +525,7 @@ HsPar (and ParPat in patterns, HsParTy in types) is used as follows necessary. Eg (HsApp f (HsApp g x)) is fine, and prints 'f (g x)' * HsPars are pretty printed as '( .. )' regardless of whether - or not they are strictly necssary + or not they are strictly necessary * HsPars are respected when rearranging operator fixities. So a * (b + c) means what it says (where the parens are an HsPar) @@ -777,7 +777,7 @@ However, some code is internally generated, and in some places parens are absolutely required; so for these places we use pprParendExpr (but don't print double parens of course). -For operator applications we don't add parens, because the oprerator +For operator applications we don't add parens, because the operator fixities should do the job, except in debug mode (-dppr-debug) so we can see the structure of the parse tree. 
-} diff --git a/compiler/Eta/HsSyn/HsPat.hs b/compiler/Eta/HsSyn/HsPat.hs index 00bbb7a8..618c19c1 100644 --- a/compiler/Eta/HsSyn/HsPat.hs +++ b/compiler/Eta/HsSyn/HsPat.hs @@ -141,7 +141,7 @@ data Pat id | ConPatOut { pat_con :: Located ConLike, - pat_arg_tys :: [Type], -- The univeral arg types, 1-1 with the universal + pat_arg_tys :: [Type], -- The universal arg types, 1-1 with the universal -- tyvars of the constructor/pattern synonym -- Use (conLikeResTy pat_con pat_arg_tys) to get -- the type of the pattern @@ -535,7 +535,7 @@ conPatNeedsParens (RecCon {}) = True -- -- Note that this is different from 'hsPatNeedsParens', which only says if -- a pattern needs to be parenthesized to parse in /any/ position, whereas --- 'isCompountPat' says if a pattern needs to be parenthesized in an /argument/ +-- 'isCompoundPat' says if a pattern needs to be parenthesized in an /argument/ -- position. In other words, @'hsPatNeedsParens' x@ implies -- @'isCompoundPat' x@, but not necessarily the other way around. isCompoundPat :: Pat a -> Bool @@ -566,7 +566,7 @@ isCompoundPat (NPat (L _ p) _ _) = isCompoundHsOverLit p -- -- Note that this is different from 'conPatNeedsParens', which only says if -- a constructor pattern needs to be parenthesized to parse in /any/ position, --- whereas 'isCompountConPat' says if a pattern needs to be parenthesized in an +-- whereas 'isCompoundConPat' says if a pattern needs to be parenthesized in an -- /argument/ position. In other words, @'conPatNeedsParens' x@ implies -- @'isCompoundConPat' x@, but not necessarily the other way around. isCompoundConPat :: HsConDetails a b -> Bool diff --git a/compiler/Eta/HsSyn/HsTypes.hs b/compiler/Eta/HsSyn/HsTypes.hs index 5367c154..fba1616c 100644 --- a/compiler/Eta/HsSyn/HsTypes.hs +++ b/compiler/Eta/HsSyn/HsTypes.hs @@ -110,7 +110,7 @@ This is the syntax for types as seen in type signatures. 
Note [HsBSig binder lists] ~~~~~~~~~~~~~~~~~~~~~~~~~~ -Consider a binder (or pattern) decoarated with a type or kind, +Consider a binder (or pattern) decorated with a type or kind, \ (x :: a -> a). blah forall (a :: k -> *) (b :: k). blah Then we use a LHsBndrSig on the binder, so that the diff --git a/compiler/Eta/HsSyn/HsUtils.hs b/compiler/Eta/HsSyn/HsUtils.hs index 0f1d26aa..1877025b 100644 --- a/compiler/Eta/HsSyn/HsUtils.hs +++ b/compiler/Eta/HsSyn/HsUtils.hs @@ -458,7 +458,7 @@ mkBigLHsPatTup = mkChunkified mkLHsPatTup -- #big_tuples# -- -- GHCs built in tuples can only go up to 'mAX_TUPLE_SIZE' in arity, but --- we might concievably want to build such a massive tuple as part of the +-- we might conceivably want to build such a massive tuple as part of the -- output of a desugaring stage (notably that for list comprehensions). -- -- We call tuples above this size \"big tuples\", and emulate them by @@ -473,7 +473,7 @@ mkBigLHsPatTup = mkChunkified mkLHsPatTup -- and 'mkTupleCase' functions to do all your work with tuples you should be -- fine, and not have to worry about the arity limitation at all. 
--- | Lifts a \"small\" constructor into a \"big\" constructor by recursive decompositon +-- | Lifts a \"small\" constructor into a \"big\" constructor by recursive decomposition mkChunkified :: ([a] -> a) -- ^ \"Small\" constructor function, of maximum input arity 'mAX_TUPLE_SIZE' -> [a] -- ^ Possible \"big\" list of things to construct from -> a -- ^ Constructed thing made possible by recursive decomposition @@ -676,7 +676,7 @@ collectLocalBinders (HsIPBinds _) = [] collectLocalBinders EmptyLocalBinds = [] collectHsIdBinders, collectHsValBinders :: HsValBindsLR idL idR -> [idL] --- Collect Id binders only, or Ids + pattern synonmys, respectively +-- Collect Id binders only, or Ids + pattern synonyms, respectively collectHsIdBinders = collect_hs_val_binders True collectHsValBinders = collect_hs_val_binders False diff --git a/compiler/Eta/Iface/BinFingerprint.hs b/compiler/Eta/Iface/BinFingerprint.hs index 2f63f20c..e5203d48 100644 --- a/compiler/Eta/Iface/BinFingerprint.hs +++ b/compiler/Eta/Iface/BinFingerprint.hs @@ -1,6 +1,6 @@ {-# LANGUAGE CPP #-} --- | Computing fingerprints of values serializeable with GHC's "Binary" module. +-- | Computing fingerprints of values serializable with GHC's "Binary" module. module Eta.Iface.BinFingerprint ( -- * Computing fingerprints fingerprintBinMem diff --git a/compiler/Eta/Iface/BinIface.hs b/compiler/Eta/Iface/BinIface.hs index 306526a2..48902f13 100644 --- a/compiler/Eta/Iface/BinIface.hs +++ b/compiler/Eta/Iface/BinIface.hs @@ -184,7 +184,7 @@ writeBinIface dflags hi_path mod_iface = do symtab_p_p <- tellBin bh put_ bh symtab_p_p - -- Make some intial state + -- Make some initial state symtab_next <- newFastMutInt writeFastMutInt symtab_next 0 symtab_map <- newIORef emptyUFM @@ -219,7 +219,7 @@ writeBinIface dflags hi_path mod_iface = do -- NB. write the dictionary after the symbol table, because -- writing the symbol table may create more dictionary entries. 
- -- Write the dictionary pointer at the fornt of the file + -- Write the dictionary pointer at the front of the file dict_p <- tellBin bh -- This is where the dictionary will start putAt bh dict_p_p dict_p -- Fill in the placeholder seekBin bh dict_p -- Seek back to the end of the file diff --git a/compiler/Eta/Iface/IfaceEnv.hs b/compiler/Eta/Iface/IfaceEnv.hs index 0afeaf07..d19d3a20 100644 --- a/compiler/Eta/Iface/IfaceEnv.hs +++ b/compiler/Eta/Iface/IfaceEnv.hs @@ -47,7 +47,7 @@ import Control.Exception Note [The Name Cache] ~~~~~~~~~~~~~~~~~~~~~ -The Name Cache makes sure that, during any invovcation of GHC, each +The Name Cache makes sure that, during any invocation of GHC, each External Name "M.x" has one, and only one globally-agreed Unique. * The first time we come across M.x we make up a Unique and record that @@ -66,7 +66,7 @@ newGlobalBinder :: Module -> OccName -> SrcSpan -> TcRnIf a b Name -- Name for a thing, given its Module and OccName -- See Note [The Name Cache] -- --- The cache may already already have a binding for this thing, +-- The cache may already have a binding for this thing, -- because we may have seen an occurrence before, but now is the -- moment when we know its Module and SrcLoc in their full glory @@ -96,7 +96,7 @@ allocateGlobalBinder name_supply mod occ loc -- of the Name, so we set this field in the Name we return. -- -- Then (bogus) multiple bindings of the same Name - -- get different SrcLocs can can be reported as such. + -- get different SrcLocs can be reported as such. 
-- -- Possible other reason: it might be in the cache because we -- encountered an occurrence before the binding site for an @@ -132,7 +132,7 @@ newImplicitBinder :: Name -- Base name -> TcRnIf m n Name -- Implicit name -- Called in BuildTyCl to allocate the implicit binders of type/class decls -- For source type/class decls, this is the first occurrence --- For iface ones, the LoadIface has alrady allocated a suitable name in the cache +-- For iface ones, the LoadIface has already allocated a suitable name in the cache newImplicitBinder base_name mk_sys_occ | Just mod <- nameModule_maybe base_name = newGlobalBinder mod occ loc diff --git a/compiler/Eta/Iface/IfaceSyn.hs b/compiler/Eta/Iface/IfaceSyn.hs index 29501bf0..af529d9f 100644 --- a/compiler/Eta/Iface/IfaceSyn.hs +++ b/compiler/Eta/Iface/IfaceSyn.hs @@ -82,7 +82,7 @@ infixl 3 &&& -- | A binding top-level 'Name' in an interface file (e.g. the name of an -- 'IfaceDecl'). type IfaceTopBndr = Name - -- It's convenient to have an OccName in the IfaceSyn, altough in each + -- It's convenient to have an OccName in the IfaceSyn, although in each -- case the namespace is implied by the context. However, having an -- Name makes things like ifaceDeclImplicitBndrs and ifaceDeclFingerprints -- very convenient. Moreover, having the key of the binder means that @@ -90,7 +90,7 @@ type IfaceTopBndr = Name -- [Symbol table representation of Names] -- -- We don't serialise the namespace onto the disk though; rather we - -- drop it when serialising and add it back in when deserialising. + -- drop it when serialising and add it back in when deserializing. 
getIfaceTopBndr :: BinHandle -> IO IfaceTopBndr getIfaceTopBndr bh = get bh diff --git a/compiler/Eta/Iface/LoadIface.hs b/compiler/Eta/Iface/LoadIface.hs index 9f2d5d2b..00801721 100644 --- a/compiler/Eta/Iface/LoadIface.hs +++ b/compiler/Eta/Iface/LoadIface.hs @@ -208,7 +208,7 @@ checkWiredInTyCon tc ; ASSERT( isExternalName tc_name ) when (mod /= nameModule tc_name) (initIfaceTcRn (loadWiredInHomeIface tc_name)) - -- Don't look for (non-existent) Float.hi when + -- Don't look for (nonexistent) Float.hi when -- compiling Float.lhs, which mentions Float of course -- A bit yukky to call initIfaceTcRn here } @@ -677,7 +677,7 @@ When dynamically loading a plugin (via loadPluginInterface) we populate the same External Package State (EPS), even though plugin modules are to link with the compiler itself, and not with the compiled program. That's fine: mostly the EPS is just a cache for -the interace files on disk. +the interface files on disk. But it's NOT ok for the RULES or instance environment. We do not want to fire a RULE from the plugin on the code we are compiling, otherwise diff --git a/compiler/Eta/Iface/MkIface.hs b/compiler/Eta/Iface/MkIface.hs index 11b4e53f..285b2155 100644 --- a/compiler/Eta/Iface/MkIface.hs +++ b/compiler/Eta/Iface/MkIface.hs @@ -716,7 +716,7 @@ When we specialise an INLINEABLE function, or when we have We don't want to warn about these, at least not by default, or we'd generate a lot of warnings. Hence -fwarn-auto-orphans. -Indeed, we don't even treat the module as an oprhan module if it has +Indeed, we don't even treat the module as an orphan module if it has auto-generated *rule* orphans. Orphan modules are read every time we compile, so they are pretty obtrusive and slow down every compilation, even non-optimised ones. (Reason: for type class instances it's a @@ -1041,8 +1041,8 @@ recompileRequired _ = True -- is equivalent to the current source file the user asked us to compile. -- If the same, we can avoid recompilation. 
We return a tuple where the -- first element is a bool saying if we should recompile the object file --- and the second is maybe the interface file, where Nothng means to --- rebuild the interface file not use the exisitng one. +-- and the second is maybe the interface file, where Nothing means to +-- rebuild the interface file, not use the existing one. checkOldIface :: HscEnv -> ModSummary @@ -1127,7 +1127,7 @@ check_old_iface hsc_env mod_summary src_modified maybe_iface -- two things may have changed that mean we should recompile M: -- * The interface export by a dependency of M has changed. -- * The compiler flags specified this time for M have changed --- in a manner that is significant for recompilaiton. +-- in a manner that is significant for recompilation. -- We return not just if we should recompile the object file but also -- if we should rebuild the interface file. checkVersions :: HscEnv diff --git a/compiler/Eta/Iface/TcIface.hs b/compiler/Eta/Iface/TcIface.hs index 5e144b14..9db2aee2 100644 --- a/compiler/Eta/Iface/TcIface.hs +++ b/compiler/Eta/Iface/TcIface.hs @@ -364,7 +364,7 @@ tcHiBootIface hsc_src mod -- In --make and interactive mode, if this module has an hs-boot file -- we'll have compiled it already, and it'll be in the HPT -- - -- We check wheher the interface is a *boot* interface. + -- We check whether the interface is a *boot* interface. -- It can happen (when using GHC from Visual Studio) that we -- compile a module in TypecheckOnly mode, with a stable, -- fully-populated HPT. In that case the boot interface isn't there @@ -431,7 +431,7 @@ mkSelfBootInfo iface mds return $ SelfBoot { sb_mds = mds , sb_tcs = mkNameSet tcs } where - -- | Retuerns @True@ if, when you call 'tcIfaceDecl' on + -- | Returns @True@ if, when you call 'tcIfaceDecl' on -- this 'IfaceDecl', an ATyCon would be returned. -- NB: This code assumes that a TyCon cannot be implicit.
isIfaceTyCon IfaceId{} = False @@ -456,7 +456,7 @@ interface files for types mentioned in the arg types. E.g. data Foo.S = MkS Baz.T -Mabye we can get away without even loading the interface for Baz! +Maybe we can get away without even loading the interface for Baz! This is not just a performance thing. Suppose we have data Foo.S = MkS Baz.T @@ -720,7 +720,7 @@ tcIfaceDataCons tycon_name tycon tc_tyvars if_cons ifConStricts = if_stricts, ifConSrcStricts = if_src_stricts}) = -- Universally-quantified tyvars are shared with - -- parent TyCon, and are alrady in scope + -- parent TyCon, and are already in scope bindIfaceTyVars ex_tvs $ \ ex_tyvars -> do { traceIf (text "Start interface-file tc_con_decl" <+> ppr dc_name) @@ -884,7 +884,7 @@ tcIfaceRule (IfaceRule {ifRuleName = name, ifActivation = act, ifRuleBndrs = bnd -- We could have stored the ru_rough field in the iface file -- but that would be redundant, I think. -- The only wrinkle is that we must not be deceived by - -- type syononyms at the top of a type arg. Since + -- type synonyms at the top of a type arg. Since -- we can't tell at this point, we are careful not -- to write them out in coreRuleToIfaceRule ifTopFreeName :: IfaceExpr -> Maybe Name diff --git a/compiler/Eta/Main/DriverMkDepend.hs b/compiler/Eta/Main/DriverMkDepend.hs index 197e8c4e..41f8a297 100644 --- a/compiler/Eta/Main/DriverMkDepend.hs +++ b/compiler/Eta/Main/DriverMkDepend.hs @@ -82,7 +82,7 @@ doMkDependHS srcs = do -- Print out the dependencies if wanted liftIO $ debugTraceMsg dflags 2 (text "Module dependencies" $$ ppr sorted) - -- Prcess them one by one, dumping results into makefile + -- Process them one by one, dumping results into makefile -- and complaining about cycles hsc_env <- getSession root <- liftIO getCurrentDirectory @@ -279,7 +279,7 @@ insertSuffixes :: FilePath -- Original filename; e.g. "foo.o" -> [String] -- Suffix prefixes e.g. ["x_", "y_"] -> [FilePath] -- Zapped filenames e.g. 
["foo.x_o", "foo.y_o"] - -- Note that that the extra bit gets inserted *before* the old suffix + -- Note that the extra bit gets inserted *before* the old suffix -- We assume the old suffix contains no dots, so we know where to -- split it insertSuffixes file_name extras @@ -366,7 +366,7 @@ pprCycle summaries = pp_group (CyclicSCC summaries) pp_group (CyclicSCC mss) = ASSERT( not (null boot_only) ) -- The boot-only list must be non-empty, else there would - -- be an infinite chain of non-boot imoprts, and we've + -- be an infinite chain of non-boot imports, and we've -- already checked for that in processModDeps pp_ms loop_breaker $$ vcat (map pp_group groups) where diff --git a/compiler/Eta/Main/DriverPipeline.hs b/compiler/Eta/Main/DriverPipeline.hs index 8da8fb1a..895d94b2 100644 --- a/compiler/Eta/Main/DriverPipeline.hs +++ b/compiler/Eta/Main/DriverPipeline.hs @@ -793,7 +793,7 @@ runPhase (RealPhase (Unlit sf)) input_fn _dflags -- Unicode or anything else (so we don't use Util.charToC -- here). If we get this wrong, then in -- Coverage.addTicksToBinds where we check that the filename in - -- a SrcLoc is the same as the source filenaame, the two will + -- a SrcLoc is the same as the source filename, the two will -- look bogusly different. 
See test: -- libraries/hpc/tests/function/subdir/tough2.lhs escape ('\\':cs) = '\\':'\\': escape cs diff --git a/compiler/Eta/Main/DynFlags.hs b/compiler/Eta/Main/DynFlags.hs index d1049228..cc8748a3 100644 --- a/compiler/Eta/Main/DynFlags.hs +++ b/compiler/Eta/Main/DynFlags.hs @@ -378,7 +378,7 @@ data GeneralFlag | Opt_RegsGraph -- do graph coloring register allocation | Opt_RegsIterative -- do iterative coalescing graph coloring register allocation | Opt_PedanticBottoms -- Be picky about how we treat bottom - | Opt_LlvmTBAA -- Use LLVM TBAA infastructure for improving AA (hidden flag) + | Opt_LlvmTBAA -- Use LLVM TBAA infrastructure for improving AA (hidden flag) | Opt_LlvmPassVectorsInRegisters -- Pass SIMD vectors in registers (requires a patched LLVM) (hidden flag) | Opt_IrrefutableTuples | Opt_CmmSink @@ -1561,7 +1561,7 @@ defaultDynFlags mySettings = ufCreationThreshold = 750, ufUseThreshold = 60, ufFunAppDiscount = 60, - -- Be fairly keen to inline a fuction if that means + -- Be fairly keen to inline a function if that means -- we'll be able to pick the right method from a dictionary ufDictDiscount = 30, ufKeenessFactor = 1.5, @@ -1914,7 +1914,7 @@ setSafeHaskell s = updM f safeM <- combineSafeFlags sf s case s of Sf_Safe -> return $ dfs { safeHaskell = safeM, safeInfer = False } - -- leave safe inferrence on in Trustworthy mode so we can warn + -- leave safe inference on in Trustworthy mode so we can warn -- if it could have been inferred safe. Sf_Trustworthy -> do l <- getCurLoc @@ -1999,7 +1999,7 @@ setMetricsDir f d = d{ metricsDir = Just f} setHiDir f d = d{ hiDir = Just f} setStubDir f d = d{ stubDir = Just f, includePaths = f : includePaths d } -- -stubdir D adds an implicit -I D, so that gcc can find the _stub.h file - -- \#included from the .hc file when compiling via C (i.e. unregisterised + -- \#included from the .hc file when compiling via C (i.e. unregisterised -- builds). setDumpDir f d = d{ dumpDir = Just f} setOutputDir f = setObjectDir f .
setHiDir f . setStubDir f . setDumpDir f @@ -2580,8 +2580,8 @@ dynamic_flags = [ , defFlag "fdiagnostics-color=never" (NoArg (upd (\d -> d { useColor = Never }))) , defGhcFlag "dtrace-level" (intSuffix (\n d -> d{ traceLevel = n })) - -- Suppress all that is suppressable in core dumps. - -- Except for uniques, as some simplifier phases introduce new varibles that + -- Suppress all that is suppressible in core dumps. + -- Except for uniques, as some simplifier phases introduce new variables that -- have otherwise identical names. , defGhcFlag "dsuppress-all" (NoArg $ do setGeneralFlag Opt_SuppressCoercions @@ -3482,7 +3482,7 @@ impliedXFlags -- * docs/users_guide/flags.xml -- * docs/users_guide/using.xml -- --- The first contains the Flag Refrence section, which breifly lists all +-- The first contains the Flag Reference section, which briefly lists all -- available flags. The second contains a detailed description of the -- flags. Both places should contain information whether a flag is implied by -- -O0, -O or -O2. @@ -3874,7 +3874,7 @@ setDumpFlag' dump_flag Opt_D_dump_hi_diffs] forceRecompile :: DynP () --- Whenver we -ddump, force recompilation (by switching off the +-- Whenever we -ddump, force recompilation (by switching off the -- recompilation checker), else you don't see the dump! 
However, -- don't switch it off in --make mode, else *everything* gets -- recompiled which probably isn't what you want diff --git a/compiler/Eta/Main/FileCleanup.hs b/compiler/Eta/Main/FileCleanup.hs index b000e8b0..be2e3978 100644 --- a/compiler/Eta/Main/FileCleanup.hs +++ b/compiler/Eta/Main/FileCleanup.hs @@ -235,7 +235,7 @@ removeWith :: DynFlags -> (FilePath -> IO ()) -> FilePath -> IO () removeWith dflags remover f = remover f `catchIO` (\e -> let msg = if isDoesNotExistError e - then text "Warning: deleting non-existent" <+> text f + then text "Warning: deleting non-existent" <+> text f else text "Warning: exception raised when deleting" <+> text f <> colon $$ text (show e) diff --git a/compiler/Eta/Main/GHC.hs b/compiler/Eta/Main/GHC.hs index 9ca49f8f..e8d7745d 100644 --- a/compiler/Eta/Main/GHC.hs +++ b/compiler/Eta/Main/GHC.hs @@ -178,7 +178,7 @@ module Eta.Main.GHC ( isRecordSelector, isPrimOpId, isFCallId, isClassOpId_maybe, isDataConWorkId, idDataCon, - isBottomingId, isDictonaryId, + isBottomingId, isDictionaryId, recordSelectorFieldLabel, -- ** Type constructors @@ -584,7 +584,7 @@ setProgramDynFlags dflags = do -- that the next downsweep will think that all the files have changed -- and preprocess them again. This won't necessarily cause everything -- to be recompiled, because by the time we check whether we need to --- recopmile a module, we'll have re-summarised the module and have a +-- recompile a module, we'll have re-summarised the module and have a -- correct ModSummary.
-- invalidateModSummaryCache :: GhcMonad m => m () @@ -1169,8 +1169,8 @@ modInfoModBreaks :: ModuleInfo -> ModBreaks modInfoModBreaks = minf_modBreaks #endif -isDictonaryId :: Id -> Bool -isDictonaryId id +isDictionaryId :: Id -> Bool +isDictionaryId id = case tcSplitSigmaTy (idType id) of { (_tvs, _theta, tau) -> isDictTy tau } -- | Looks up a global name: that is, any top-level name in any @@ -1276,7 +1276,7 @@ pprParenSymName a = parenSymOcc (getOccName a) (ppr (getName a)) #endif --- Extract the filename, stringbuffer content and dynflags associed to a module +-- Extract the filename, stringbuffer content and dynflags associated to a module -- -- XXX: Explain pre-conditions getModuleSourceAndFlags :: GhcMonad m => Module -> m (String, StringBuffer, DynFlags) diff --git a/compiler/Eta/Main/GhcMake.hs b/compiler/Eta/Main/GhcMake.hs index f18edb3d..513b4a35 100644 --- a/compiler/Eta/Main/GhcMake.hs +++ b/compiler/Eta/Main/GhcMake.hs @@ -139,7 +139,7 @@ depanal excluded_mods allow_dup_roots = do -- when building a library, so that GHC warns user about modules, not listed -- neither in `exposed-modules`, nor in `other-modules`. -- --- Here "home module" means a module, that doesn't come from an other package. +-- Here "home module" means a module, that doesn't come from another package. -- -- For example, if GHC is invoked with modules "A" and "B" as targets, -- but "A" imports some other module "C", then GHC will issue a warning @@ -542,7 +542,7 @@ guessOutputFile = modifySession $ \env -> name_exe = do -- #if defined(mingw32_HOST_OS) --- -- we must add the .exe extention unconditionally here, otherwise +-- -- we must add the .exe extension unconditionally here, otherwise -- -- when name has an extension of its own, the .exe extension will -- -- not be added by DriverPipeline.exeFileName. 
See #2248 -- name' <- fmap (<.> "exe") name @@ -629,7 +629,7 @@ findPartiallyCompletedCycles modsDone theGraph -- -- | Unloading unload :: HscEnv -> [Linkable] -> IO () -unload hsc_env stable_linkables -- Unload everthing *except* 'stable_linkables' +unload hsc_env stable_linkables -- Unload everything *except* 'stable_linkables' = case ghcLink (hsc_dflags hsc_env) of LinkInMemory -> Linker.unload hsc_env stable_linkables _other -> return () @@ -734,7 +734,7 @@ checkStability hpt sccs all_home_mods = -> isObjectLinkable l && t == linkableTime l _other -> True -- why '>=' rather than '>' above? If the filesystem stores - -- times to the nearset second, we may occasionally find that + -- times to the nearest second, we may occasionally find that -- the object & source have the same modification time, -- especially if the source was automatically generated -- and compiled. Using >= is slightly unsafe, but it matches @@ -845,7 +845,7 @@ parUpsweep n_jobs mHscMessage old_hpt stable_mods cleanup sccs = do hsc_env_var <- liftIO $ newMVar hsc_env -- The old HPT is used for recompilation checking in upsweep_mod. When a - -- module sucessfully gets compiled, its HMI is pruned from the old HPT. + -- module successfully gets compiled, its HMI is pruned from the old HPT. old_hpt_var <- liftIO $ newIORef old_hpt -- What we use to limit parallelism with. 
@@ -1934,7 +1934,7 @@ summariseFile hsc_env old_summaries file mb_phase obj_allowed maybe_buf get_src_timestamp = case maybe_buf of Just (_,t) -> return t Nothing -> liftIO $ getModificationUTCTime file - -- getMofificationUTCTime may fail + -- getModificationUTCTime may fail new_summary src_timestamp = do let dflags = hsc_dflags hsc_env @@ -2165,7 +2165,7 @@ preprocessFile hsc_env src_fn mb_phase (Just (buf, _time)) | otherwise = False when needs_preprocessing $ - throwGhcExceptionIO (ProgramError "buffer needs preprocesing; interactive check disabled") + throwGhcExceptionIO (ProgramError "buffer needs preprocessing; interactive check disabled") return (dflags', src_fn, buf) diff --git a/compiler/Eta/Main/HeaderInfo.hs b/compiler/Eta/Main/HeaderInfo.hs index cb716827..1aab5e57 100644 --- a/compiler/Eta/Main/HeaderInfo.hs +++ b/compiler/Eta/Main/HeaderInfo.hs @@ -93,7 +93,7 @@ mkPrelImports :: ModuleName -> SrcSpan -- Attribute the "import Prelude" to this location -> Bool -> [LImportDecl RdrName] -> [LImportDecl RdrName] --- Consruct the implicit declaration "import Prelude" (or not) +-- Construct the implicit declaration "import Prelude" (or not) -- -- NB: opt_NoImplicitPrelude is slightly different to import Prelude (); -- because the former doesn't even look at Prelude.hi for instance diff --git a/compiler/Eta/Main/HscMain.hs b/compiler/Eta/Main/HscMain.hs index 88af2c36..51b7f775 100644 --- a/compiler/Eta/Main/HscMain.hs +++ b/compiler/Eta/Main/HscMain.hs @@ -22,7 +22,7 @@ -- Warning messages are dealt with consistently throughout this API: -- during compilation warnings are collected, and before any function -- in @HscMain@ returns, the warnings are either printed, or turned --- into a real compialtion error if the @-Werror@ flag is enabled. +-- into a real compilation error if the @-Werror@ flag is enabled. 
-- -- (c) The GRASP/AQUA Project, Glasgow University, 1993-2000 -- @@ -349,7 +349,7 @@ hscParse' mod_summary -- that the parser gave us, -- - eliminate files beginning with '<'. gcc likes to use -- pseudo-filenames like "" and "" - -- - normalise them (elimiante differences between ./f and f) + -- - normalise them (eliminate differences between ./f and f) -- - filter out the preprocessed source file -- - filter out anything beginning with tmpdir -- - remove duplicates @@ -799,7 +799,7 @@ hscFileFrontEnd mod_summary = hscTypecheck False mod_summary Nothing -- Note [Safe Haskell Inference] -- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -- Safe Haskell does Safe inference on modules that don't have any specific --- safe haskell mode flag. The basic aproach to this is: +-- safe haskell mode flag. The basic approach to this is: -- * When deciding if we need to do a Safe language check, treat -- an unmarked module as having -XSafe mode specified. -- * For checks, don't throw errors but return them to the caller. @@ -856,7 +856,7 @@ hscCheckSafeImports tcg_env = do -- | Validate that safe imported modules are actually safe. For modules in the -- HomePackage (the package the module we are compiling in resides) this just -- involves checking its trust type is 'Safe' or 'Trustworthy'. For modules --- that reside in another package we also must check that the external pacakge +-- that reside in another package we also must check that the external package -- is trusted. See the Note [Safe Haskell Trust Check] above for more -- information. -- @@ -1094,7 +1094,7 @@ markUnsafeInfer tcg_env whyUnsafe = do mkPlainWarnMsg dflags (warnUnsafeOnLoc dflags) (whyUnsafe' dflags)) liftIO $ writeIORef (tcg_safeInfer tcg_env) False - -- NOTE: Only wipe trust when not in an explicity safe haskell mode. Other + -- NOTE: Only wipe trust when not in an explicitly safe haskell mode. 
Other -- times inference may be on but we are in Trustworthy mode -- so we want -- to record safe-inference failed but not wipe the trust dependencies. case safeHaskell dflags == Sf_None of diff --git a/compiler/Eta/Main/HscTypes.hs b/compiler/Eta/Main/HscTypes.hs index da42cdd8..c073f3ab 100644 --- a/compiler/Eta/Main/HscTypes.hs +++ b/compiler/Eta/Main/HscTypes.hs @@ -559,7 +559,7 @@ emptyPackageIfaceTable :: PackageIfaceTable emptyPackageIfaceTable = emptyModuleEnv pprHPT :: HomePackageTable -> SDoc --- A bit aribitrary for now +-- A bit arbitrary for now pprHPT hpt = pprUDFM hpt $ \hms -> vcat [ hang (ppr (mi_module (hm_iface hm))) 2 (ppr (md_types (hm_details hm))) @@ -1340,7 +1340,7 @@ data ForeignStubs -- 2) C stubs to use when calling -- "foreign exported" functions -- - -- 3) Map of class strings to method defintions: + -- 3) Map of class strings to method definitions: -- "place.Garage extends place.Home" --> [defs...] foreignExportsList :: ExportMethods -> [(Text, ([MethodDef], [FieldDef]))] @@ -1515,7 +1515,7 @@ It's exactly the same for type-family instances. See Trac #7102 data InteractiveContext = InteractiveContext { ic_dflags :: DynFlags, - -- ^ The 'DynFlags' used to evaluate interative expressions + -- ^ The 'DynFlags' used to evaluate interactive expressions -- and statements. ic_mod_index :: Int, @@ -1941,7 +1941,7 @@ implicitTyConThings tc implicitCoTyCon tc ++ -- for each data constructor in order, - -- the contructor, worker, and (possibly) wrapper + -- the constructor, worker, and (possibly) wrapper concatMap (extras_plus . AConLike . RealDataCon) (tyConDataCons tc) -- NB. record selectors are *not* implicit, they have fully-fledged -- bindings that pass through the compilation pipeline as normal. 
@@ -2544,7 +2544,7 @@ data ExternalPackageState eps_mod_fam_inst_env :: !(ModuleEnv FamInstEnv), -- ^ The family instances accumulated from external -- packages, keyed off the module that declared them - eps_stats :: !EpsStats -- ^ Stastics about what was loaded from external packages + eps_stats :: !EpsStats -- ^ Statistics about what was loaded from external packages } -- | Accumulated statistics about what we are putting into the 'ExternalPackageState'. @@ -2792,7 +2792,7 @@ showModMsg dflags target recomp mod_summary {- ************************************************************************ * * -\subsection{Recmpilation} +\subsection{Recompilation} * * ************************************************************************ -} @@ -2973,7 +2973,7 @@ primarily about storing under what trust type a module has been compiled. type IsSafeImport = Bool -- | Safe Haskell information for 'ModIface' --- Simply a wrapper around SafeHaskellMode to sepperate iface and flags +-- Simply a wrapper around SafeHaskellMode to separate iface and flags newtype IfaceTrustInfo = TrustInfo SafeHaskellMode getSafeMode :: IfaceTrustInfo -> SafeHaskellMode @@ -2999,7 +2999,7 @@ numToTrustInfo 1 = setSafeMode Sf_Unsafe numToTrustInfo 2 = setSafeMode Sf_Trustworthy numToTrustInfo 3 = setSafeMode Sf_Safe numToTrustInfo 4 = setSafeMode Sf_Safe -- retained for backwards compat, used - -- to be Sf_SafeInfered but we no longer + -- to be Sf_SafeInferred but we no longer -- differentiate. numToTrustInfo n = error $ "numToTrustInfo: bad input number! (" ++ show n ++ ")" diff --git a/compiler/Eta/Main/InteractiveEval.hs b/compiler/Eta/Main/InteractiveEval.hs index e5ae7576..2124ae33 100644 --- a/compiler/Eta/Main/InteractiveEval.hs +++ b/compiler/Eta/Main/InteractiveEval.hs @@ -223,7 +223,7 @@ runDeclsWithLocation source linenumber expr = ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We don't want to display internally-generated bindings to users. Things like the coercion axiom for newtypes. 
These bindings all get -OccNames that users can't write, to avoid the possiblity of name +OccNames that users can't write, to avoid the possibility of name clashes (in linker symbols). That gives a convenient way to suppress them. The relevant predicate is OccName.isDerivedOccName. See Trac #11051 for more background and examples. @@ -542,7 +542,7 @@ bindLocalsAtBreakpoint hsc_env _apStack_fhv (Just BreakInfo{..}) = do where -- We need a fresh Unique for each Id we bind, because the linker -- state is single-threaded and otherwise we'd spam old bindings - -- whenever we stop at a breakpoint. The InteractveContext is properly + -- whenever we stop at a breakpoint. The InteractiveContext is properly -- saved/restored, but not the linker state. See #1743, test break026. mkNewId :: OccName -> Type -> Id -> IO Id mkNewId occ ty old_id @@ -742,7 +742,7 @@ moduleIsInterpreted modl = withSession $ \h -> _not_a_home_module -> return False -- | Looks up an identifier in the current interactive context (for :info) --- Filter the instances by the ones whose tycons (or clases resp) +-- Filter the instances by the ones whose tycons (or classes resp) -- are in scope (qualified or otherwise). Otherwise we list a whole lot too many! -- The exact choice of which ones to show, and which to hide, is a judgement call. 
-- (see Trac #1581) diff --git a/compiler/Eta/Main/Packages.hs b/compiler/Eta/Main/Packages.hs index 1315636f..abe68129 100644 --- a/compiler/Eta/Main/Packages.hs +++ b/compiler/Eta/Main/Packages.hs @@ -424,7 +424,7 @@ extendPackageConfigMap (PackageConfigMap pkg_map closure) new_pkgs where add pkg_map p = addToUDFM (addToUDFM pkg_map (expandedPackageConfigId p) p) (installedPackageConfigId p) p --- | Looks up the package with the given id in the package state, panicing if it is +-- | Looks up the package with the given id in the package state, panicking if it is -- not found getPackageDetails :: DynFlags -> UnitId -> PackageConfig getPackageDetails dflags pid = diff --git a/compiler/Eta/Main/SysTools.hs b/compiler/Eta/Main/SysTools.hs index fbaeddce..643335fb 100644 --- a/compiler/Eta/Main/SysTools.hs +++ b/compiler/Eta/Main/SysTools.hs @@ -178,7 +178,7 @@ initSysTools mbMinusB let platform = Platform { platformWordSize = 4 , platformArch = undefined , platformOS = undefined - , platformUnregisterised = undefined + , platformUnregistered = undefined , platformHasGnuNonexecStack = undefined , platformHasIdentDirective = undefined , platformHasSubsectionsViaSymbols = undefined @@ -406,9 +406,9 @@ readProcessEnvWithExitCode readProcessEnvWithExitCode prog args env_update = do current_env <- getEnvironment let new_env = env_update ++ [ (k, v) - | let overriden_keys = map fst env_update + | let overridden_keys = map fst env_update , (k, v) <- current_env - , k `notElem` overriden_keys + , k `notElem` overridden_keys ] p = proc prog args diff --git a/compiler/Eta/Main/TidyPgm.hs b/compiler/Eta/Main/TidyPgm.hs index a910ae76..68a67bc7 100644 --- a/compiler/Eta/Main/TidyPgm.hs +++ b/compiler/Eta/Main/TidyPgm.hs @@ -559,7 +559,7 @@ Oh: two other reasons for injecting them late: the sense of chooseExternalIds); else the Ids mentioned in *their* RHSs will be treated as external and you get an interface file saying a18 = - but nothing refererring to a18 (because the implicit 
Id is the + but nothing referring to a18 (because the implicit Id is the one that does, and implicit Ids don't appear in interface files). - More seriously, the tidied type-envt will include the implicit @@ -617,12 +617,12 @@ chooseExternalIds hsc_env mod omit_prags expose_all binds implicit_binds imp_id_ where nc_var = hsc_NC hsc_env - -- init_ext_ids is the intial list of Ids that should be + -- init_ext_ids is the initial list of Ids that should be -- externalised. It serves as the starting point for finding a -- deterministic, tidy, renaming for all external Ids in this -- module. -- - -- It is sorted, so that it has adeterministic order (i.e. it's the + -- It is sorted, so that it has a deterministic order (i.e. it's the -- same list every time this module is compiled), in contrast to the -- bindings, which are ordered non-deterministically. init_work_list = zip init_ext_ids init_ext_ids @@ -664,7 +664,7 @@ chooseExternalIds hsc_env mod omit_prags expose_all binds implicit_binds imp_id_ init_occ_env = initTidyOccEnv avoids - search :: [(Id,Id)] -- The work-list: (external id, referrring id) + search :: [(Id,Id)] -- The work-list: (external id, referring id) -- Make a tidy, external Name for the external id, -- add it to the UnfoldEnv, and do the same for the -- transitive closure of Ids it refers to @@ -932,7 +932,7 @@ findExternalRules omit_prags binds imp_id_rules unfold_env -- local binder (on LHS or RHS) that we have now discarded. -- (NB: ruleFreeVars only includes LocalIds) -- - -- LHS: we have alrady filtered out rules that mention internal Ids + -- LHS: we have already filtered out rules that mention internal Ids -- on LHS but that isn't enough because we might have by now -- discarded a binding with an external Id. (How? -- chooseExternalIds is a bit conservative.) @@ -1067,7 +1067,7 @@ tidyTopName mod nc_var maybe_ref occ_env id -- If we want to externalise a currently-local name, check -- whether we have already assigned a unique for it. 
-- If so, use it; if not, extend the table. - -- All this is done by allcoateGlobalBinder. + -- All this is done by allocateGlobalBinder. -- This is needed when *re*-compiling a module in GHCi; we must -- use the same name for externally-visible things as we did before. @@ -1396,7 +1396,7 @@ First, Template Haskell. Consider (Trac #2386) this data T = Yay String makeOne = [| Yay "Yep" |] Notice that T is exported abstractly, but makeOne effectively exports it too! -A module that splices in $(makeOne) will then look for a declartion of Yay, +A module that splices in $(makeOne) will then look for a declaration of Yay, so it'd better be there. Hence, brutally but simply, we switch off type constructor trimming if TH is enabled in this module. diff --git a/compiler/Eta/Parser/Parse.hs b/compiler/Eta/Parser/Parse.hs index a256477e..27bf82af 100644 --- a/compiler/Eta/Parser/Parse.hs +++ b/compiler/Eta/Parser/Parse.hs @@ -56,7 +56,7 @@ hscParse' mod_summary = do -- that the parser gave us, -- - eliminate files beginning with '<'. gcc likes to use -- pseudo-filenames like "" and "" - -- - normalise them (elimiante differences between ./f and f) + -- - normalise them (eliminate differences between ./f and f) -- - filter out the preprocessed source file -- - filter out anything beginning with tmpdir -- - remove duplicates diff --git a/compiler/Eta/Parser/Parser.y b/compiler/Eta/Parser/Parser.y index 020bb9e1..a9adcb44 100644 --- a/compiler/Eta/Parser/Parser.y +++ b/compiler/Eta/Parser/Parser.y @@ -10,7 +10,7 @@ { {-# LANGUAGE BangPatterns #-} -- required for versions of Happy before 1.18.6 {-# OPTIONS -Wwarn -w #-} --- The above warning supression flag is a temporary kludge. +-- The above warning suppression flag is a temporary kludge. -- While working on this module you are encouraged to remove it and fix -- any warnings in the module. 
See -- http://ghc.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#Warnings @@ -172,7 +172,7 @@ would think the two should never occur in the same context. ----------------------------------------------------------------------------- Conflicts: 38 shift/reduce (1.25) -10 for abiguity in 'if x then y else z + 1' [State 178] +10 for ambiguity in 'if x then y else z + 1' [State 178] (shift parses as 'if x then y else (z + 1)', as per longest-parse rule) 10 because op might be: : - ! * . `x` VARSYM CONSYM QVARSYM QCONSYM @@ -1866,9 +1866,9 @@ varids0 :: { Located [Located RdrName] } Note [Parsing ~] ~~~~~~~~~~~~~~~~ -Due to parsing conflicts between lazyness annotations in data type +Due to parsing conflicts between laziness annotations in data type declarations (see strict_mark) and equality types ~'s are always -parsed as lazyness annotations, and turned into HsEqTy's in the +parsed as laziness annotations, and turned into HsEqTy's in the correct places using RdrHsSyn.splitTilde. Since strict_mark is parsed as part of atype which is part of type, @@ -2019,7 +2019,7 @@ constr_stuff :: { Located (Located RdrName, HsConDeclDetails RdrName) } -- a data constructor. Reason: it might continue like this: -- C t1 t2 %: D Int -- in which case C really would be a type constructor. 
We can't resolve this --- ambiguity till we come across the constructor oprerator :% (or not, more usually) +-- ambiguity till we come across the constructor operator :% (or not, more usually) : btype {% splitCon $1 >>= return.sLL $1 $> } | btype conop btype { sLL $1 $> ($2, InfixCon $1 $3) } @@ -2451,7 +2451,7 @@ cvtopdecls0 :: { [LHsDecl RdrName] } -- "texp" is short for tuple expressions: -- things that can appear unparenthesized as long as they're --- inside parens or delimitted by commas +-- inside parens or delimited by commas texp :: { LHsExpr RdrName } : exp { $1 } @@ -2782,7 +2782,7 @@ fbind :: { LHsRecField RdrName (LHsExpr RdrName) } : qvar '=' texp {% ams (sLL $1 $> $ HsRecField $1 $3 False) [mj AnnEqual $2] } -- RHS is a 'texp', allowing view patterns (Trac #6038) - -- and, incidentaly, sections. Eg + -- and, incidentally, sections. Eg -- f (R { x = show -> s }) = ... | qvar { sLL $1 $> $ HsRecField $1 placeHolderPunRhs True } diff --git a/compiler/Eta/Parser/RdrHsSyn.hs b/compiler/Eta/Parser/RdrHsSyn.hs index d37ac21e..d5a44b5f 100644 --- a/compiler/Eta/Parser/RdrHsSyn.hs +++ b/compiler/Eta/Parser/RdrHsSyn.hs @@ -1124,7 +1124,7 @@ checkPartialTypeSignature fullTy = case fullTy of case unnamedInCtxt of (Found lc : _) -> err hintUnnamedConstraint lc fullTy _ -> return () - -- Calculcate the set of named wildcards in the context that aren't in the + -- Calculate the set of named wildcards in the context that aren't in the -- monotype (tau) let namedWildcardsNotInTau = Set.fromList (namedWildcards namedInCtxt) `Set.difference` @@ -1476,7 +1476,7 @@ mkImport (L lc cconv) (L ls safety) (L loc entity, v, ty) | otherwise = entity -- TODO: Z-encode the result? --- the string "foo" is ambigous: either a header or a C identifier. The +-- the string "foo" is ambiguous: either a header or a C identifier. The -- C identifier case comes first in the alternatives below, so we pick -- that one. 
parseCImport :: Located CCallConv -> Located Safety -> FastString -> String diff --git a/compiler/Eta/Prelude/PrelInfo.hs b/compiler/Eta/Prelude/PrelInfo.hs index 75e0c04b..301c14f6 100644 --- a/compiler/Eta/Prelude/PrelInfo.hs +++ b/compiler/Eta/Prelude/PrelInfo.hs @@ -173,7 +173,7 @@ wiredInThings -- GHCi's ':info' command. lookupKnownNameInfo :: Name -> SDoc lookupKnownNameInfo name = case lookupNameEnv knownNamesInfo name of - -- If we do find a doc, we add comment delimeters to make the output + -- If we do find a doc, we add comment delimiters to make the output -- of ':info' valid Haskell. Nothing -> empty Just doc -> vcat [text "{-", doc, text "-}"] diff --git a/compiler/Eta/Prelude/PrelNames.hs b/compiler/Eta/Prelude/PrelNames.hs index 68ed712c..dccf2b82 100644 --- a/compiler/Eta/Prelude/PrelNames.hs +++ b/compiler/Eta/Prelude/PrelNames.hs @@ -533,7 +533,7 @@ mAIN = mkMainModule_ mAIN_NAME rOOT_MAIN = mkMainModule (fsLit ":Main") -- Root module for initialisation mkInteractiveModule :: Int -> Module --- (mkInteractiveMoudule 9) makes module 'interactive:M9' +-- (mkInteractiveModule 9) makes module 'interactive:M9' mkInteractiveModule n = mkModule interactiveUnitId (mkModuleName ("EtaRepl" ++ show n)) pRELUDE_NAME, mAIN_NAME :: ModuleName diff --git a/compiler/Eta/Prelude/PrelRules.hs b/compiler/Eta/Prelude/PrelRules.hs index ac196a09..da190488 100644 --- a/compiler/Eta/Prelude/PrelRules.hs +++ b/compiler/Eta/Prelude/PrelRules.hs @@ -606,7 +606,7 @@ Shift.$wgo = \ (w_sCS :: GHC.Prim.Int#) (w1_sCT :: [GHC.Types.Bool]) -> Note the massive shift on line "!!!!". It can't happen, because we've checked that w < 64, but the optimiser didn't spot that. We DO NO want to constant-fold this! 
Moreover, if the programmer writes (n `uncheckedShiftL` 9223372036854775807), we -can't constant fold it, but if it gets to the assember we get +can't constant fold it, but if it gets to the assembler we get Error: operand type mismatch for `shl' So the best thing to do is to rewrite the shift with a call to error, diff --git a/compiler/Eta/Prelude/PrimOp.hs b/compiler/Eta/Prelude/PrimOp.hs index 4c446b59..9e38f99a 100644 --- a/compiler/Eta/Prelude/PrimOp.hs +++ b/compiler/Eta/Prelude/PrimOp.hs @@ -4525,7 +4525,7 @@ data dependencies of the state token to enforce write-effect ordering ---------- can_fail ---------------------------- A primop "can_fail" if it can fail with an *unchecked* exception on some elements of its input domain. Main examples: - division (fails on zero demoninator) + division (fails on zero denominator) array indexing (fails if the index is out of bounds) An "unchecked exception" is one that is an outright error, (not @@ -4616,7 +4616,7 @@ Duplicate YES NO Note [Implementation: how can_fail/has_side_effects affect transformations] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -How do we ensure that that floating/duplication/discarding are done right +How do we ensure that floating/duplication/discarding are done right in the simplifier? Two main predicates on primpops test these flags: diff --git a/compiler/Eta/Prelude/TysWiredIn.hs b/compiler/Eta/Prelude/TysWiredIn.hs index c39ae4cc..4c9922c4 100644 --- a/compiler/Eta/Prelude/TysWiredIn.hs +++ b/compiler/Eta/Prelude/TysWiredIn.hs @@ -396,7 +396,7 @@ Note [How tuples work] See also Note [Known-key names] in PrelNames * When looking up an OccName in the original-name cache (IfaceEnv.lookupOrigNameCache), we spot the tuple OccName to make sure we get the right wired-in name. 
This guy can't tell the difference - betweeen BoxedTuple and ConstraintTuple (same OccName!), so tuples + between BoxedTuple and ConstraintTuple (same OccName!), so tuples are not serialised into interface files using OccNames at all. * Serialization to interface files works via the usual mechanism for known-key @@ -411,7 +411,7 @@ GHC supports both boxed and unboxed one-tuples: single value after CPR analysis - A boxed one-tuple is used by DsUtils.mkSelectorBinds, when there is just one binder -Basically it keeps everythig uniform. +Basically it keeps everything uniform. However the /naming/ of the type/data constructors for one-tuples is a bit odd: diff --git a/compiler/Eta/Prelude/primops.txt.pp b/compiler/Eta/Prelude/primops.txt.pp index 6865d149..70f82b53 100644 --- a/compiler/Eta/Prelude/primops.txt.pp +++ b/compiler/Eta/Prelude/primops.txt.pp @@ -57,8 +57,8 @@ defaults has_side_effects = False - out_of_line = False -- See Note Note [PrimOp can_fail and has_side_effects] in PrimOp - can_fail = False -- See Note Note [PrimOp can_fail and has_side_effects] in PrimOp + out_of_line = False -- See Note [PrimOp can_fail and has_side_effects] in PrimOp + can_fail = False -- See Note [PrimOp can_fail and has_side_effects] in PrimOp commutable = False code_size = { primOpCodeSizeDefault } strictness = { \ arity -> mkClosedStrictSig (replicate arity topDmd) topRes } @@ -97,7 +97,7 @@ arithmetic operations, comparisons, and a range of conversions. The 8-bit and 16-bit sizes are always represented as {\tt Int\#} and {\tt Word\#}, and the - operations implemented in terms of the the primops on these + operations implemented in terms of the primops on these types, with suitable range restrictions on the results (using the {\tt narrow$n$Int\#} and {\tt narrow$n$Word\#} families of primops. 
The 32-bit sizes are represented using {\tt @@ -200,7 +200,7 @@ {Return non-zero if there is any possibility that the upper word of a signed integer multiply might contain useful information. Return zero only if you are completely sure that no overflow can occur. - On a 32-bit platform, the recommmended implementation is to do a + On a 32-bit platform, the recommended implementation is to do a 32 x 32 -> 64 signed multiply, and subtract result[63:32] from (result[31] >>signed 31). If this is zero, meaning that the upper word is merely a sign extension of the lower one, no @@ -1209,7 +1209,7 @@ primop ReadByteArrayOp_Int "readIntArray#" GenPrimOp MutableByteArray# s -> Int# -> State# s -> (# State# s, Int# #) - {Read intger; offset in words.} + {Read integer; offset in words.} with has_side_effects = True can_fail = True @@ -2393,7 +2393,7 @@ a -> Int# with -- Note that Par is lazy to avoid that the sparked thing - -- gets evaluted strictly, which it should *not* be + -- gets evaluated strictly, which it should *not* be has_side_effects = True code_size = { primOpCodeSizeForeignCall } @@ -2646,7 +2646,7 @@ {\tt unsafeCoerce\#} to cast a T to an algebraic data type D, unless T is also an algebraic data type. For example, do not cast {\tt Int->Int} to {\tt Bool}, even if you later cast that {\tt Bool} back to {\tt Int->Int} before applying it. The reasons - have to do with GHC's internal representation details (for the congnoscenti, data values + have to do with GHC's internal representation details (for the cognoscenti, data values can be entered but function closures cannot). If you want a safe type to cast things to, use {\tt Any}, which is not an algebraic data type. 
diff --git a/compiler/Eta/Profiling/CostCentre.hs b/compiler/Eta/Profiling/CostCentre.hs index 0c4914cf..f1789551 100644 --- a/compiler/Eta/Profiling/CostCentre.hs +++ b/compiler/Eta/Profiling/CostCentre.hs @@ -320,7 +320,7 @@ instance Binary CostCentre where return (AllCafsCC ae noSrcSpan) -- We ignore the SrcSpans in CostCentres when we serialise them, - -- and set the SrcSpans to noSrcSpan when deserialising. This is + -- and set the SrcSpans to noSrcSpan when deserializing. This is -- ok, because we only need the SrcSpan when declaring the -- CostCentre in the original module, it is not used by importing -- modules. diff --git a/compiler/Eta/REPL.hs b/compiler/Eta/REPL.hs index 286079a6..13984015 100644 --- a/compiler/Eta/REPL.hs +++ b/compiler/Eta/REPL.hs @@ -143,7 +143,7 @@ iservCmd hsc_env@HscEnv{..} msg -- -- If we receive an async exception, such as ^C, while communicating -- with the iserv process then we will be out-of-sync and not be able --- to recoever. Thus we use uninterruptibleMask_ during +-- to recover. Thus we use uninterruptibleMask_ during -- communication. A ^C will be delivered to the iserv process (because -- signals get sent to the whole process group) which will interrupt -- the running computation and return an EvalException result. diff --git a/compiler/Eta/REPL/Leak.hs b/compiler/Eta/REPL/Leak.hs index c209626a..9ea3ca5e 100644 --- a/compiler/Eta/REPL/Leak.hs +++ b/compiler/Eta/REPL/Leak.hs @@ -37,7 +37,7 @@ getLeakIndicators HscEnv{..} = return $ LeakModIndicators{..} -- | Look at the LeakIndicators collected by an earlier call to --- `getLeakIndicators`, and print messasges if any of them are still +-- `getLeakIndicators`, and print messages if any of them are still -- alive. 
checkLeakIndicators :: DynFlags -> LeakIndicators -> IO () checkLeakIndicators dflags (LeakIndicators leakmods) = do diff --git a/compiler/Eta/Rename/RnEnv.hs b/compiler/Eta/Rename/RnEnv.hs index 987c46be..c1af64cf 100644 --- a/compiler/Eta/Rename/RnEnv.hs +++ b/compiler/Eta/Rename/RnEnv.hs @@ -413,7 +413,7 @@ lookupConstructorFields :: Name -> RnM [Name] -- data type decls -- -- * For constructors from imported modules, use the *type* environment --- since imported modles are already compiled, the info is conveniently +-- since imported models are already compiled, the info is conveniently -- right there lookupConstructorFields con_name @@ -485,7 +485,7 @@ greRdrName gre = unqual_rdr -- An unqualified import is available | otherwise = -- Only qualified imports available, so make up - -- a suitable qualifed name from the first imp_spec + -- a suitable qualified name from the first imp_spec ASSERT( not (null imp_specs) ) mkRdrQual (is_as (is_decl (head imp_specs))) occ @@ -541,7 +541,7 @@ of 'G' in the 'instance C S' decl is unambiguous, because C has only one associated type called G. This is exactly what happens for methods, and it is only consistent to do the same thing for types. That's the role of the function lookupTcdName; the (Maybe Name) give the class of -the encloseing instance decl, if any. +the enclosing instance decl, if any. Note [Looking up Exact RdrNames] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -612,7 +612,7 @@ But when adding to the UsedRdrNames we must make that qualification explicit (saying "used M.f"), otherwise we get "Redundant import of M.f". So we make up a suitable (fake) RdrName. But be careful - import qualifed M + import qualified M import M( C(f) ) instance C T where f x = x @@ -851,7 +851,7 @@ Note [Handling of deprecations] * We report deprecations at each *occurrence* of the deprecated thing (see Trac #5867) -* We do not report deprectations for locally-definded names. For a +* We do not report deprectations for locally-defined names. 
For a start, we may be exporting a deprecated thing. Also we may use a deprecated thing in the defn of another deprecated things. We may even use a deprecated thing in the defn of a non-deprecated thing, @@ -876,7 +876,7 @@ addUsedRdrName warnIfDeprec gre rdr addUsedRdrNames :: [RdrName] -> RnM () -- Record used sub-binders -- We don't check for imported-ness here, because it's inconvenient --- and not stritly necessary. +-- and not strictly necessary. -- NB: no call to warnIfDeprecated; see Note [Handling of deprecations] addUsedRdrNames rdrs = do { env <- getGblEnv @@ -1038,10 +1038,10 @@ data HsSigCtxt = TopSigCtxt NameSet -- At top level, binding these names -- See Note [Signatures for top level things] -- Bool <=> ok to give sig for - -- class method or record selctor + -- class method or record selector | LocalBindCtxt NameSet -- In a local binding, binding these names | ClsDeclCtxt Name -- Class decl for this class - | InstDeclCtxt Name -- Intsance decl for this class + | InstDeclCtxt Name -- Instance decl for this class | HsBootCtxt NameSet -- Top level of a hs-boot file | RoleAnnotCtxt NameSet -- A role annotation, with the names of all types -- in the group @@ -1218,7 +1218,7 @@ type MiniFixityEnv = FastStringEnv (Located Fixity) -- -- It is keyed by the *FastString*, not the *OccName*, because -- the single fixity decl infix 3 T - -- affects both the data constructor T and the type constrctor T + -- affects both the data constructor T and the type constructor T -- -- We keep the location so that if we find -- a duplicate, we can report it sensibly diff --git a/compiler/Eta/Rename/RnExpr.hs b/compiler/Eta/Rename/RnExpr.hs index 644d7f8c..200f6ef8 100644 --- a/compiler/Eta/Rename/RnExpr.hs +++ b/compiler/Eta/Rename/RnExpr.hs @@ -133,7 +133,7 @@ rnExpr (OpApp e1 (L op_loc (HsVar op_rdr)) _ e2) ; op_name <- setSrcSpan op_loc (lookupOccRn op_rdr) ; (op', fv_op) <- finishHsVar op_name -- NB: op' is usually just a variable, but might be - -- an applicatoin 
(assert "Foo.hs:47") + -- an application (assert "Foo.hs:47") -- Deal with fixity -- When renaming code synthesised from "deriving" declarations -- we used to avoid fixity stuff, but we can't easily tell any @@ -663,7 +663,7 @@ transformation loses the ability to do A and C in parallel. The algorithm works by first splitting the sequence of statements into independent "segments", and a separate "tail" (the final statement). In -our example above, the segements would be +our example above, the segments would be [ x <- A , y <- B x ] @@ -1007,7 +1007,7 @@ allowed this to be transformed into (\(x,y) -> \z -> C) <$> A <*> B -then it could be lazier than the standard desuraging using >>=. See #13875 +then it could be lazier than the standard desugaring using >>=. See #13875 for more examples. Thus, whenever we have a strict pattern match, we treat it as a @@ -1373,7 +1373,7 @@ rnStmt ctxt _ (L loc (TransStmt { trS_stmts = stmts, trS_by = by, trS_form = for ; (thing, fvs_thing) <- thing_inside bndrs ; let fvs = fvs_by `plusFV` fvs_thing used_bndrs = filter (`elemNameSet` fvs) bndrs - -- The paper (Fig 5) has a bug here; we must treat any free varaible + -- The paper (Fig 5) has a bug here; we must treat any free variable -- of the "thing inside", **or of the by-expression**, as used ; return ((by', used_bndrs, thing), fvs) } @@ -1469,7 +1469,7 @@ Note that (c) The 'bs' in the second group must obviously not be captured by the binding in the first group -To satisfy (a) we nest the segements. +To satisfy (a) we nest the segments. To satisfy (b) we check for duplicates just before thing_inside. To satisfy (c) we reset the LocalRdrEnv each time. 
diff --git a/compiler/Eta/Rename/RnFixity.hs b/compiler/Eta/Rename/RnFixity.hs index 64ba7c5e..a0614c30 100644 --- a/compiler/Eta/Rename/RnFixity.hs +++ b/compiler/Eta/Rename/RnFixity.hs @@ -61,7 +61,7 @@ type MiniFixityEnv = FastStringEnv (Located Fixity) -- -- It is keyed by the *FastString*, not the *OccName*, because -- the single fixity decl infix 3 T - -- affects both the data constructor T and the type constrctor T + -- affects both the data constructor T and the type constructor T -- -- We keep the location so that if we find -- a duplicate, we can report it sensibly @@ -172,7 +172,7 @@ lookupTyFixityRn :: Located Name -> RnM Fixity lookupTyFixityRn (L _ n) = lookupFixityRn n -- | Look up the fixity of a (possibly ambiguous) occurrence of a record field --- selector. We use 'lookupFixityRn'' so that we can specifiy the 'OccName' as +-- selector. We use 'lookupFixityRn'' so that we can specify the 'OccName' as -- the field label, which might be different to the 'OccName' of the selector -- 'Name' if @DuplicateRecordFields@ is in use (Trac #1173). If there are -- multiple possible selectors with different fixities, generate an error. diff --git a/compiler/Eta/Rename/RnNames.hs b/compiler/Eta/Rename/RnNames.hs index 6d9eebbf..4c135477 100644 --- a/compiler/Eta/Rename/RnNames.hs +++ b/compiler/Eta/Rename/RnNames.hs @@ -72,12 +72,12 @@ we must also check that these rules hold transitively for all dependent modules and packages. Doing this without caching any trust information would be very slow as we would need to touch all packages and interface files a module depends on. To avoid this we make use of the property that if a modules Safe Haskell -mode changes, this triggers a recompilation from that module in the dependcy +mode changes, this triggers a recompilation from that module in the dependency graph. So we can just worry mostly about direct imports. 
There is one trust property that can change for a package though without recompliation being triggered: package trust. So we must check that all -packages a module tranitively depends on to be trusted are still trusted when +packages a module transitively depends on to be trusted are still trusted when we are compiling this module (as due to recompilation avoidance some modules below may not be considered trusted any more without recompilation being triggered). @@ -121,7 +121,7 @@ So there is an interesting design question in regards to transitive trust checking. Say I have a module B compiled with -XSafe. B is dependent on a bunch of modules and packages, some packages it requires to be trusted as its using -XTrustworthy modules from them. Now if I have a module A that doesn't use safe -haskell at all and simply imports B, should A inherit all the the trust +haskell at all and simply imports B, should A inherit all the trust requirements from B? Should A now also require that a package p is trusted since B required it? @@ -342,7 +342,7 @@ calculateAvails dflags iface mod_safe' want_boot imported_by = -- reported. Easiest thing is just to filter them out up -- front. This situation only arises if a module imports -- itself, or another module that imported it. (Necessarily, - -- this invoves a loop.) + -- this involves a loop.) -- -- We do this *after* filterImports, so that if you say -- module A where @@ -789,7 +789,7 @@ top level binders specially in two ways it seem like qualified import. 
* We only shadow *External* names (which come from the main module) - Do not shadow *Inernal* names because in the bracket + Do not shadow *Internal* names because in the bracket [d| class C a where f :: a f = 4 |] rnSrcDecls will first call extendGlobalRdrEnvRn with C[f] from the @@ -1599,7 +1599,7 @@ isModuleExported implicit_prelude mod (GRE { gre_name = name, gre_prov = prov }) ------------------------------- check_occs :: IE RdrName -> ExportOccMap -> [Name] -> RnM ExportOccMap -check_occs ie occs names -- 'names' are the entities specifed by 'ie' +check_occs ie occs names -- 'names' are the entities specified by 'ie' = foldlM check occs names where check occs name @@ -1642,7 +1642,7 @@ dupExport_ok :: Name -> IE RdrName -> IE RdrName -> Bool -- Example of "yes" (Trac #2436) -- module M( C(..), T(..) ) where -- class C a where { data T a } --- instace C Int where { data T Int = TInt } +-- instance C Int where { data T Int = TInt } -- -- Example of "yes" (Trac #2436) -- module Foo ( T ) where @@ -1811,7 +1811,7 @@ warnMissingSig flag msg id {- Note [The ImportMap] ~~~~~~~~~~~~~~~~~~~~ -The ImportMap is a short-lived intermediate data struture records, for +The ImportMap is a short-lived intermediate data structure records, for each import declaration, what stuff brought into scope by that declaration is actually used in the module. diff --git a/compiler/Eta/Rename/RnSource.hs b/compiler/Eta/Rename/RnSource.hs index 61ec0813..f3d1f086 100644 --- a/compiler/Eta/Rename/RnSource.hs +++ b/compiler/Eta/Rename/RnSource.hs @@ -86,7 +86,7 @@ rnSrcDecls group@(HsGroup { hs_valds = val_decls, = do { -- (A) Process the fixity declarations, creating a mapping from -- FastStrings to FixItems. - -- Also checks for duplcates. + -- Also checks for duplicates. 
local_fix_env <- makeMiniFixityEnv fix_decls ; -- (B) Bring top level binders (and their fixities) into scope, @@ -97,7 +97,7 @@ rnSrcDecls group@(HsGroup { hs_valds = val_decls, -- because they do not have value declarations. -- Aso step (C) depends on datacons and record fields -- - -- * Pattern synonyms, becuase they (and data constructors) + -- * Pattern synonyms, because they (and data constructors) -- are needed for rnTopBindLHS (Trac #9889) -- -- * For hs-boot files, include the value signatures @@ -415,7 +415,7 @@ rnHsForeignDecl (ForeignExport name ty _ spec) -- | For Windows DLLs we need to know what packages imported symbols are from -- to generate correct calls. Imported symbols are tagged with the current --- package, so if they get inlined across a package boundry we'll still +-- package, so if they get inlined across a package boundary we'll still -- know where they're from. -- patchForeignImport :: UnitId -> ForeignImport -> ForeignImport @@ -770,7 +770,7 @@ with LHSs with a complicated desugaring (and hence unlikely to match); (e.g. a case expression is not allowed: too elaborate.) But there are legitimate non-trivial args ei, like sections and -lambdas. So it seems simmpler not to check at all, and that is why +lambdas. So it seems simpler not to check at all, and that is why check_e is commented out. -} @@ -939,7 +939,7 @@ See also Note [Grouping of type and class declarations] in TcTyClsDecls. rnTyClDecls :: [TyClGroup RdrName] -> RnM ([TyClGroup Name], FreeVars) --- Rename the declarations and do depedency analysis on them +-- Rename the declarations and do dependency analysis on them rnTyClDecls tycl_ds = do { ds_w_fvs <- mapM (wrapLocFstM rnTyClDecl) (tyClGroupConcat tycl_ds) ; let decl_names = mkNameSet (map (tcdName . unLoc . 
fst) ds_w_fvs) @@ -1191,7 +1191,7 @@ rnDataDefn doc (HsDataDefn { dd_ND = new_or_data, dd_metaData = (cType, _java_an badGadtStupidTheta :: HsDocContext -> SDoc badGadtStupidTheta _ = vcat [ptext (sLit "No context is allowed on a GADT-style data declaration"), - ptext (sLit "(You can put a context on each contructor, though.)")] + ptext (sLit "(You can put a context on each constructor, though.)")] rnFamDecl :: Maybe Name -- Just cls => this FamilyDecl is nested diff --git a/compiler/Eta/Rename/RnSplice.hs b/compiler/Eta/Rename/RnSplice.hs index 86318e44..4adf9cef 100644 --- a/compiler/Eta/Rename/RnSplice.hs +++ b/compiler/Eta/Rename/RnSplice.hs @@ -831,7 +831,7 @@ A thing can have a bind_lvl of outerLevel, but have an internal name: foo = [d| op = 3 bop = op + 1 |] Here the bind_lvl of 'op' is (bogusly) outerLevel, even though it is -bound inside a bracket. That is because we don't even even record +bound inside a bracket. That is because we don't even record binding levels for top-level things; the binding levels are in the LocalRdrEnv. 
diff --git a/compiler/Eta/Rename/RnTypes.hs b/compiler/Eta/Rename/RnTypes.hs index 3fd0c8d2..880858ce 100644 --- a/compiler/Eta/Rename/RnTypes.hs +++ b/compiler/Eta/Rename/RnTypes.hs @@ -12,7 +12,7 @@ module Eta.Rename.RnTypes ( rnHsSigType, rnLHsInstType, rnConDeclFields, newTyVarNameRn, - -- Precence related stuff + -- Precedence related stuff mkOpAppRn, mkNegAppRn, mkOpFormRn, mkConOpPatRn, checkPrecMatch, checkSectionPrec, @@ -286,7 +286,7 @@ rnHsTyKiForAll :: Bool -> HsDocContext -> HsType RdrName -> RnM (HsType Name, FreeVars) rnHsTyKiForAll isType doc (HsForAllTy Implicit extra _ lctxt@(L _ ctxt) ty) = ASSERT( isType ) do - -- Implicit quantifiction in source code (no kinds on tyvars) + -- Implicit quantification in source code (no kinds on tyvars) -- Given the signature C => T we universally quantify -- over FV(T) \ {in-scope-tyvars} rdr_env <- getLocalRdrEnv @@ -331,7 +331,7 @@ rnHsTyKiForAll isType doc in_type_doc = ptext (sLit "In the type") <+> quotes (ppr ty) ; warnUnusedForAlls (in_type_doc $$ docOfHsDocContext doc) forall_tyvars mentioned - ; traceRn "rnHsTyKiForAll:Exlicit" (vcat + ; traceRn "rnHsTyKiForAll:Explicit" (vcat [ppr forall_tyvars, ppr lctxt,ppr tau ]) ; rnForAll doc Explicit extra kvs forall_tyvars lctxt tau } @@ -436,7 +436,7 @@ bindHsTyVars doc mb_assoc kv_bndrs tv_bndrs thing_inside ; (kind', fvs) <- rnLHsKind doc kind ; return (L loc (KindedTyVar (L lv nm) kind'), fvs) } - -- Check for duplicate or shadowed tyvar bindrs + -- Check for duplicate or shadowed tyvar bndrs ; checkDupRdrNames tv_names_w_loc ; when (isNothing mb_assoc) (checkShadowedRdrNames tv_names_w_loc) @@ -556,7 +556,7 @@ the programmer actually wrote, so you can't find it out from the Name. Furthermore, the second argument is guaranteed not to be another operator application. Why? Because the parser parses all -operator appications left-associatively, EXCEPT negation, which +operator applications left-associatively, EXCEPT negation, which we need to handle specially. 
Infix types are read in a *right-associative* way, so that a `op` b `op` c @@ -584,7 +584,7 @@ mkHsOpTyRn mk1 pp_op1 fix1 ty1 (L loc2 (HsFunTy ty21 ty22)) = mk_hs_op_ty mk1 pp_op1 fix1 ty1 HsFunTy funTyConName funTyFixity ty21 ty22 loc2 -mkHsOpTyRn mk1 _ _ ty1 ty2 -- Default case, no rearrangment +mkHsOpTyRn mk1 _ _ ty1 ty2 -- Default case, no rearrangement = return (mk1 ty1 ty2) --------------- @@ -650,7 +650,7 @@ mkOpAppRn e1 op1 fix1 e2@(L _ (NegApp _ _)) -- NegApp can occur on the right --------------------------- -- Default case -mkOpAppRn e1 op fix e2 -- Default case, no rearrangment +mkOpAppRn e1 op fix e2 -- Default case, no rearrangement = ASSERT2( right_op_ok fix (unLoc e2), ppr e1 $$ text "---" $$ ppr op $$ text "---" $$ ppr fix $$ text "---" $$ ppr e2 ) @@ -706,7 +706,7 @@ mkOpFormRn a1@(L loc (HsCmdTop (L _ (HsCmdArrForm op1 (Just fix1) [a11,a12])) _ (nofix_error, associate_right) = compareFixity fix1 fix2 -- Default case -mkOpFormRn arg1 op fix arg2 -- Default case, no rearrangment +mkOpFormRn arg1 op fix arg2 -- Default case, no rearrangement = return (HsCmdArrForm op (Just fix) [arg1, arg2]) @@ -727,7 +727,7 @@ mkConOpPatRn op2 fix2 p1@(L loc (ConPatIn op1 (InfixCon p11 p12))) p2 ; return (ConPatIn op1 (InfixCon p11 (L loc new_p))) } -- XXX loc right? else return (ConPatIn op2 (InfixCon p1 p2)) } -mkConOpPatRn op _ p1 p2 -- Default case, no rearrangment +mkConOpPatRn op _ p1 p2 -- Default case, no rearrangement = ASSERT( not_op_pat (unLoc p2) ) return (ConPatIn op (InfixCon p1 p2)) @@ -883,7 +883,7 @@ opTyErr _ ty = pprPanic "opTyErr: Not an op" (ppr ty) Note [Kind and type-variable binders] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -In a type signature we may implicitly bind type varaible and, more +In a type signature we may implicitly bind type variable and, more recently, kind variables. For example: * f :: a -> a f = ... 
diff --git a/compiler/Eta/SimplCore/CallArity.hs b/compiler/Eta/SimplCore/CallArity.hs index f9b0d782..d8c5a9c8 100644 --- a/compiler/Eta/SimplCore/CallArity.hs +++ b/compiler/Eta/SimplCore/CallArity.hs @@ -26,7 +26,7 @@ import Control.Arrow ( first, second ) {- %************************************************************************ %* * - Call Arity Analyis + Call Arity Analysis %* * %************************************************************************ @@ -76,7 +76,7 @@ correct. What we want to know from an expression --------------------------------------- -In order to obtain that information for variables, we analyize expression and +In order to obtain that information for variables, we analyze expression and obtain bits of information: I. The arity analysis: @@ -95,7 +95,7 @@ For efficiency reasons, we gather this information only for a set of The two analysis are not completely independent, as a higher arity can improve the information about what variables are being called once or multiple times. -Note [Analysis I: The arity analyis] +Note [Analysis I: The arity analysis] ------------------------------------ The arity analysis is quite straight forward: The information about an @@ -104,8 +104,8 @@ expression is an where absent variables are bound to Nothing and otherwise to a lower bound to their arity. -When we analyize an expression, we analyize it with a given context arity. -Lambdas decrease and applications increase the incoming arity. Analysizing a +When we analyze an expression, we analyze it with a given context arity. +Lambdas decrease and applications increase the incoming arity. Analyzing a variable will put that arity in the environment. In lets or cases all the results from the various subexpressions are lubed, which takes the point-wise minimum (considering Nothing an infinity). @@ -115,7 +115,7 @@ Note [Analysis II: The Co-Called analysis] ------------------------------------------ The second part is more sophisticated. 
For reasons explained below, it is not -sufficient to simply know how often an expression evalutes a variable. Instead +sufficient to simply know how often an expression evaluates a variable. Instead we need to know which variables are possibly called together. The data structure here is an undirected graph of variables, which is provided @@ -147,10 +147,10 @@ The interesting cases of the analysis: any useful co-call information. Return (fv e)² * Case alternatives alt₁,alt₂,...: - Only one can be execuded, so + Only one can be executed, so Return (alt₁ ∪ alt₂ ∪...) * App e₁ e₂ (and analogously Case scrut alts), with non-trivial e₂: - We get the results from both sides, with the argument evaluted at most once. + We get the results from both sides, with the argument evaluated at most once. Additionally, anything called by e₁ can possibly be called with anything from e₂. Return: C(e₁) ∪ C(e₂) ∪ (fv e₁) × (fv e₂) @@ -160,7 +160,7 @@ The interesting cases of the analysis: Return: C(e₁) ∪ (fv e₁) × {x} ∪ {(x,x)} * Let v = rhs in body: In addition to the results from the subexpressions, add all co-calls from - everything that the body calls together with v to everthing that is called + everything that the body calls together with v to everything that is called by v. Return: C'(rhs) ∪ C(body) ∪ (fv rhs) × {v'| {v,v'} ∈ C(body)} * Letrec v₁ = rhs₁ ... vₙ = rhsₙ in body @@ -274,7 +274,7 @@ together with what other functions. Note [Analysis type signature] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The work-hourse of the analysis is the function `callArityAnal`, with the +The work-horse of the analysis is the function `callArityAnal`, with the following type: type CallArityRes = (UnVarGraph, VarEnv Arity) @@ -321,7 +321,7 @@ everytime we would be lookup up `x` in the analysis result of `e2`. that this variable might be called many times with no variables. * Instead of checking `calledWith x`, we assume that everything can be called with it. 
- * In the recursive case, when calclulating the `cross_calls`, if there is + * In the recursive case, when calculating the `cross_calls`, if there is any boring variable in the recursive group, we ignore all co-call-results and directly go to a very conservative assumption. @@ -340,9 +340,9 @@ For a mutually recursive let, we begin by 3. We combine the analysis result from the body and the memoized results for the arguments (if already present). 4. For each variable, we find out the incoming arity and whether it is called - once, based on the the current analysis result. If this differs from the + once, based on the current analysis result. If this differs from the memoized results, we re-analyse the rhs and update the memoized table. - 5. If nothing had to be reanalized, we are done. + 5. If nothing had to be reanalyzed, we are done. Otherwise, repeat from step 3. @@ -350,10 +350,10 @@ Note [Thunks in recursive groups] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We never eta-expand a thunk in a recursive group, on the grounds that if it is -part of a recursive group, then it will be called multipe times. +part of a recursive group, then it will be called multiple times. This is not necessarily true, e.g. it would be safe to eta-expand t2 (but not -t1) in the follwing code: +t1) in the following code: let go x = t1 t1 = if ... then t2 else ... @@ -371,7 +371,7 @@ Note [Analysing top-level binds] We can eta-expand top-level-binds if they are not exported, as we see all calls to them. The plan is as follows: Treat the top-level binds as nested lets around a body representing “all external calls”, which returns a pessimistic -CallArityRes (the co-call graph is the complete graph, all arityies 0). +CallArityRes (the co-call graph is the complete graph, all arities 0). Note [Trimming arity] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -468,7 +468,7 @@ callArityAnal arity int (Lam v e) where (ae, e') = callArityAnal (arity - 1) (int `delVarSet` v) e --- Application. 
Increase arity for the called expresion, nothing to know about +-- Application. Increase arity for the called expression, nothing to know about -- the second callArityAnal arity int (App e (Type t)) = second (\e -> App e (Type t)) $ callArityAnal arity int e @@ -599,11 +599,11 @@ callArityBind boring_vars ae_body int b@(Rec binds) | Just (old_called_once, old_arity, _) <- mbLastRun , called_once == old_called_once , new_arity == old_arity - -- No change, no need to re-analize + -- No change, no need to reanalyze = (False, (i, mbLastRun, rhs)) | otherwise - -- We previously analized this with a different arity (or not at all) + -- We previously analyzed this with a different arity (or not at all) = let is_thunk = not (exprIsHNF rhs) safe_arity | is_thunk = 0 -- See Note [Thunks in recursive groups] @@ -677,7 +677,7 @@ trimArity v a = minimum [a, max_arity_by_type, max_arity_by_strsig] --------------------------------------- -- Result type for the two analyses. --- See Note [Analysis I: The arity analyis] +-- See Note [Analysis I: The arity analysis] -- and Note [Analysis II: The Co-Called analysis] type CallArityRes = (UnVarGraph, VarEnv Arity) diff --git a/compiler/Eta/SimplCore/CoreMonad.hs b/compiler/Eta/SimplCore/CoreMonad.hs index 10ac51c9..1df6583f 100644 --- a/compiler/Eta/SimplCore/CoreMonad.hs +++ b/compiler/Eta/SimplCore/CoreMonad.hs @@ -883,7 +883,7 @@ instance MonadThings CoreM where -- | Attempt to convert a Template Haskell name to one that GHC can -- understand. Original TH names such as those you get when you use -- the @'foo@ syntax will be translated to their equivalent GHC name --- exactly. Qualified or unqualifed TH names will be dynamically bound +-- exactly. Qualified or unqualified TH names will be dynamically bound -- to names in the module being compiled, if possible. Exact TH names -- will be bound to the name they represent, exactly. 
thNameToGhcName :: TH.Name -> CoreM (Maybe Name) diff --git a/compiler/Eta/SimplCore/FloatIn.hs b/compiler/Eta/SimplCore/FloatIn.hs index b4573a59..b2df681b 100644 --- a/compiler/Eta/SimplCore/FloatIn.hs +++ b/compiler/Eta/SimplCore/FloatIn.hs @@ -427,7 +427,7 @@ okToFloatInside bndrs = all ok bndrs noFloatIntoRhs :: AnnExpr' Var (UniqDFM Var) -> Type -> Bool -- ^ True if it's a bad idea to float bindings into this RHS --- Preconditio: rhs :: rhs_ty +-- Precondition: rhs :: rhs_ty noFloatIntoRhs rhs rhs_ty = isUnLiftedType rhs_ty -- See Note [Do not destroy the let/app invariant] || noFloatIntoExpr rhs diff --git a/compiler/Eta/SimplCore/FloatOut.hs b/compiler/Eta/SimplCore/FloatOut.hs index 01fa3a18..ddc3b792 100644 --- a/compiler/Eta/SimplCore/FloatOut.hs +++ b/compiler/Eta/SimplCore/FloatOut.hs @@ -99,7 +99,7 @@ It turns out that this generates a subexpression of the form @ \deq x ys -> let eq = eqFromEqDict deq in ... @ -vwhich might usefully be separated to +which might usefully be separated to @ \deq -> let eq = eqFromEqDict deq in \xy -> ... @ @@ -233,7 +233,7 @@ Note [floatBind for top level] We may have a *nested* binding whose destination level is (FloatMe tOP_LEVEL), thus letrec { foo <0,0> = .... (let bar<0,0> = .. in ..) .... } The binding for bar will be in the "tops" part of the floating binds, -and thus not partioned by floatBody. +and thus not partitioned by floatBody. We could perhaps get rid of the 'tops' component of the floating binds, but this case works just as well. diff --git a/compiler/Eta/SimplCore/LiberateCase.hs b/compiler/Eta/SimplCore/LiberateCase.hs index 2cb6ddf6..157fe09e 100644 --- a/compiler/Eta/SimplCore/LiberateCase.hs +++ b/compiler/Eta/SimplCore/LiberateCase.hs @@ -269,7 +269,7 @@ We get the following levels y 2 Then 'x' is being scrutinised at a deeper level than its binding, so -it's added to lc_sruts: [(x,1)] +it's added to lc_scruts: [(x,1)] We do *not* want to specialise the call to 'f', because 'x' is not free in 'f'. 
So here the bind-level of 'x' (=1) is not <= the bind-level of 'f' (=0). diff --git a/compiler/Eta/SimplCore/OccurAnal.hs b/compiler/Eta/SimplCore/OccurAnal.hs index 685c7dcb..b84e54c4 100644 --- a/compiler/Eta/SimplCore/OccurAnal.hs +++ b/compiler/Eta/SimplCore/OccurAnal.hs @@ -361,7 +361,7 @@ That's why we compute So we must *not* postInlineUnconditionally 'g', even though its RHS turns out to be trivial. (I'm assuming that 'g' is - not choosen as a loop breaker.) Why not? Because then we + not chosen as a loop breaker.) Why not? Because then we drop the binding for 'g', which leaves it out of scope in the RULE! @@ -513,7 +513,7 @@ Note [Specialising imported functions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ BUT for *automatically-generated* rules, the programmer can't be responsible for the "programmer error" in Note [Rules for imported -functions]. In paricular, consider specialising a recursive function +functions]. In particular, consider specialising a recursive function defined in another module. If we specialise a recursive function B.g, we get g_spec = .....(B.g Int)..... @@ -933,7 +933,7 @@ reOrderNodes depth bndr_set weak_fvs (node : nodes) binds | otherwise = 0 -- Checking for a constructor application - -- Cheap and cheerful; the simplifer moves casts out of the way + -- Cheap and cheerful; the simplifier moves casts out of the way -- The lambda case is important to spot x = /\a. C (f a) -- which comes up when C is a dictionary constructor and -- f is a default method. 
@@ -1282,7 +1282,7 @@ occAnal env (Case scrut bndr ty alts) | t `tickishScopesLike` SoftScope -- No reason to not look through all ticks here, but only -- for soft-scoped ticks we can do so without having to - -- update returned occurance info (see occAnal) + -- update returned occurrence info (see occAnal) = second (Tick t) $ occ_anal_scrut e alts occ_anal_scrut scrut _alts @@ -1376,7 +1376,7 @@ markManyIf False uds = uds {- Note [Use one-shot information] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The occurrrence analyser propagates one-shot-lambda information in two situation +The occurrence analyser propagates one-shot-lambda information in two situations * Applications: eg build (\cn -> blah) Propagate one-shot info from the strictness signature of 'build' to the \cn @@ -1401,7 +1401,7 @@ binders are unused. See esp the call to isDeadBinder in Simplify.mkDupableAlt In this example, though, the Simplifier will bring 'a' and 'b' back to -life, beause it binds 'y' to (a,b) (imagine got inlined and +life, because it binds 'y' to (a,b) (imagine got inlined and scrutinised y). -} @@ -1468,8 +1468,8 @@ type GlobalScruts = IdSet -- See Note [Binder swap on GlobalId scrutinees] -- x = (p,q) -- Don't inline p or q -- y = /\a -> (p a, q a) -- Still don't inline p or q -- z = f (p,q) -- Do inline p,q; it may make a rule fire --- So OccEncl tells enought about the context to know what to do when --- we encounter a contructor application or PAP. +-- So OccEncl tells enough about the context to know what to do when +-- we encounter a constructor application or PAP. data OccEncl = OccRhs -- RHS of let(rec), albeit perhaps inside a type lambda @@ -1517,7 +1517,7 @@ oneShotGroup :: OccEnv -> [CoreBndr] -- The result binders have one-shot-ness set that they might not have had originally. -- This happens in (build (\cn -> e)). Here the occurrence analyser -- linearity context knows that c,n are one-shot, and it records that fact in - -- the binder. 
This is useful to guide subsequent float-in/float-out tranformations + -- the binder. This is useful to guide subsequent float-in/float-out transformations oneShotGroup env@(OccEnv { occ_one_shots = ctxt }) bndrs = go ctxt bndrs [] diff --git a/compiler/Eta/SimplCore/SetLevels.hs b/compiler/Eta/SimplCore/SetLevels.hs index 4f88f329..98bb15de 100644 --- a/compiler/Eta/SimplCore/SetLevels.hs +++ b/compiler/Eta/SimplCore/SetLevels.hs @@ -342,7 +342,7 @@ lvlExpr env expr@(_, AnnLam {}) (bndrs, body) = collectAnnBndrs expr (env1, bndrs1) = substBndrsSL NonRecursive env bndrs (new_env, new_bndrs) = lvlLamBndrs env1 (le_ctxt_lvl env) bndrs1 - -- At one time we called a special verion of collectBinders, + -- At one time we called a special version of collectBinders, -- which ignored coercions, because we don't want to split -- a lambda like this (\x -> coerce t (\s -> ...)) -- This used to happen quite a bit in state-transformer programs, @@ -391,7 +391,7 @@ lvlCase env scrut_fvs scrut' case_bndr ty alts where incd_lvl = incMinorLvl (le_ctxt_lvl env) dest_lvl = maxFvLevel (const True) env scrut_fvs - -- Don't abstact over type variables, hence const True + -- Don't abstract over type variables, hence const True lvl_alt alts_env (con, bs, rhs) = do { rhs' <- lvlMFE True new_env rhs @@ -514,13 +514,13 @@ lvlMFE strict_ctxt env ann_expr@(fvs, _) -- concat = /\ a -> lvl a -- which is pretty stupid. Hence the strict_ctxt test -- - -- Also a strict contxt includes uboxed values, and they + -- Also a strict context includes unboxed values, and they -- can't be bound at top level {- Note [Unlifted MFEs] ~~~~~~~~~~~~~~~~~~~~ -We don't float unlifted MFEs, which potentially loses big opportunites. +We don't float unlifted MFEs, which potentially loses big opportunities. For example: \x -> f (h y) where h :: Int -> Int# is expensive. 
We'd like to float the (h y) outside @@ -536,7 +536,7 @@ we'd like to float the call to error, to get Furthermore, we want to float a bottoming expression even if it has free variables: f = \x. g (let v = h x in error ("urk" ++ v)) -Then we'd like to abstact over 'x' can float the whole arg of g: +Then we'd like to abstract over 'x' can float the whole arg of g: lvl = \x. let v = h x in error ("urk" ++ v) f = \x. g (lvl x) See Maessen's paper 1999 "Bottom extraction: factoring error handling out @@ -995,7 +995,7 @@ lookupVar le v = case lookupVarEnv (le_env le) v of _ -> Var v abstractVars :: Level -> LevelEnv -> DVarSet -> [OutVar] - -- Find the variables in fvs, free vars of the target expresion, + -- Find the variables in fvs, free vars of the target expression, -- whose level is greater than the destination level -- These are the ones we are going to abstract out -- @@ -1115,7 +1115,7 @@ zap_demand_info v Note [Zapping the demand info] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ VERY IMPORTANT: we must zap the demand info if the thing is going to -float out, becuause it may be less demanded than at its original +float out, because it may be less demanded than at its original binding site. Eg f :: Int -> Int f x = let v = 3*4 in v+x diff --git a/compiler/Eta/SimplCore/SimplCore.hs b/compiler/Eta/SimplCore/SimplCore.hs index 13c39642..556c7394 100644 --- a/compiler/Eta/SimplCore/SimplCore.hs +++ b/compiler/Eta/SimplCore/SimplCore.hs @@ -83,7 +83,7 @@ core2core hsc_env guts hpt_rule_base = mkRuleBase home_pkg_rules mod = mg_module guts -- mod: get the module out of the current HscEnv so we can retrieve it from the monad. - -- This is very convienent for the users of the monad (e.g. plugins do not have to + -- This is very convenient for the users of the monad (e.g. 
plugins do not have to -- consume the ModGuts to find the module) but somewhat ugly because mg_module may -- _theoretically_ be changed during the Core pipeline (it's part of ModGuts), which -- would mean our cached value would go out of date. @@ -241,7 +241,7 @@ getCoreToDo dflags -- At least 3 iterations because otherwise we land up with -- huge dead expressions because of an infelicity in the - -- simpifier. + -- simplifier. -- let k = BIG in foldr k z xs -- ==> let k = BIG in letrec go = \xs -> ...(k x).... in go xs -- ==> let k = BIG in letrec go = \xs -> ...(BIG x).... in go xs @@ -289,7 +289,7 @@ getCoreToDo dflags CoreLiberateCase, simpl_phase 0 ["post-liberate-case"] max_iter ]), -- Run the simplifier after LiberateCase to vastly - -- reduce the possiblility of shadowing + -- reduce the possibility of shadowing -- Reason: see Note [Shadowing] in SpecConstr.lhs runWhen spec_constr CoreDoSpecConstr, @@ -709,7 +709,7 @@ the final phase, but it's tidier to do it here. Note [Transferring IdInfo] ~~~~~~~~~~~~~~~~~~~~~~~~~~ -We want to propagage any useful IdInfo on x_local to x_exported. +We want to propagate any useful IdInfo on x_local to x_exported. STRICTNESS: if we have done strictness analysis, we want the strictness info on x_local to transfer to x_exported. Hence the copyIdInfo call. @@ -805,7 +805,7 @@ Hence,there's a possibility of leaving unchanged something like this: By the time we've thrown away the types in STG land this could be eliminated. But I don't think it's very common and it's dangerous to do this fiddling in STG land -because we might elminate a binding that's mentioned in the +because we might eliminate a binding that's mentioned in the unfolding for something. Note [Indirection zapping and ticks] @@ -822,7 +822,7 @@ Which we want to become: x_exported = tick As it makes no sense to keep the tick and the expression on separate -bindings. Note however that that this might increase the ticks scoping +bindings. 
Note however that this might increase the ticks scoping over the execution of x_local, so we can only do this for floatable ticks. More often than not, other references will be unfoldings of x_exported, and therefore carry the tick anyway. diff --git a/compiler/Eta/SimplCore/SimplEnv.hs b/compiler/Eta/SimplCore/SimplEnv.hs index 6eff9191..57ab5b25 100644 --- a/compiler/Eta/SimplCore/SimplEnv.hs +++ b/compiler/Eta/SimplCore/SimplEnv.hs @@ -221,7 +221,7 @@ seIdSubst: binding site. * The in-scope "set" usually maps x->x; we use it simply for its domain. - But sometimes we have two in-scope Ids that are synomyms, and should + But sometimes we have two in-scope Ids that are synonyms, and should map to the same target: x->x, y->x. Notably: case y of x { ... } That's why the "set" is actually a VarEnv Var @@ -646,7 +646,7 @@ seqIds (id:ids) = seqId id `seq` seqIds ids {- Note [Arity robustness] ~~~~~~~~~~~~~~~~~~~~~~~ -We *do* transfer the arity from from the in_id of a let binding to the +We *do* transfer the arity from the in_id of a let binding to the out_id. This is important, so that the arity of an Id is visible in its own RHS. For example: f = \x. ....g (\y. f y).... @@ -684,7 +684,7 @@ the letrec. {- ************************************************************************ * * - Impedence matching to type substitution + Impedance matching to type substitution * * ************************************************************************ -} @@ -759,7 +759,7 @@ In just one place (sigh) we need to lazily substitute over a CoreExpr. For that we need CoreSubst.substExpr. But there is a difficulty: SimplEnv has a SimplIdSubst, whose range is SimplSR, not just CoreExpr. -So SimplEnv.substExpr has to perform impedence-matching, via the ambient +So SimplEnv.substExpr has to perform impedance-matching, via the ambient substitution provided by mkGblSubst. It seems like a lot of work for a small thing. 
Previously we attempted to construct a (VarEnv CoreExpr) from the SimplIdSubst, but that had absolutely terrible performance diff --git a/compiler/Eta/SimplCore/SimplUtils.hs b/compiler/Eta/SimplCore/SimplUtils.hs index 6daa2770..a7d53281 100644 --- a/compiler/Eta/SimplCore/SimplUtils.hs +++ b/compiler/Eta/SimplCore/SimplUtils.hs @@ -822,7 +822,7 @@ Similarly, consider and suppose that there are auto-generated specialisations and a strictness wrapper for g. The specialisations get activation AlwaysActive, and the strictness wrapper get activation (ActiveAfter 0). So the strictness -wrepper fails the test and won't be inlined into f's stable unfolding. That +wrapper fails the test and won't be inlined into f's stable unfolding. That means f can inline, expose the specialised call to g, so the specialisation rules can fire. @@ -830,7 +830,7 @@ A note about wrappers ~~~~~~~~~~~~~~~~~~~~~ It's also important not to inline a worker back into a wrapper. A wrapper looks like - wraper = inline_me (\x -> ...worker... ) + wrapper = inline_me (\x -> ...worker... ) Normally, the inline_me prevents the worker getting inlined into the wrapper (initially, the worker's only call site!). But, if the wrapper is sure to be called, the strictness analyser will @@ -866,7 +866,7 @@ getUnfoldingInRuleMatch env | otherwise = isActive (sm_phase mode) (idInlineActivation id) active_unfolding_minimal :: Id -> Bool --- Compuslory unfoldings only +-- Compulsory unfoldings only -- Ignore SimplGently, because we want to inline regardless; -- the Id has no top-level binding at all -- @@ -946,7 +946,7 @@ For example, it's tempting to look at trivial binding like and inline it unconditionally. But suppose x is used many times, but this is the unique occurrence of y. Then inlining x would change y's occurrence info, which breaks the invariant. It matters: y -might have a BIG rhs, which will now be dup'd at every occurrenc of x. +might have a BIG rhs, which will now be dup'd at every occurrence of x. 
Even RHSs labelled InlineMe aren't caught here, because there might be @@ -981,7 +981,7 @@ spectral/mandel/Mandel.hs, where the mandelset function gets a useful let-float if you inline windowToViewport However, as usual for Gentle mode, do not inline things that are -inactive in the intial stages. See Note [Gentle mode]. +inactive in the initial stages. See Note [Gentle mode]. Note [Stable unfoldings and preInlineUnconditionally] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1004,7 +1004,7 @@ for exactly this reason; and we don't want PreInlineUnconditionally to second-guess it. A live example is Trac #3736. c.f. Note [Stable unfoldings and postInlineUnconditionally] -Note [Top-level botomming Ids] +Note [Top-level bottoming Ids] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Don't inline top-level Ids that are bottoming, even if they are used just once, because FloatOut has gone to some trouble to extract them out. @@ -1105,7 +1105,7 @@ only have *forward* references. Hence, it's safe to discard the binding NOTE: This isn't our last opportunity to inline. We're at the binding site right now, and we'll get another opportunity when we get to the -ocurrence(s) +occurrence(s) Note that we do this unconditional inlining only for trival RHSs. Don't inline even WHNFs inside lambdas; doing so may simply increase @@ -1155,7 +1155,7 @@ postInlineUnconditionally dflags env top_lvl bndr occ_info rhs unfolding -- case v of -- True -> case x of ... -- False -> case x of ... - -- This is very important in practice; e.g. wheel-seive1 doubles + -- This is very important in practice; e.g. wheel-sieve1 doubles -- in allocation if you miss this out OneOcc in_lam _one_br int_cxt -- OneOcc => no code-duplication issue -> smallEnoughToInline dflags unfolding -- Small enough to dup @@ -1754,7 +1754,7 @@ This gave rise to a horrible sequence of cases and similarly in cascade for all the join points! 
NB: it's important that all this is done in [InAlt], *before* we work -on the alternatives themselves, because Simpify.simplAlt may zap the +on the alternatives themselves, because Simplify.simplAlt may zap the occurrence info on the binders in the alternatives, which in turn defeats combineIdenticalAlts (see Trac #7360). @@ -1768,7 +1768,7 @@ Suppose we have (Trac #10538) A -> e2 B -> e1 -When calling combineIdentialAlts, we'll have computed that the "impossible +When calling combineIdenticalAlts, we'll have computed that the "impossible constructors" for the DEFAULT alt is {A,B}, since if x is A or B we'll take the other alternatives. But suppose we combine B into the DEFAULT, to get @@ -1824,7 +1824,7 @@ mkCase tries these things } which merges two cases in one case when -- the default alternative of - the outer case scrutises the same variable as the outer case. This + the outer case scrutinises the same variable as the outer case. This transformation is called Case Merging. It avoids that the same variable is scrutinised multiple times. @@ -1937,7 +1937,7 @@ mkCase1 dflags scrut bndr alts_ty alts = mkCase2 dflags scrut bndr alts_ty alts mkCase2 dflags scrut bndr alts_ty alts | -- See Note [Scrutinee Constant Folding] - case alts of -- Not if there is just a DEFAULT alterantive + case alts of -- Not if there is just a DEFAULT alternative [(DEFAULT,_,_)] -> False _ -> True , gopt Opt_CaseFolding dflags @@ -1994,7 +1994,7 @@ mkCase2 dflags scrut bndr alts_ty alts add_default :: [CoreAlt] -> [CoreAlt] -- TagToEnum may change a boolean True/False set of alternatives - -- to LitAlt 0#/1# alterantives. But literal alternatives always + -- to LitAlt 0#/1# alternatives. But literal alternatives always -- have a DEFAULT (I think). So add it. 
add_default ((LitAlt {}, bs, rhs) : alts) = (DEFAULT, bs, rhs) : alts add_default alts = alts diff --git a/compiler/Eta/SimplCore/Simplify.hs b/compiler/Eta/SimplCore/Simplify.hs index 7a1a8c9e..ef440c08 100644 --- a/compiler/Eta/SimplCore/Simplify.hs +++ b/compiler/Eta/SimplCore/Simplify.hs @@ -16,7 +16,7 @@ import Eta.Types.Type hiding ( substTy, extendTvSubst, substTyVar ) import Eta.SimplCore.SimplEnv import Eta.SimplCore.SimplUtils import Eta.Types.FamInstEnv ( FamInstEnv ) -import Eta.BasicTypes.Literal ( litIsLifted ) --, mkMachInt ) -- temporalily commented out. See #8326 +import Eta.BasicTypes.Literal ( litIsLifted ) --, mkMachInt ) -- temporarily commented out. See #8326 import Eta.BasicTypes.Id import Eta.BasicTypes.MkId ( seqId, voidPrimId ) import Eta.Core.MkCore ( mkImpossibleExpr, castBottomExpr ) @@ -27,7 +27,7 @@ import Eta.Types.OptCoercion ( optCoercion ) import Eta.Types.FamInstEnv ( topNormaliseType_maybe ) import Eta.BasicTypes.DataCon ( DataCon, dataConWorkId, dataConRepStrictness , isMarkedStrict ) --, dataConTyCon, dataConTag, fIRST_TAG ) ---import Eta.Types.TyCon ( isEnumerationTyCon ) -- temporalily commented out. See #8326 +--import Eta.Types.TyCon ( isEnumerationTyCon ) -- temporarily commented out. See #8326 import Eta.SimplCore.CoreMonad ( Tick(..), SimplifierMode(..) ) import Eta.Core.CoreSyn import Eta.BasicTypes.Demand ( StrictSig(..), dmdTypeDepth, isStrictDmd ) @@ -35,13 +35,13 @@ import Eta.Core.PprCore ( pprCoreExpr ) import Eta.Core.CoreUnfold import Eta.Core.CoreUtils import Eta.Core.CoreArity ---import Eta.Prelude.PrimOp ( tagToEnumKey ) -- temporalily commented out. See #8326 +--import Eta.Prelude.PrimOp ( tagToEnumKey ) -- temporarily commented out. See #8326 import Eta.Specialise.Rules ( mkRuleInfo, lookupRule, getRules ) -import Eta.Prelude.TysPrim ( voidPrimTy ) --, intPrimTy ) -- temporalily commented out. See #8326 +import Eta.Prelude.TysPrim ( voidPrimTy ) --, intPrimTy ) -- temporarily commented out. 
See #8326 import Eta.BasicTypes.BasicTypes ( TopLevelFlag(..), isTopLevel, RecFlag(..) ) import Eta.Utils.MonadUtils ( foldlM, mapAccumLM, liftIO ) import Eta.Utils.Maybes ( orElse ) ---import Eta.BasicTypes.Unique ( hasKey ) -- temporalily commented out. See #8326 +--import Eta.BasicTypes.Unique ( hasKey ) -- temporarily commented out. See #8326 import Control.Monad import Eta.Utils.Outputable import Eta.Utils.FastString @@ -88,7 +88,7 @@ simplExpr (Let (NonRec ...) ..) ==> simplNonRecBind simplExpr (Let (Rec ...) ..) ==> simplify binders; simplRecBind ------------------------------ -simplRecBind [binders already simplfied] +simplRecBind [binders already simplified] - use simplRecOrTopPair on each pair in turn simplRecOrTopPair [binder already simplified] @@ -136,7 +136,7 @@ simplLazyBind: [binder already simplified, RHS not] completeNonRecX: [binder and rhs both simplified] - - if the the thing needs case binding (unlifted and not ok-for-spec) + - if the thing needs case binding (unlifted and not ok-for-spec) build a Case else completeBind @@ -410,14 +410,14 @@ completeNonRecX top_lvl env is_strict old_bndr new_bndr new_rhs {- {- No, no, no! Do not try preInlineUnconditionally in completeNonRecX Doing so risks exponential behaviour, because new_rhs has been simplified once already - In the cases described by the folowing commment, postInlineUnconditionally will + In the cases described by the following comment, postInlineUnconditionally will catch many of the relevant cases. -- This happens; for example, the case_bndr during case of -- known constructor: case (a,b) of x { (p,q) -> ... } -- Here x isn't mentioned in the RHS, so we don't want to -- create the (dead) let-binding let x = (a,b) in ... -- - -- Similarly, single occurrences can be inlined vigourously + -- Similarly, single occurrences can be inlined vigorously -- e.g. case (f x, g y) of (a,b) -> .... -- If a,b occur once we can avoid constructing the let binding for them. 
@@ -513,7 +513,7 @@ we'd like to transform it to x' = e x = x `cast` co -- A trivial binding There's a chance that e will be a constructor application or function, or something -like that, so moving the coerion to the usage site may well cancel the coersions +like that, so moving the coercion to the usage site may well cancel the coersions and lead to further optimisation. Example: data family T a :: * @@ -581,7 +581,7 @@ makeTrivialWithInfo top_lvl env info expr ; env' <- completeNonRecX top_lvl env False var var expr ; expr' <- simplVar env' var ; return (env', expr') } - -- The simplVar is needed becase we're constructing a new binding + -- The simplVar is needed because we're constructing a new binding -- a = rhs -- And if rhs is of form (rhs1 |> co), then we might get -- a1 = rhs1 @@ -1181,7 +1181,7 @@ simplCast env body co0 cont0 -- Example of use: Trac #995 = do { let arg' = substExpr arg_se arg -- It's important that this is lazy, because this argument - -- may be disarded if turns out to be the argument of + -- may be discarded if turns out to be the argument of -- (\_ -> e) This can make a huge difference; -- see Trac #10527 ; cont' <- addCoerce co2 cont @@ -1425,7 +1425,7 @@ rebuildCall env (ArgInfo { ai_fun = fun, ai_args = rev_args, ai_strs = [] }) con -- the continuation, leaving just the bottoming expression. But the -- type might not be right, so we may have to add a coerce. | not (contIsTrivial cont) -- Only do this if there is a non-trivial - = return (env, castBottomExpr res cont_ty) -- contination to discard, else we do it + = return (env, castBottomExpr res cont_ty) -- continuation to discard, else we do it where -- again and again! res = argInfoExpr fun rev_args cont_ty = contResultType cont @@ -1671,7 +1671,7 @@ Note that SimplUtils.mkCase combines identical RHSs. So True -> r False -> r -Now again the case may be elminated by the CaseElim transformation. +Now again the case may be eliminated by the CaseElim transformation. 
This includes things like (==# a# b#)::Bool so that we simplify case ==# a# b# of { True -> x; False -> x } to just @@ -1816,7 +1816,7 @@ rebuildCase, reallyRebuildCase :: SimplEnv -> OutExpr -- Scrutinee -> InId -- Case binder - -> [InAlt] -- Alternatives (inceasing order) + -> [InAlt] -- Alternatives (increasing order) -> SimplCont -> SimplM (SimplEnv, OutExpr) @@ -1952,7 +1952,7 @@ inlined. Historical note: we use to do the "case binder swap" in the Simplifier so there were additional complications if the scrutinee was a variable. -Now the binder-swap stuff is done in the occurrence analyer; see +Now the binder-swap stuff is done in the occurrence analyzer; see OccurAnal Note [Binder swap]. Note [knownCon occ info] @@ -2175,7 +2175,7 @@ Note [Add unfolding for scrutinee] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In general it's unlikely that a variable scrutinee will appear in the case alternatives case x of { ...x unlikely to appear... } -because the binder-swap in OccAnal has got rid of all such occcurrences +because the binder-swap in OccAnal has got rid of all such occurrences See Note [Binder swap] in OccAnal. BUT it is still VERY IMPORTANT to add a suitable unfolding for a @@ -2188,7 +2188,7 @@ the unfolding (a,b), and *that* mentions b. If f has a RULE RULE f (p, I# q) = ... we want that rule to match, so we must extend the in-scope env with a suitable unfolding for 'y'. It's *essential* for rule matching; but -it's also good for case-elimintation -- suppose that 'f' was inlined +it's also good for case-elimination -- suppose that 'f' was inlined and did multi-level case analysis, then we'd solve it in one simplifier sweep instead of two. @@ -2283,7 +2283,7 @@ knownCon env scrut dc dc_ty_args dc_args bndr bs rhs cont ------------------- missingAlt :: SimplEnv -> Id -> [InAlt] -> SimplCont -> SimplM (SimplEnv, OutExpr) -- This isn't strictly an error, although it is unusual. 
- -- It's possible that the simplifer might "see" that + -- It's possible that the simplifier might "see" that -- an inner case has no accessible alternatives before -- it "sees" that the entire branch of an outer case is -- inaccessible. So we simply put an error case here instead. @@ -2605,7 +2605,7 @@ Rather than do this we simply agree to re-simplify the original (small) thing la Note [Funky mkPiTypes] ~~~~~~~~~~~~~~~~~~~~~~ -Notice the funky mkPiTypes. If the contructor has existentials +Notice the funky mkPiTypes. If the constructor has existentials it's possible that the join point will be abstracted over type variables as well as term variables. Example: Suppose we have @@ -2755,7 +2755,7 @@ But now we do *NOT* want to make a join point etc, giving True -> $j (I# (negate# x')) False -> $j (I# x') In this case the $j will inline again, but suppose there was a big -strict computation enclosing the orginal call to MkT. Then, it won't +strict computation enclosing the original call to MkT. Then, it won't "see" the MkT any more, because it's big and won't get duplicated. And, what is worse, nothing was gained by the case-of-case transform. diff --git a/compiler/Eta/SimplCore/simplifier.tib b/compiler/Eta/SimplCore/simplifier.tib index 18acd279..00bedbbf 100644 --- a/compiler/Eta/SimplCore/simplifier.tib +++ b/compiler/Eta/SimplCore/simplifier.tib @@ -75,7 +75,7 @@ a short-hand, not an algorithm. (y:ys) -> E1[y,ys] [] -> E2 @ -Transformations of this kind are almost embarassingly simple. How could +Transformations of this kind are almost embarrassingly simple. How could anyone write a paper about them? \end{itemize} This paper is about humble transformations, and how to implement them. @@ -335,7 +335,7 @@ should first be eliminated by the dead-alternative transformation. 
\subsection{Inlining} -The inlining transformtion is simple enough: +The inlining transformation is simple enough: @ let x = R in B[x] ===> B[R] @ @@ -706,7 +706,7 @@ each iteration of Step 2 only performs one transformation, then the entire program will to be re-analysed by Step 1, and re-traversed by Step 2, for each transformation of the sequence. Sometimes this is unavoidable, but it is often possible to perform a sequence of -transformtions in a single pass. +transformations in a single pass. The key function, which simplifies expressions, has the following type: @ diff --git a/compiler/Eta/Specialise/Rules.hs b/compiler/Eta/Specialise/Rules.hs index f742c468..2294fa8d 100644 --- a/compiler/Eta/Specialise/Rules.hs +++ b/compiler/Eta/Specialise/Rules.hs @@ -125,7 +125,7 @@ Note [Overall plumbing for rules] (b) from the ModGuts, (c) from the CoreMonad, and (d) from its mutable variable - [Of coures this means that we won't see new EPS rules that come in + [Of course this means that we won't see new EPS rules that come in during a single simplifier iteration, but that probably does not matter.] @@ -321,7 +321,7 @@ but that isn't quite right: -- | Gathers a collection of 'CoreRule's. Maps (the name of) an 'Id' to its rules type RuleBase = NameEnv [CoreRule] - -- The rules are are unordered; + -- The rules are unordered; -- we sort out any overlaps on lookup emptyRuleBase :: RuleBase @@ -394,7 +394,7 @@ lookupRule dflags in_scope is_active fn args rules findBest :: (Id, [CoreExpr]) -> (CoreRule,CoreExpr) -> [(CoreRule,CoreExpr)] -> (CoreRule,CoreExpr) -- All these pairs matched the expression --- Return the pair the the most specific rule +-- Return the pair the most specific rule -- The (fn,args) is just for overlap reporting findBest _ (rule,ans) [] = (rule,ans) @@ -673,7 +673,7 @@ match _ _ e@Tick{} _ -- Consider matching -- \x->f against \f->f -- When we meet the lambdas we must remember to rename f to f' in the --- second expresion. The RnEnv2 does that. 
+-- second expression. The RnEnv2 does that. -- -- Consider matching -- forall a. \b->b against \a->3 diff --git a/compiler/Eta/Specialise/SpecConstr.hs b/compiler/Eta/Specialise/SpecConstr.hs index 9b1441d6..ab73f5bb 100644 --- a/compiler/Eta/Specialise/SpecConstr.hs +++ b/compiler/Eta/Specialise/SpecConstr.hs @@ -214,7 +214,7 @@ This only makes sense if either b) the type variable 'a' is an argument to f (and hence fs) Actually, (a) may hold for value arguments too, in which case -we may not want to pass them. Supose 'x' is in scope at f's +we may not want to pass them. Suppose 'x' is in scope at f's defn, but xs is not. Then we'd like f_spec xs = let p = (:) [a] x xs in ....as before.... @@ -1090,10 +1090,10 @@ instance Outputable ArgOcc where evalScrutOcc :: ArgOcc evalScrutOcc = ScrutOcc emptyUFM --- Experimentally, this vesion of combineOcc makes ScrutOcc "win", so +-- Experimentally, this version of combineOcc makes ScrutOcc "win", so -- that if the thing is scrutinised anywhere then we get to see that -- in the overall result, even if it's also used in a boxed way --- This might be too agressive; see Note [Reboxing] Alternative 3 +-- This might be too aggressive; see Note [Reboxing] Alternative 3 combineOcc :: ArgOcc -> ArgOcc -> ArgOcc combineOcc NoOcc occ = occ combineOcc occ NoOcc = occ @@ -1660,7 +1660,7 @@ the passed-in RuleInfo, unless there are no calls at all to the function. The caller can, indeed must, assume this. He should not combine in rhs_usg himself, or he'll get rhs_usg twice -- and that can lead to an exponential blowup of duplicates in the CallEnv. This is what gave rise to the massive -performace loss in Trac #8852. +performance loss in Trac #8852. Note [Specialise original body] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1684,7 +1684,7 @@ that specialisations didn't fire inside wrappers; see test simplCore/should_compile/spec-inline. 
So now I just use the inline-activation of the parent Id, as the -activation for the specialiation RULE, just like the main specialiser; +activation for the specialisation RULE, just like the main specialiser; This in turn means there is no point in specialising NOINLINE things, so we test for that. diff --git a/compiler/Eta/Specialise/Specialise.hs b/compiler/Eta/Specialise/Specialise.hs index 6e0448a5..ebe1987e 100644 --- a/compiler/Eta/Specialise/Specialise.hs +++ b/compiler/Eta/Specialise/Specialise.hs @@ -145,8 +145,8 @@ becomes in fl -We still have recusion for non-overloaded functions which we -speciailise, but the recursive call should get specialised to the +We still have recursion for non-overloaded functions which we +specialise, but the recursive call should get specialised to the same recursive version. @@ -194,7 +194,7 @@ the two instances of +.sel weren't originally at the same type. Further notes on (b) * There are quite a few variations here. For example, the defn of - +.sel could be floated ouside the \y, to attempt to gain laziness. + +.sel could be floated outside the \y, to attempt to gain laziness. It certainly mustn't be floated outside the \d because the d has to be in scope too. @@ -289,7 +289,7 @@ only it knows how to build the dictionaries d1 and d2! For example Here, the specialised version of g is an application of g's rhs to the Ord dictionary for (Tree Int), which only the type checker can conjure up. There might not even *be* one, if (Tree Int) is not an instance of -Ord! (All the other specialision has suitable dictionaries to hand +Ord! (All the other specialisation has suitable dictionaries to hand from actual calls.) Problem. The type checker doesn't have to hand a convenient , because @@ -400,7 +400,7 @@ Seems quite reasonable. Similar things could be done with instance decls: Ho hum. Things are complex enough without this. I pass. 
-Requirements for the simplifer +Requirements for the simplifier ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The simplifier has to be able to take advantage of the specialisation. @@ -852,7 +852,7 @@ specCase env scrut' case_bndr [(con, args, rhs)] | sc_arg' <- sc_args' ] -- Extend the substitution for RHS to map the *original* binders - -- to their floated verions. + -- to their floated versions. mb_sc_flts :: [Maybe DictId] mb_sc_flts = map (lookupVarEnv clone_env) args' clone_env = zipVarEnv sc_args' sc_args_flt @@ -1146,7 +1146,7 @@ specCalls mb_mod env rules_for_me calls_for_me fn rhs = ASSERT( call_ts `lengthIs` n_tyvars && call_ds `lengthIs` n_dicts ) -- Suppose f's defn is f = /\ a b c -> \ d1 d2 -> rhs - -- Supppose the call is for f [Just t1, Nothing, Just t3] [dx1, dx2] + -- Supposed the call is for f [Just t1, Nothing, Just t3] [dx1, dx2] -- Construct the new binding -- f1 = SUBST[a->t1,c->t3, d1->d1', d2->d2'] (/\ b -> rhs) @@ -1195,7 +1195,7 @@ specCalls mb_mod env rules_for_me calls_for_me fn rhs herald = case mb_mod of Nothing -- Specialising local fn -> ptext (sLit "SPEC") - Just this_mod -- Specialising imoprted fn + Just this_mod -- Specialising imported fn -> ptext (sLit "SPEC/") <> ppr this_mod rule_name = mkFastString $ showSDocForUser dflags neverQualify $ @@ -1407,7 +1407,7 @@ So we use the following heuristic: * Then go through the block a second time, feeding call-info from the RHSs back in the bottom, as it were -In effect, the ordering maxmimises the effectiveness of each sweep, +In effect, the ordering maximises the effectiveness of each sweep, and we do just two sweeps. This should catch almost every case of monomorphic recursion -- the exception could be a very knotted-up recursion with multiple cycles tied up together. @@ -1483,7 +1483,7 @@ This doesn't always work. One example I came across was this: oneof = choose (1::Int) -It's a silly exapmle, but we get +It's a silly example, but we get choose = /\a. 
g `cast` co where choose doesn't have any dict arguments. Thus far I have not tried to fix this (wait till there's a real example). @@ -1562,7 +1562,7 @@ have the big, un-optimised of f (albeit specialised) captured in an INLINABLE pragma for f_spec, we won't get that optimisation. So we simply drop INLINABLE pragmas when specialising. It's not really -a complete solution; ignoring specalisation for now, INLINABLE functions +a complete solution; ignoring specialisation for now, INLINABLE functions don't get properly strictness analysed, for example. But it works well for examples involving specialisation, which is the dominant use of INLINABLE. See Trac #4874. @@ -1739,7 +1739,7 @@ Note [Type determines value] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Only specialise if all overloading is on non-IP *class* params, because these are the ones whose *type* determines their *value*. In -parrticular, with implicit params, the type args *don't* say what the +particular, with implicit params, the type args *don't* say what the value of the implicit param is! See Trac #7101 However, consider @@ -2106,12 +2106,12 @@ is used: Now give it to the simplifier and the _Lifting will be optimised away. -The benfit is that we have given the specialised "unboxed" values a +The benefit is that we have given the specialised "unboxed" values a very simplep lifted semantics and then leave it up to the simplifier to optimise it --- knowing that the overheads will be removed in nearly all cases. -In particular, the value will only be evaluted in the branches of the +In particular, the value will only be evaluated in the branches of the program which use it, rather than being forced at the point where the value is bound. 
For example: diff --git a/compiler/Eta/StgSyn/CoreToStg.hs b/compiler/Eta/StgSyn/CoreToStg.hs index 66a02113..3a7136a1 100644 --- a/compiler/Eta/StgSyn/CoreToStg.hs +++ b/compiler/Eta/StgSyn/CoreToStg.hs @@ -792,7 +792,7 @@ coreToStgLet let_no_escape bind body = do is_join_var :: Id -> Bool --- A hack (used only for compiler debuggging) to tell if +-- A hack (used only for compiler debugging) to tell if -- a variable started life as a join point ($j) is_join_var j = occNameString (getOccName j) == "$j" @@ -855,7 +855,7 @@ mkStgRhs' con_updateable rhs_fvs srt bndr binder_info rhs -- and lots of PAP_enters. -- -- - in the case where the thunk is top-level, we save building --- a black hole and futhermore the thunk isn't considered to +-- a black hole and furthermore the thunk isn't considered to -- be a CAF any more, so it doesn't appear in any SRTs. -- -- We do it here, because the arity information is accurate, and we need @@ -936,7 +936,7 @@ topLevelBound _ = False -- For a let(rec)-bound variable, x, we record LiveInfo, the set of -- variables that are live if x is live. This LiveInfo comprises -- (a) dynamic live variables (ones with a non-top-level binding) --- (b) static live variabes (CAFs or things that refer to CAFs) +-- (b) static live variables (CAFs or things that refer to CAFs) -- -- For "normal" variables (a) is just x alone. 
If x is a let-no-escaped -- variable then x is represented by a code pointer and a stack pointer diff --git a/compiler/Eta/StgSyn/StgLint.hs b/compiler/Eta/StgSyn/StgLint.hs index 693a1fd9..deb0c39c 100644 --- a/compiler/Eta/StgSyn/StgLint.hs +++ b/compiler/Eta/StgSyn/StgLint.hs @@ -210,7 +210,7 @@ lintStgExpr (StgCase scrut _ _ bndr _ alts_type alts) = runMaybeT $ do lintStgAlts :: [StgAlt] -> Type -- Type of scrutinee - -> LintM (Maybe Type) -- Just ty => type is accurage + -> LintM (Maybe Type) -- Just ty => type is accurate lintStgAlts alts scrut_ty = do maybe_result_tys <- mapM (lintAlt scrut_ty) alts diff --git a/compiler/Eta/StgSyn/StgSyn.hs b/compiler/Eta/StgSyn/StgSyn.hs index 9d39b17a..d8ec2739 100644 --- a/compiler/Eta/StgSyn/StgSyn.hs +++ b/compiler/Eta/StgSyn/StgSyn.hs @@ -126,7 +126,7 @@ isDllConApp _dflags _this_mod _con _args = False -- | Type of an @StgArg@ -- --- Very half baked becase we have lost the type arguments. +-- Very half baked because we have lost the type arguments. stgArgType :: StgArg -> Type stgArgType (StgVarArg v) = idType v stgArgType (StgLitArg lit) = literalType lit @@ -485,7 +485,7 @@ Very like in @CoreSyntax@ (except no type-world stuff). The type constructor is guaranteed not to be abstract; that is, we can see its representation. This is important because the code generator uses it to determine return conventions etc. But it's not trivial -where there's a moduule loop involved, because some versions of a type +where there's a module loop involved, because some versions of a type constructor might not have all the constructors visible. 
So mkStgAlgAlts (in CoreToStg) ensures that it gets the TyCon from the constructors or literals (which are guaranteed to have the Real McCoy) diff --git a/compiler/Eta/StrAnal/DmdAnal.hs b/compiler/Eta/StrAnal/DmdAnal.hs index 6f8b14b3..578723a0 100644 --- a/compiler/Eta/StrAnal/DmdAnal.hs +++ b/compiler/Eta/StrAnal/DmdAnal.hs @@ -145,7 +145,7 @@ dmdAnalStar env dmd e , (dmd_ty, e') <- dmdAnal env cd e = (postProcessDmdType defer_and_use dmd_ty, e') --- Main Demand Analsysis machinery +-- Main Demand Analysis machinery dmdAnal, dmdAnal' :: AnalEnv -> CleanDemand -- The main one takes a *CleanDemand* -> CoreExpr -> (DmdType, CoreExpr) @@ -514,7 +514,7 @@ dmdFix :: TopLevelFlag -> AnalEnv -- Does not include bindings for this binding -> CleanDemand -> [(Id,CoreExpr)] - -> (AnalEnv, DmdEnv, [(Id,CoreExpr)]) -- Binders annotated with stricness info + -> (AnalEnv, DmdEnv, [(Id,CoreExpr)]) -- Binders annotated with strictness info dmdFix top_lvl env let_dmd orig_pairs = loop 1 initial_pairs @@ -1001,7 +1001,7 @@ by dmdAnalTopBind. Note [NOINLINE and strictness] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The strictness analyser used to have a HACK which ensured that NOINLNE +The strictness analyser used to have a HACK which ensured that NOINLINE things were not strictness-analysed. The reason was unsafePerformIO. Left to itself, the strictness analyser would discover this strictness for unsafePerformIO: @@ -1438,7 +1438,7 @@ point: all of these functions can have the CPR property. ------- f3 ----------- -- h is strict in x, so x will be unboxed before it - -- is rerturned in the otherwise case. + -- is returned in the otherwise case. data T3 = MkT3 Int Int diff --git a/compiler/Eta/StrAnal/WorkWrap.hs b/compiler/Eta/StrAnal/WorkWrap.hs index 838c4898..4d94e9f4 100644 --- a/compiler/Eta/StrAnal/WorkWrap.hs +++ b/compiler/Eta/StrAnal/WorkWrap.hs @@ -200,7 +200,7 @@ the wrapper (or later). 
That is necessary to allow the wrapper to inline into the worker's unfolding: see SimplUtils Note [Simplifying inside stable unfoldings]. -Notihng is lost by giving the worker the same activation as the +Nothing is lost by giving the worker the same activation as the worker, because the worker won't have any chance of inlining until the wrapper does; there's no point in giving it an earlier activation. diff --git a/compiler/Eta/StrAnal/WwLib.hs b/compiler/Eta/StrAnal/WwLib.hs index f56b034a..6ee45a55 100644 --- a/compiler/Eta/StrAnal/WwLib.hs +++ b/compiler/Eta/StrAnal/WwLib.hs @@ -169,7 +169,7 @@ Note [Always do CPR w/w] ~~~~~~~~~~~~~~~~~~~~~~~~ At one time we refrained from doing CPR w/w for thunks, on the grounds that we might duplicate work. But that is already handled by the demand analyser, -which doesn't give the CPR proprety if w/w might waste work: see +which doesn't give the CPR property if w/w might waste work: see Note [CPR for thunks] in DmdAnal. And if something *has* been given the CPR property and we don't w/w, it's @@ -243,7 +243,7 @@ Then we drop the unused args to give foo = \pq. $wfoo void# $wfoo = \void(one-shot). y + 3 -But suppse foo didn't have all one-shot args: +But suppose foo didn't have all one-shot args: foo = \p(not-one-shot) q(one-shot). expensive y + 3 Then we drop the unused args to give foo = \pq. $wfoo void# @@ -361,7 +361,7 @@ Note [Freshen type variables] Wen we do a worker/wrapper split, we must not use shadowed names, else we'll get f = /\ a /\a. fw a a -which is obviously wrong. Type variables can can in principle shadow, +which is obviously wrong. Type variables can in principle shadow, within a type (e.g. forall a. a -> forall a. a->a). But type variables *are* mentioned in , so we must substitute. @@ -418,7 +418,7 @@ we end up unpacking massive tuples passed to the bottoming function. Example: main = print (f fst (1, error "no")) Does 'main' print "error 1" or "error no"? 
We don't really want 'f' -to unbox its second argument. This actually happened in GHC's onwn +to unbox its second argument. This actually happened in GHC's own source code, in Packages.applyPackageFlag, which ended up un-boxing the enormous DynFlags tuple, and being strict in the as-yet-un-filled-in pkgState files. @@ -509,7 +509,7 @@ bug. The fix here is simply to decline to do w/w if that happens. ************************************************************************ * * - Type scrutiny that is specfic to demand analysis + Type scrutiny that is specific to demand analysis * * ************************************************************************ @@ -518,7 +518,7 @@ Note [Do not unpack class dictionaries] If we have f :: Ord a => [a] -> Int -> a {-# INLINABLE f #-} -and we worker/wrapper f, we'll get a worker with an INLINALBE pragma +and we worker/wrapper f, we'll get a worker with an INLINABLE pragma (see Note [Worker-wrapper for INLINABLE functions] in WorkWrap), which can still be specialised by the type-class specialiser, something like fw :: Ord a => [a] -> Int# -> a @@ -528,7 +528,7 @@ BUT if f is strict in the Ord dictionary, we might unpack it, to get and the type-class specialiser can't specialise that. An example is Trac #6056. -Moreover, dictinoaries can have a lot of fields, so unpacking them can +Moreover, dictionaries can have a lot of fields, so unpacking them can increase closure sizes. Conclusion: don't unpack dictionaries. 
diff --git a/compiler/Eta/TypeCheck/FamInst.hs b/compiler/Eta/TypeCheck/FamInst.hs index e40197ff..d9b54c70 100644 --- a/compiler/Eta/TypeCheck/FamInst.hs +++ b/compiler/Eta/TypeCheck/FamInst.hs @@ -387,7 +387,7 @@ addFamInstsErr herald insts sorted = sortWith getSpan insts fi1 = head sorted srcSpan = coAxBranchSpan (coAxiomSingleBranch (famInstAxiom fi1)) - -- The sortWith just arranges that instances are dislayed in order + -- The sortWith just arranges that instances are displayed in order -- of source location, which reduced wobbling in error messages, -- and is better for users diff --git a/compiler/Eta/TypeCheck/FunDeps.hs b/compiler/Eta/TypeCheck/FunDeps.hs index f4b5e1c7..336e87f7 100644 --- a/compiler/Eta/TypeCheck/FunDeps.hs +++ b/compiler/Eta/TypeCheck/FunDeps.hs @@ -61,7 +61,7 @@ Will generate: fd1 = FDEq { fd_pos = 1, fd_ty_left = alpha, fd_ty_right = Bool } and fd2 = FDEq { fd_pos = 2, fd_ty_left = alpha, fd_ty_right = beta } -We record the paremeter position so that can immediately rewrite a constraint +We record the parameter position so that can immediately rewrite a constraint using the produced FDEqs and remove it from our worklist. @@ -221,7 +221,7 @@ improveFromInstEnv inst_env pred -- because there often are none! , let trimmed_tcs = trimRoughMatchTcs cls_tvs fd rough_tcs -- Trim the rough_tcs based on the head of the fundep. - -- Remember that instanceCantMatch treats both argumnents + -- Remember that instanceCantMatch treats both arguments -- symmetrically, so it's ok to trim the rough_tcs, -- rather than trimming each inst_tcs in turn , ispec <- instances @@ -324,7 +324,7 @@ checkClsFD fd clas_tvs -- -- But note (a) we get them from the dfun_id, so they are *in order* -- because the kind variables may be mentioned in the - -- type variabes' kinds + -- type variables' kinds -- (b) we must apply 'subst' to the kinds, in case we have -- matched out a kind variable, but not a type variable -- whose kind mentions that kind variable! 
@@ -457,7 +457,7 @@ Here is a more subtle example, from HList-0.4.0.0 (Trac #10564) Is the instance OK? Does {l,r,xs} determine v? Well: * From the instance constraint HMemberM (Label k l) (LabelsOf xs) b, - plus the fundep "| el l -> r" in class HMameberM, + plus the fundep "| el l -> r" in class HMemberM, we get {l,k,xs} -> b * Note the 'k'!! We must call closeOverKinds on the seed set diff --git a/compiler/Eta/TypeCheck/Inst.hs b/compiler/Eta/TypeCheck/Inst.hs index abd178a1..9a6ac2ce 100644 --- a/compiler/Eta/TypeCheck/Inst.hs +++ b/compiler/Eta/TypeCheck/Inst.hs @@ -574,7 +574,7 @@ addClsInstsErr herald ispecs addErr (hang herald 2 (pprInstances sorted)) where sorted = sortWith getSrcLoc ispecs - -- The sortWith just arranges that instances are dislayed in order + -- The sortWith just arranges that instances are displayed in order -- of source location, which reduced wobbling in error messages, -- and is better for users diff --git a/compiler/Eta/TypeCheck/TcArrows.hs b/compiler/Eta/TypeCheck/TcArrows.hs index 09372019..3c542447 100644 --- a/compiler/Eta/TypeCheck/TcArrows.hs +++ b/compiler/Eta/TypeCheck/TcArrows.hs @@ -38,7 +38,7 @@ import Eta.Utils.Util import Control.Monad {- -Note [Arrow overivew] +Note [Arrow overview] ~~~~~~~~~~~~~~~~~~~~~ Here's a summary of arrows and how they typecheck. First, here's a cut-down syntax: diff --git a/compiler/Eta/TypeCheck/TcBinds.hs b/compiler/Eta/TypeCheck/TcBinds.hs index 39e1951f..c8a67fb7 100644 --- a/compiler/Eta/TypeCheck/TcBinds.hs +++ b/compiler/Eta/TypeCheck/TcBinds.hs @@ -115,7 +115,7 @@ If we don't take care, after typechecking we get in \ys:[a] -> ...f'... -Notice the the stupid construction of (f a d), which is of course +Notice the stupid construction of (f a d), which is of course identical to the function we're executing. In this case, the polymorphic recursion isn't being used (but that's a very common case). 
This can lead to a massive space leak, from the following top-level defn @@ -137,7 +137,7 @@ up with a chain of identical values all hung onto by the CAF ff. Etc. -NOTE: a bit of arity anaysis would push the (f a d) inside the (\ys...), +NOTE: a bit of arity analysis would push the (f a d) inside the (\ys...), which would make the space leak go away in this case Solution: when typechecking the RHSs we always have in hand the @@ -248,7 +248,7 @@ tcLocalBinds (HsIPBinds (IPBinds ip_binds _)) thing_inside ; return (ip_id, (IPBind (Right ip_id) d)) } tc_ip_bind _ (IPBind (Right {}) _) = panic "tc_ip_bind" - -- Coerces a `t` into a dictionry for `IP "x" t`. + -- Coerces a `t` into a dictionary for `IP "x" t`. -- co : t -> IP "x" t toDict ipClass x ty = HsWrap $ mkWpCast $ TcCoercion $ wrapIP $ mkClassPred ipClass [x,ty] @@ -280,7 +280,7 @@ Consider this (Trac #9161) Here, the type signature for b mentions A. But A is a pattern synonym, which is typechecked (for very good reasons; a view pattern in the RHS may mention a value binding) as part of a group of -bindings. It is entirely resonable to reject this, but to do so +bindings. It is entirely reasonable to reject this, but to do so we need A to be in the kind environment when kind-checking the signature for B. 
Hence the tcExtendKindEnv2 patsyn_placeholder_kinds, which adds a binding @@ -330,7 +330,7 @@ tcBindGroups :: TopLevelFlag -> TcSigFun -> PragFun -> TcM ([(RecFlag, LHsBinds TcId)], thing) -- Typecheck a whole lot of value bindings, -- one strongly-connected component at a time --- Here a "strongly connected component" has the strightforward +-- Here a "strongly connected component" has the straightforward -- meaning of a group of bindings that mention each other, -- ignoring type signatures (that part comes later) @@ -651,13 +651,13 @@ mkExport prag_fn qtvs inferred_theta (poly_name, mb_sig, mono_id) ; traceTc "mkExport: check sig" (vcat [ ppr poly_name, ppr sel_poly_ty, ppr (idType poly_id) ]) - -- Perform the impedence-matching and ambiguity check + -- Perform the impedance-matching and ambiguity check -- right away. If it fails, we want to fail now (and recover -- in tcPolyBinds). If we delay checking, we get an error cascade. -- Remember we are in the tcPolyInfer case, so the type envt is -- closed (unless we are doing NoMonoLocalBinds in which case all bets -- are off) - -- See Note [Impedence matching] + -- See Note [Impedance matching] ; (wrap, wanted) <- addErrCtxtM (mk_bind_msg inferred True poly_name (idType poly_id)) $ captureConstraints $ tcSubType_NC sig_ctxt sel_poly_ty (idType poly_id) @@ -807,13 +807,13 @@ Examples that might fail: - an inferred type that includes unboxed tuples However we don't do the ambiguity check (checkValidType omits it for -InfSigCtxt) because the impedence-matching stage, which follows +InfSigCtxt) because the impedance-matching stage, which follows immediately, will do it and we don't want two error messages. -Moreover, because of the impedence matching stage, the ambiguity-check -suggestion of -XAllowAmbiguiousTypes will not work. +Moreover, because of the impedance matching stage, the ambiguity-check +suggestion of -XAllowAmbiguousTypes will not work. 
-Note [Impedence matching] +Note [Impedance matching] ~~~~~~~~~~~~~~~~~~~~~~~~~ Consider f 0 x = x @@ -833,7 +833,7 @@ The types we really want for f and g are f :: forall a. (Eq a, Num a) => a -> Bool -> Bool g :: forall b. [b] -> Bool -> Bool -We can get these by "impedence matching": +We can get these by "impedance matching": tuple :: forall a b. (Eq a, Num a) => (a -> Bool -> Bool, [b] -> Bool -> Bool) tuple a b d1 d1 = let ...bind f_mono, g_mono in (f_mono, g_mono) @@ -843,9 +843,9 @@ We can get these by "impedence matching": Suppose the shared quantified tyvars are qtvs and constraints theta. Then we want to check that f's polytype is more polymorphic than forall qtvs. theta => f_mono_ty -and the proof is the impedence matcher. +and the proof is the impedance matcher. -Notice that the impedence matcher may do defaulting. See Trac #7173. +Notice that the impedance matcher may do defaulting. See Trac #7173. It also cleverly does an ambiguity check; for example, rejecting f :: F a -> a @@ -886,7 +886,7 @@ lhsBindArity _ env = env -- PatBind/VarBind ------------------ tcSpecPrags :: Id -> [LSig Name] -> TcM [LTcSpecPrag] --- Add INLINE and SPECIALSE pragmas +-- Add INLINE and SPECIALISE pragmas -- INLINE prags are added to the (polymorphic) Id directly -- SPECIALISE prags are passed to the desugarer via TcSpecPrags -- Pre-condition: the poly_id is zonked @@ -947,7 +947,7 @@ tcImpPrags prags -- Ignore SPECIALISE pragmas for imported things -- when we aren't specialising, or when we aren't generating -- code. The latter happens when Haddocking the base library; - -- we don't wnat complaints about lack of INLINABLE pragmas + -- we don't want complaints about lack of INLINABLE pragmas not_specialising dflags | not (gopt Opt_Specialise dflags) = True | otherwise = case hscTarget dflags of @@ -1368,7 +1368,7 @@ it's all cool; each signature has distinct type variables from the renamer.) 
Note [Fail eagerly on bad signatures] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -If a type signaure is wrong, fail immediately: +If a type signature is wrong, fail immediately: * the type sigs may bind type variables, so proceeding without them can lead to a cascade of errors @@ -1472,7 +1472,7 @@ data GeneralisationPlan -- Explicit generalisation; there is an AbsBinds -- A consequence of the no-AbsBinds choice (NoGen) is that there is --- no "polymorphic Id" and "monmomorphic Id"; there is just the one +-- no "polymorphic Id" and "monomorphic Id"; there is just the one instance Outputable GeneralisationPlan where ppr NoGen = ptext (sLit "NoGen") @@ -1541,7 +1541,7 @@ decideGeneralisationPlan dflags type_env bndr_names lbinds sig_fn | otherwise = Nothing - -- The Haskell 98 monomorphism resetriction + -- The Haskell 98 monomorphism restriction restricted (PatBind {}) = True restricted (VarBind { var_id = v }) = no_sig v restricted (FunBind { fun_id = v, fun_matches = m }) = restricted_match m diff --git a/compiler/Eta/TypeCheck/TcCanonical.hs b/compiler/Eta/TypeCheck/TcCanonical.hs index bca8347d..c48c4f6d 100644 --- a/compiler/Eta/TypeCheck/TcCanonical.hs +++ b/compiler/Eta/TypeCheck/TcCanonical.hs @@ -63,7 +63,7 @@ The execution plan for canonicalization is the following: 2) If, when we decompose, we discover a variable on the head then we look at inert_eqs from the current inert for a substitution for this - variable and contine decomposing. Hence we lazily apply the inert + variable and continue decomposing. Hence we lazily apply the inert substitution if it is needed. 3) If no more decomposition is possible, we deeply apply the substitution @@ -209,7 +209,7 @@ canClass, canClassNC -- The canClassNC version is used on non-canonical constraints -- and adds superclasses. 
The plain canClass version is used
 -- for already-canonical class constraints (but which might have
--- been subsituted or somthing), and hence do not need superclasses
+-- been substituted or something), and hence do not need superclasses
 canClassNC ev cls tys = canClass ev cls tys
@@ -255,7 +255,7 @@ For Wanteds:
 Generally speaking we want to be able to add superclasses of wanteds
 for two reasons:
- (1) Oportunities for improvement. Example:
+ (1) Opportunities for improvement. Example:
 class (a ~ b) => C a b
 Wanted constraint is: C alpha beta
 We'd like to simply have C alpha alpha. Similar
@@ -486,7 +486,7 @@ can_eq_nc' _rdr_env _envs ev eq_rel s1@(ForAllTy {}) _ s2@(ForAllTy {}) _
 ; setEvBind orig_ev ev_term
 ; stopWith ev "Deferred polytype equality" } }
 | otherwise
- = do { traceTcS "Ommitting decomposition of given polytype equality" $
+ = do { traceTcS "Omitting decomposition of given polytype equality" $
 pprEq s1 s2
 -- See Note [Do not decompose given polytype equalities]
 ; stopWith ev "Discard given polytype equality" }
@@ -842,7 +842,7 @@
 If we see (T s1 t1 ~ T s2 t2), then we can just decompose to
 (s1 ~ s2, t1 ~ t2) and push those back into the work list. But if
 s1 = K k1
 s2 = K k2
-then we will jus decomopose s1~s2, and it might be better to
+then we will just decompose s1~s2, and it might be better to
 do so on the spot. An important special case is where s1=s2, and we
 get just Refl.
@@ -1235,12 +1235,12 @@ canEqTyVarTyVar, are these
 gets eliminated (improves error messages)
 * If one is a flatten-skolem, put it on the left so that it is
- substituted out Note [Elminate flat-skols]
+ substituted out Note [Eliminate flat-skols]
 fsk ~ a
 Note [Avoid unnecessary swaps]
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-If we swap without actually improving matters, we can get an infnite loop.
+If we swap without actually improving matters, we can get an infinite loop.
Consider work item: a ~ b inert item: b ~ c @@ -1583,7 +1583,7 @@ rewriteEqEvidence :: CtEvidence -- Old evidence :: olhs ~ orhs (not swap -- If swapped -- w : orhs ~ olhs = sym rhs_co ; sym w1 ; lhs_co -- --- It's all a form of rewwriteEvidence, specialised for equalities +-- It's all a form of rewriteEvidence, specialised for equalities rewriteEqEvidence old_ev eq_rel swapped nlhs nrhs lhs_co rhs_co | CtDerived {} <- old_ev = do { mb <- newDerived loc' new_pred diff --git a/compiler/Eta/TypeCheck/TcClassDcl.hs b/compiler/Eta/TypeCheck/TcClassDcl.hs index c4a602e7..e0c729b3 100644 --- a/compiler/Eta/TypeCheck/TcClassDcl.hs +++ b/compiler/Eta/TypeCheck/TcClassDcl.hs @@ -73,7 +73,7 @@ generates newtype CDict a = CDict (forall b. a -> b -> b) -Now DictTy in Type is just a form of type synomym: +Now DictTy in Type is just a form of type synonym: DictTy c t = TyConTy CDict `AppTy` t Death to "ExpandingDicts". diff --git a/compiler/Eta/TypeCheck/TcDeriv.hs b/compiler/Eta/TypeCheck/TcDeriv.hs index 4b3075de..1c2f8b35 100644 --- a/compiler/Eta/TypeCheck/TcDeriv.hs +++ b/compiler/Eta/TypeCheck/TcDeriv.hs @@ -354,7 +354,7 @@ tcDeriving tycl_decls inst_decls deriv_decls ; early_specs <- makeDerivSpecs is_boot tycl_decls inst_decls deriv_decls ; traceTc "tcDeriving 1" (ppr early_specs) - -- for each type, determine the auxliary declarations that are common + -- for each type, determine the auxiliary declarations that are common -- to multiple derivations involving that type (e.g. Generic and -- Generic1 should use the same TcGenGenerics.MetaTyCons) ; (commonAuxs, auxDerivStuff) <- commonAuxiliaries $ map forgetTheta early_specs @@ -558,7 +558,7 @@ Consider this (see Trac #1954): newtype P a = MkP (IO a) deriving Monad If you compile with -fwarn-unused-binds you do not expect the warning -"Defined but not used: data consructor MkP". Yet the newtype deriving +"Defined but not used: data constructor MkP". 
Yet the newtype deriving code does not explicitly mention MkP, but it should behave as if you had written instance Monad P where @@ -931,7 +931,7 @@ When there are no type families, it's quite easy: instance Eq [a] => Eq (S a) -- by coercion sym (Eq (:CoS a)) : Eq [a] ~ Eq (S a) instance Monad [] => Monad S -- by coercion sym (Monad :CoS) : Monad [] ~ Monad S -When type familes are involved it's trickier: +When type families are involved it's trickier: data family T a b newtype instance T Int a = MkT [a] deriving( Eq, Monad ) @@ -1129,7 +1129,7 @@ The DeriveAnyClass extension adds a third way to derive instances, based on empty instance declarations. The canonical use case is in combination with GHC.Generics and default method -signatures. These allow us have have instance declarations be empty, but still +signatures. These allow us to have instance declarations be empty, but still useful, e.g. data T a = ...blah..blah... deriving( Generic ) @@ -1602,7 +1602,7 @@ mkNewTypeEqn dflags overlap_mode tvs substTheta (zipOpenTvSubst cls_tyvars inst_tys) (classSCTheta cls) - -- Next we collect Coercible constaints between + -- Next we collect Coercible constraints between -- the Class method types, instantiated with the representation and the -- newtype type; precisely the constraints required for the -- calls to coercible that we are going to generate. @@ -1658,7 +1658,7 @@ e.g. newtype S1 = S1 [T1 ()] newtype T1 a = T1 (StateT S1 IO a ) deriving( Monad ) Remember, too, that type families are currently (conservatively) given a recursive flag, so this also allows newtype deriving to work -for type famillies. +for type families. We used to exclude recursive types, because we had a rather simple minded way of generating the instance decl: @@ -1697,7 +1697,7 @@ variable, tv. 
\item The (k,TyVarTy tv) pairs in a solution are canonically -ordered by sorting on type varible, tv, (major key) and then class, k, +ordered by sorting on type variable, tv, (major key) and then class, k, (minor key) \end{itemize} -} @@ -2074,7 +2074,7 @@ a)) will be solved by the explicit Eq (N a) instance. We do *not* create the superclasses by casting the superclass dictionaries for the representation type. -See the paper "Safe zero-cost coercions for Hsakell". +See the paper "Safe zero-cost coercions for Haskell". ************************************************************************ diff --git a/compiler/Eta/TypeCheck/TcEnv.hs b/compiler/Eta/TypeCheck/TcEnv.hs index 4cdd94cf..210fe290 100644 --- a/compiler/Eta/TypeCheck/TcEnv.hs +++ b/compiler/Eta/TypeCheck/TcEnv.hs @@ -418,7 +418,7 @@ getScopedTyVarBinds {- Note [Initialising the type environment for GHCi] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -tcExtendGhciIdEnv extends the local type environemnt with GHCi +tcExtendGhciIdEnv extends the local type environment with GHCi identifiers (from ic_tythings), bound earlier in the interaction. They may have free type variables (RuntimeUnk things), and if we don't register these free TyVars as global TyVars then the typechecker will @@ -433,7 +433,7 @@ Note especially that That's important because some are not closed (ie have free tyvars) and the compiler assumes that the global type env (tcg_type_env) has no free tyvars. 
Actually, only ones with Internal names can be non-closed - so we jsut add those + so we just add those * The tct_closed flag depends on whether the thing has free (RuntimeUnk) type variables @@ -471,7 +471,7 @@ tcExtendGhciIdEnv ids thing_inside | otherwise = NotTopLevel tcExtendLetEnv :: TopLevelFlag -> TopLevelFlag -> [TcId] -> TcM a -> TcM a --- Used for both top-level value bindings and and nested let/where-bindings +-- Used for both top-level value bindings and nested let/where-bindings tcExtendLetEnv top_lvl closed ids thing_inside = do { stage <- getStage ; tc_extend_local_env (top_lvl, thLevel stage) @@ -657,7 +657,7 @@ topIdLvl :: Id -> ThLevel -- E.g. this is bad: -- x = [| foo |] -- $( f x ) --- By the time we are prcessing the $(f x), the binding for "x" +-- By the time we are processing the $(f x), the binding for "x" -- will be in the global env, not the local one. topIdLvl id | isLocalId id = outerLevel | otherwise = impLevel @@ -714,7 +714,7 @@ tcGetDefaultTys {- Note [Default unitTy] ~~~~~~~~~~~~~~~~~~~~~ -In interative mode (or with -XExtendedDefaultRules) we add () as the first type we +In interactive mode (or with -XExtendedDefaultRules) we add () as the first type we try when defaulting. This has very little real impact, except in the following case. Consider: Text.Printf.printf "hello" @@ -797,7 +797,7 @@ simpleInstInfoTyCon inst = tcTyConAppTyCon (simpleInstInfoTy inst) {- Make a name for the dict fun for an instance decl. It's an *external* -name, like otber top-level names, and hence must be made with newGlobalBinder. +name, like other top-level names, and hence must be made with newGlobalBinder. -} newDFunName :: Class -> [Type] -> SrcSpan -> TcM Name @@ -809,7 +809,7 @@ newDFunName clas tys loc {- It may be advantageous at some point to build a string for a type which actually -encompasses the structure of the type like parethesizing. +encompasses the structure of the type like parenthesizing. 
Even in the new mkInfoString something like `a (b c)` and `(a b) c` will result in
the same string "a_b_c".
-}
diff --git a/compiler/Eta/TypeCheck/TcErrors.hs b/compiler/Eta/TypeCheck/TcErrors.hs
index f2fc5f46..fc564e5b 100644
--- a/compiler/Eta/TypeCheck/TcErrors.hs
+++ b/compiler/Eta/TypeCheck/TcErrors.hs
@@ -162,7 +162,7 @@ data ReportErrCtxt
 -- ic_skols and givens are tidied, rest are not
 , cec_tidy :: TidyEnv
 , cec_binds :: Maybe EvBindsVar
- -- Nothinng <=> Report all errors, including holes; no bindings
+ -- Nothing <=> Report all errors, including holes; no bindings
 -- Just ev <=> make some errors (depending on cec_defer)
 -- into warnings, and emit evidence bindings
 -- into 'ev' for unsolved constraints
@@ -249,7 +249,7 @@ reportWanteds ctxt wanted@(WC { wc_simple = simples, wc_insol = insols, wc_impl
 ctxt2 = ctxt { cec_suppress = suppress2 }
 reportSimples :: ReportErrCtxt -> Cts -> TcM ()
-reportSimples ctxt simples -- Here 'simples' includes insolble goals
+reportSimples ctxt simples -- Here 'simples' includes insoluble goals
 = traceTc "reportSimples" (vcat [ ptext (sLit "Simples =") <+> ppr simples
 , ptext (sLit "Suppress =") <+> ppr (cec_suppress ctxt)])
 >> tryReporters
@@ -1072,7 +1072,7 @@ mkExpectedActualMsg ty1 ty2 (TypeEqOrigin { uo_actual = act, uo_expected = exp }
 msg = vcat [ text "Expected type:" <+> ppr exp
 , text " Actual type:" <+> ppr act ]
-mkExpectedActualMsg _ _ _ = panic "mkExprectedAcutalMsg"
+mkExpectedActualMsg _ _ _ = panic "mkExpectedActualMsg"
 sameOccExtra :: TcType -> TcType -> SDoc
 -- See Note [Disambiguating (X ~ X) errors]
@@ -1136,7 +1136,7 @@
But nowadays when inferring the type of a function with no type signature,
even if there are errors inside, we still generalise its signature and
carry on. For example
 f x = x:x
-Here we will infer somethiing like
+Here we will infer something like
 f :: forall a. a -> [a]
with a suspended error of (a ~ [a]). So 'a' is now a skolem, but not
one bound by the programmer!
Here we really should report an occurs check. @@ -1459,7 +1459,7 @@ mkAmbigMsg ct | not (null ambig_tvs) = pp_ambig (ptext (sLit "type")) ambig_tvs - | otherwise -- All ambiguous kind variabes; suggest -fprint-explicit-kinds + | otherwise -- All ambiguous kind variables; suggest -fprint-explicit-kinds = vcat [ pp_ambig (ptext (sLit "kind")) ambig_kvs , sdocWithDynFlags suggest_explicit_kinds ] @@ -1571,7 +1571,7 @@ relevantBindings want_filtering ctxt ct else if run_out n_left && id_tvs `subVarSet` tvs_seen -- We've run out of n_left fuel and this binding only - -- mentions aleady-seen type variables, so discard it + -- mentions already-seen type variables, so discard it then go tidy_env n_left tvs_seen docs True tc_bndrs -- Keep this binding, decrement fuel diff --git a/compiler/Eta/TypeCheck/TcEvidence.hs b/compiler/Eta/TypeCheck/TcEvidence.hs index 33c4c4d6..454b54bf 100644 --- a/compiler/Eta/TypeCheck/TcEvidence.hs +++ b/compiler/Eta/TypeCheck/TcEvidence.hs @@ -743,7 +743,7 @@ data EvTerm -- | Instructions on how to make a 'Typeable' dictionary. data EvTypeable = EvTypeableTyCon TyCon [Kind] - -- ^ Dicitionary for concrete type constructors. + -- ^ Dictionary for concrete type constructors. | EvTypeableTyApp (EvTerm,Type) (EvTerm,Type) -- ^ Dictionary for type applications; this is used when we have @@ -830,7 +830,7 @@ Conceptually, this class has infinitely many instances: ... In practice, we solve `KnownNat` predicates in the type-checker -(see typecheck/TcInteract.hs) because we can't have infinately many instances. +(see typecheck/TcInteract.hs) because we can't have infinitely many instances. The evidence (aka "dictionary") for `KnownNat` is of the form `EvLit (EvNum n)`. 
We make the following assumptions about dictionaries in GHC:
@@ -851,7 +851,7 @@
 a more convenient function, defined in terms of `natSing`:
 The reason we don't use this directly in the class is that it is simpler and
 more efficient to pass around an integer rather than an entier function,
-especialy when the `KnowNat` evidence is packaged up in an existential.
+especially when the `KnownNat` evidence is packaged up in an existential.
 The story for kind `Symbol` is analogous:
 * class KnownSymbol
@@ -937,7 +937,7 @@
 Important Details:
 [G] d :: IP "stk" CallStack
 in scope. In the interaction phase, GHC would normally solve the use of ?stk
- directly from the given, i.e. re-using the dicionary. But this is NOT what we
+ directly from the given, i.e. re-using the dictionary. But this is NOT what we
 want! We want to generate a *new* CallStack with ?loc's SrcLoc pushed
 onto the given CallStack. So we must take care in TcInteract.interactDict to
 prioritize solving wanted CallStacks.
diff --git a/compiler/Eta/TypeCheck/TcExpr.hs b/compiler/Eta/TypeCheck/TcExpr.hs
index 0c20d7a0..623850f4 100644
--- a/compiler/Eta/TypeCheck/TcExpr.hs
+++ b/compiler/Eta/TypeCheck/TcExpr.hs
@@ -304,7 +304,7 @@
 People write
 so much, where
 runST :: (forall s. ST s a) -> a
 that I have finally given in and written a special type-checking
-rule just for saturated appliations of ($).
+rule just for saturated applications of ($).
 * Infer the type of the first argument
 * Decompose it; should be of form (arg2_ty -> res_ty), where
 arg2_ty might be a polytype
diff --git a/compiler/Eta/TypeCheck/TcFlatten.hs b/compiler/Eta/TypeCheck/TcFlatten.hs
index 3c286f45..629965df 100644
--- a/compiler/Eta/TypeCheck/TcFlatten.hs
+++ b/compiler/Eta/TypeCheck/TcFlatten.hs
@@ -291,8 +291,8 @@
 Is this type ambiguous: (Foo e ~ Maybe e) => Foo e
 [W] fmv2 ~ Maybe e
 [W] fmv2 ~ Maybe ee
-Now maybe we shuld get [D] e ~ ee, and then we'd solve it entirely.
-But if in a smilar situation we got [D] Int ~ Bool we'd be back +Now maybe we should get [D] e ~ ee, and then we'd solve it entirely. +But if in a similar situation we got [D] Int ~ Bool we'd be back to complaining about wanted/wanted interactions. Maybe this arises also for fundeps? @@ -328,7 +328,7 @@ Now we don’t see that fmv ~ fmv’, which is a problem for injectivity detecti Conclusion: rewrite wanteds with wanted for all untouchables. -skol ~ untch, must re-orieint to untch ~ skol, so that we can use it to rewrite. +skol ~ untch, must re-orient to untch ~ skol, so that we can use it to rewrite. @@ -411,7 +411,7 @@ flatten ---------------------------- -indexed-types/should_failt/T4179 +indexed-types/should_fail/T4179 after solving [W] fmv_1 ~ fmv_2 @@ -475,7 +475,7 @@ Assuming NOT rewriting wanteds with wanteds [G] V a ~ f_aBg Worklist includes [W] Scalar fmv_aBi ~ fmv_aBk - fmv_aBi, fmv_aBk are flatten unificaiton variables + fmv_aBi, fmv_aBk are flatten unification variables Work item: [W] V fsk_aBh ~ fmv_aBi @@ -631,7 +631,7 @@ other examples where lazy flattening caused problems. Bottom line: FM_Avoid is unused for now (Nov 14). Note: T5321Fun got faster when I disabled FM_Avoid - T5837 did too, but it's pathalogical anyway + T5837 did too, but it's pathological anyway Note [Phantoms in the flattener] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -944,7 +944,7 @@ flatten_exact_fam_app_fully fmode tc tys , cc_fsk = fsk } ; emitFlatWork ct - -- Now that flattening has finished, attempty to unify the + -- Now that flattening has finished, attempt to unify the -- type variables of a generic JWT when reducing Extends'. -- That way, the next time they attempt to solve it, it will -- succeed. @@ -1093,7 +1093,7 @@ guarantee that this recursive use will terminate. 
a not in s, OR the path from the top of s to a includes at least one non-newtype - then the extended substition T = S+(a -fw-> t) + then the extended substitution T = S+(a -fw-> t) is an inert generalised substitution. The idea is that @@ -1124,19 +1124,19 @@ The idea is that have (a -fs-> a) in S, which contradicts (WF2). * The extended substitution satisfies (WF1) and (WF2) - - (K1) plus (L1) guarantee that the extended substiution satisfies (WF1). + - (K1) plus (L1) guarantee that the extended substitution satisfies (WF1). - (T3) guarantees (WF2). * (K2) is about inertness. Intuitively, any infinite chain T^0(f,t), - T^1(f,t), T^2(f,T).... must pass through the new work item infnitely - often, since the substution without the work item is inert; and must - pass through at least one of the triples in S infnitely often. + T^1(f,t), T^2(f,T).... must pass through the new work item infinitely + often, since the substitution without the work item is inert; and must + pass through at least one of the triples in S infinitely often. - (K2a): if not(fs>=fs) then there is no f that fs can rewrite (fs>=f), and hence this triple never plays a role in application S(f,a). It is always safe to extend S with such a triple. - (NB: we could strengten K1) in this way too, but see K3. + (NB: we could strengthen K1) in this way too, but see K3. - (K2b): If this holds, we can't pass through this triple infinitely often, because if we did then fs>=f, fw>=f, hence fs>=fw, @@ -1226,7 +1226,7 @@ roles. For example, if we have and we wish to compute S(W/R, T a b), the correct answer is T a Bool, NOT T Int Bool. The reason is that T's first parameter has a nominal role, and thus rewriting a to Int in T a b is wrong. Indeed, this non-congruence of -subsitution means that the proof in Note [The inert equalities] may need +substitution means that the proof in Note [The inert equalities] may need to be revisited, but we don't think that the end conclusion is wrong. 
-}
@@ -1265,7 +1265,7 @@ flattenTyVarOuter :: FlattenEnv -> TcTyVar
 flattenTyVarOuter fmode tv
 | not (isTcTyVar tv) -- Happens when flatten under a (forall a. ty)
 = Left `liftM` flattenTyVarFinal fmode tv
- -- So ty contains refernces to the non-TcTyVar a
+ -- So ty contains references to the non-TcTyVar a
 | otherwise
 = do { mb_ty <- isFilledMetaTyVar_maybe tv
@@ -1312,7 +1312,7 @@ flattenTyVarFinal fmode tv
 Note [An alternative story for the inert substitution]
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 (This entire note is just background, left here in case we ever want
- to return the the previousl state of affairs)
+ to return to the previous state of affairs)
 We used (GHC 7.8) to have this story for the inert substitution inert_eqs
@@ -1329,7 +1329,7 @@
 It is easy to implement, in TcInteract.kick_out, by only kicking out an inert
 only if (a) the work item can rewrite the inert AND
 (b) the inert cannot rewrite the work item
-This is signifcantly harder to think about. It can save a LOT of work
+This is significantly harder to think about. It can save a LOT of work
 in occurs-check cases, but we don't care about them much. Trac #5837
 is an example; all the constraints here are Givens
@@ -1374,7 +1374,7 @@
 is an example; all the constraints here are Givens
 inert fsk ~ ((fsk3, TF Int), TF Int)
 Because the incoming given rewrites all the inert givens, we get more and
-more duplication in the inert set. But this really only happens in pathalogical
+more duplication in the inert set. But this really only happens in pathological
 casee, so we don't care.
-} diff --git a/compiler/Eta/TypeCheck/TcForeign.hs b/compiler/Eta/TypeCheck/TcForeign.hs index 2e5b7223..82f4588d 100644 --- a/compiler/Eta/TypeCheck/TcForeign.hs +++ b/compiler/Eta/TypeCheck/TcForeign.hs @@ -247,7 +247,7 @@ checkForeignRes nonIOResultOk checkSafe predResType ty -- handle safe language typecheck fail _ | checkSafe && safeLanguageOn dflags -> addErrTc $ illegalForeignTyErr result safeHsErr - -- sucess! non-IO return is fine + -- success! non-IO return is fine _ -> return () where safeHsErr = str $ "Safe Haskell is on, all FFI imports must be in the" ++ " IO monad" @@ -328,7 +328,7 @@ checkJavaTarget (StaticTarget importFS _ _) _ -> exactlyOneArgument "@static @field" partsRest2 staticFieldExample else (False, vcat [ str "@static @" <> str secondKeyword - <+> str "is not a valid annotiation." + <+> str "is not a valid annotation." , str "Perhaps you meant to write @static @field?"]) | argument:_partsRest2 <- partsRest -> checkDotInStatic "@static" argument partsRest staticMethodExample @@ -344,7 +344,7 @@ checkJavaTarget (StaticTarget importFS _ _) then exactlyOneArgument "@wrapper @abstract" partsRest2 abstractWrapperExample else (False, vcat [ str "@wrapper @" <> str secondKeyword - <+> str "is not a valid annotiation." + <+> str "is not a valid annotation." , str "Perhaps you meant to write @wrapper @abstract?"]) | otherwise -> exactlyOneArgument "@wrapper" partsRest interfaceWrapperExample diff --git a/compiler/Eta/TypeCheck/TcGenDeriv.hs b/compiler/Eta/TypeCheck/TcGenDeriv.hs index 75781982..fae8a6a1 100644 --- a/compiler/Eta/TypeCheck/TcGenDeriv.hs +++ b/compiler/Eta/TypeCheck/TcGenDeriv.hs @@ -417,7 +417,7 @@ gen_Ord_binds loc tycon = nlHsCase (nlHsVar a_RDR) $ map (mkOrdOpAlt op) tycon_data_cons -- i.e. case a of { C1 x y -> case b of C1 x y -> ....compare x,y... - -- C2 x -> case b of C2 x -> ....comopare x.... } + -- C2 x -> case b of C2 x -> ....compare x.... 
} | null non_nullary_cons -- All nullary, so go straight to comparing tags = mkTagCmp op @@ -537,7 +537,7 @@ unliftedOrdOp tycon ty op a b b_expr = nlHsVar b unliftedCompare :: RdrName -> RdrName - -> LHsExpr RdrName -> LHsExpr RdrName -- What to cmpare + -> LHsExpr RdrName -> LHsExpr RdrName -- What to compare -> LHsExpr RdrName -> LHsExpr RdrName -> LHsExpr RdrName -- Three results -> LHsExpr RdrName -- Return (if a < b then lt else if a == b then eq else gt) @@ -929,7 +929,7 @@ Note that we use expectP (Ident "T1") rather than Ident "T1" <- lexP -The latter desugares to inline code for matching the Ident and the +The latter desugars to inline code for matching the Ident and the string, and this can be very voluminous. The former is much more compact. Cf Trac #7258, although that also concerned non-linearity in the occurrence analyser, a separate issue. @@ -1838,7 +1838,7 @@ gen_Traversable_binds loc tycon , ft_tup = \t gs -> do -- traverse f = \x -> case x of (a1,a2,..) -> gg <- sequence gs -- (,,) <$> g1 a1 <*> g2 a2 <*> .. mkSimpleLam $ mkSimpleTupleCase match_for_con t gg - , ft_ty_app = \_ g -> nlHsApp traverse_Expr <$> g -- traverse f = travese g + , ft_ty_app = \_ g -> nlHsApp traverse_Expr <$> g -- traverse f = traverse g , ft_forall = \_ g -> g , ft_co_var = panic "contravariant" , ft_fun = panic "function" diff --git a/compiler/Eta/TypeCheck/TcGenGenerics.hs b/compiler/Eta/TypeCheck/TcGenGenerics.hs index dd08c013..3a73a1ec 100644 --- a/compiler/Eta/TypeCheck/TcGenGenerics.hs +++ b/compiler/Eta/TypeCheck/TcGenGenerics.hs @@ -637,7 +637,7 @@ tc_mkRepTy gk_ tycon metaDts = Gen0_ -> mkRec0 t Gen1_ argVar -> argPar argVar t where - -- Builds argument represention for Rep1 (more complicated due to + -- Builds argument representation for Rep1 (more complicated due to -- the presence of composition). 
argPar argVar = argTyFold argVar $ ArgTyAlg {ata_rec0 = mkRec0, ata_par1 = mkPar1, diff --git a/compiler/Eta/TypeCheck/TcHsSyn.hs b/compiler/Eta/TypeCheck/TcHsSyn.hs index 0bedc433..94098a60 100644 --- a/compiler/Eta/TypeCheck/TcHsSyn.hs +++ b/compiler/Eta/TypeCheck/TcHsSyn.hs @@ -1344,7 +1344,7 @@ zonkTyVarOcc env@(ZonkEnv zonk_unbound_tyvar tv_env _) tv zonkTcTypeToType env (tyVarKind tv) ; zonk_unbound_tyvar (setTyVarKind tv kind) } Indirect ty -> do { zty <- zonkTcTypeToType env ty - -- Small optimisation: shortern-out indirect steps + -- Small optimisation: shorten-out indirect steps -- so that the old type may be more easily collected. ; writeMutVar ref (Indirect zty) ; return zty } } diff --git a/compiler/Eta/TypeCheck/TcHsType.hs b/compiler/Eta/TypeCheck/TcHsType.hs index c6c0755e..ebd77ec2 100644 --- a/compiler/Eta/TypeCheck/TcHsType.hs +++ b/compiler/Eta/TypeCheck/TcHsType.hs @@ -93,7 +93,7 @@ But in mutually recursive groups of type and class decls we do For example, when we find (forall a m. m a -> m a) -we bind a,m to kind varibles and kind-check (m a -> m a). This makes +we bind a,m to kind variables and kind-check (m a -> m a). This makes a get kind *, and m get kind *->*. Now we typecheck (m a -> m a) in an environment that binds a and m suitably. @@ -780,7 +780,7 @@ Note [Body kind of a forall] The body of a forall is usually a type, but in principle there's no reason to prohibit *unlifted* types. In fact, GHC can itself construct a function with an -unboxed tuple inside a for-all (via CPR analyis; see +unboxed tuple inside a for-all (via CPR analysis; see typecheck/should_compile/tc170). Moreover in instance heads we get forall-types with @@ -895,7 +895,7 @@ Help functions for type applications addTypeCtxt :: LHsType Name -> TcM a -> TcM a -- Wrap a context around only if we want to show that contexts. 
- -- Omit invisble ones and ones user's won't grok + -- Omit invisible ones and ones user's won't grok addTypeCtxt (L _ ty) thing = addErrCtxt doc thing where @@ -1141,7 +1141,7 @@ tcTyClTyVars tycon (HsQTvs { hsq_kvs = hs_kvs, hsq_tvs = hs_tvs }) thing_inside ; tcExtendTyVarEnv tvs (thing_inside (kvs ++ tvs) res) } where -- In the case of associated types, the renamer has - -- ensured that the names are in commmon + -- ensured that the names are in common -- e.g. class C a_29 where -- type T b_30 a_29 :: * -- Here the a_29 is shared @@ -1157,7 +1157,7 @@ tcDataKindSig :: Kind -> TcM [TyVar] -- e.g. data T :: * -> * -> * where ... -- This function makes up suitable (kinded) type variables for -- the argument kinds, and checks that the result kind is indeed *. --- We use it also to make up argument type variables for for data instances. +-- We use it also to make up argument type variables for data instances. tcDataKindSig kind = do { checkTc (isLiftedTypeKind res_kind) (badKindSig kind) ; span <- getSrcSpanM @@ -1213,7 +1213,7 @@ They never have explicit kinds (because this is source-code only) They are mutable (because they can get bound to a more specific type). Usually we kind-infer and expand type splices, and then -tupecheck/desugar the type. That doesn't work well for scoped type +typecheck/desugar the type. That doesn't work well for scoped type variables, because they scope left-right in patterns. (e.g. in the example above, the 'a' in (y::a) is bound by the 'a' in (x::a). @@ -1229,7 +1229,7 @@ This is bad because throwing away the kind checked type throws away its splices. But too bad for now. [July 03] Historical note: - We no longer specify that these type variables must be univerally + We no longer specify that these type variables must be universally quantified (lots of email on the subject). 
If you want to put that back in, you need to a) Do a checkSigTyVars after thing_inside @@ -1345,12 +1345,12 @@ Consider Here * The pattern (T p1 p2) creates a *skolem* type variable 'a_sk', - It must be a skolem so that that it retains its identity, and + It must be a skolem so that it retains its identity, and TcErrors.getSkolemInfo can thereby find the binding site for the skolem. * The type signature pattern (f :: a->Int) binds "a" -> a_sig in the envt - * Then unificaiton makes a_sig := a_sk + * Then unification makes a_sig := a_sk That's why we must make a_sig a MetaTv (albeit a SigTv), not a SkolemTv, so that it can unify to a_sk. diff --git a/compiler/Eta/TypeCheck/TcInstDcls.hs b/compiler/Eta/TypeCheck/TcInstDcls.hs index cb682557..da8a5fab 100644 --- a/compiler/Eta/TypeCheck/TcInstDcls.hs +++ b/compiler/Eta/TypeCheck/TcInstDcls.hs @@ -141,7 +141,7 @@ Note [Instances and loop breakers] * Instead the idea is to inline df_i into op1_i, which may then select methods from the MkC record, and thereby break the recursion with - df_i, leaving a *self*-recurisve op1_i. (If op1_i doesn't call op at + df_i, leaving a *self*-recursive op1_i. (If op1_i doesn't call op at the same type, it won't mention df_i, so there won't be recursion in the first place.) @@ -170,7 +170,7 @@ big intermediate) if you inline a bit too much. Instead we use a cunning trick. * We arrange that 'df' and 'op2' NEVER inline. - * We arrange that 'df' is ALWAYS defined in the sylised form + * We arrange that 'df' is ALWAYS defined in the stylised form df d1 d2 = MkD ($cop1 d1 d2) ($cop2 d1 d2) ... * We give 'df' a magical unfolding (DFunUnfolding [$cop1, $cop2, ..]) @@ -437,7 +437,7 @@ tcInstDecls1 tycl_decls inst_decls deriv_decls 2 (pprInstanceHdr (iSpec i)) -- Report an error or a warning for a `Typeable` instances. - -- If we are workikng on an .hs-boot file, we just report a warning, + -- If we are working on an .hs-boot file, we just report a warning, -- and ignore the instance. 
We do this, to give users a chance to fix -- their code. typeable_err i = @@ -564,7 +564,7 @@ tcATDefault inst_subst defined_ats (ATI fam_tc defs) | tyConName fam_tc `elemNameSet` defined_ats = return [] - -- No user instance, have defaults ==> instatiate them + -- No user instance, have defaults ==> instantiate them -- Example: class C a where { type F a b :: *; type F a b = () } -- instance C [x] -- Then we want to generate the decl: type F [x] b = () @@ -998,7 +998,7 @@ that the type variables bound in the signature will scope over the body. What about the check that the instance method signature is more polymorphic than the instantiated class method type? We just do a tcSubType call in mkMethIds, and use the HsWrapper thus generated in -the method AbsBind. It's very like the tcSubType impedence-matching +the method AbsBind. It's very like the tcSubType impedance-matching call in mkExport. We have to pass the HsWrapper into tcInstanceMethodBody. @@ -1328,7 +1328,7 @@ tcInstanceMethods dfun_id clas tyvars dfun_ev_vars inst_tys -- check if one of the minimal complete definitions is satisfied checkMinimalDefinition = whenIsJust (isUnsatisfied methodExists (classMinimalDef clas)) $ - warnUnsatisifiedMinimalDefinition + warnUnsatisfiedMinimalDefinition where methodExists meth = isJust (findMethodBind meth binds) @@ -1368,8 +1368,8 @@ warnMissingMethodOrAT what name (ptext (sLit "No explicit") <+> text what <+> ptext (sLit "or default declaration for") <+> quotes (ppr name)) } -warnUnsatisifiedMinimalDefinition :: ClassMinimalDef -> TcM () -warnUnsatisifiedMinimalDefinition mindef +warnUnsatisfiedMinimalDefinition :: ClassMinimalDef -> TcM () +warnUnsatisfiedMinimalDefinition mindef = do { warn <- woptM Opt_WarnMissingMethods ; warnTc (Reason Opt_WarnMissingMethods) warn message } @@ -1419,7 +1419,7 @@ less work to generate the translated version! Note [INLINE and default methods] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Default methods need special case. 
They are supposed to behave rather like -macros. For exmample +macros. For example class Foo a where op1, op2 :: Bool -> a -> a diff --git a/compiler/Eta/TypeCheck/TcInteract.hs b/compiler/Eta/TypeCheck/TcInteract.hs index aa497ae6..9c05c578 100644 --- a/compiler/Eta/TypeCheck/TcInteract.hs +++ b/compiler/Eta/TypeCheck/TcInteract.hs @@ -67,7 +67,7 @@ Note [Basic Simplifier Plan] - canonicalization - inert reactions - spontaneous reactions - - top-level intreactions + - top-level interactions Each stage returns a StopOrContinue and may have sideffected the inerts or worklist. @@ -118,7 +118,7 @@ Note [Running plugins on unflattened wanteds] There is an annoying mismatch between solveSimpleGivens and solveSimpleWanteds, because the latter needs to fiddle with the inert -set, unflatten and and zonk the wanteds. It passes the zonked wanteds +set, unflatten and zonk the wanteds. It passes the zonked wanteds to runTcPluginsWanteds, which produces a replacement set of wanteds, some additional insolubles and a flag indicating whether to go round the loop again. If so, prepareInertsForImplications is used to remove @@ -181,7 +181,7 @@ solveSimples cts = {-# SCC "solve_loop" #-} do { sel <- selectNextWorkItem max_depth ; case sel of - NoWorkRemaining -- Done, successfuly (modulo frozen) + NoWorkRemaining -- Done, successfully (modulo frozen) -> do dicts <- getUnsolvedInertDicts new_work <- getUniqueInstanceWanteds dyn_flags dicts if null new_work @@ -429,7 +429,7 @@ But this isn't quite true. Suppose we have, c1: [W] beta ~ [alpha], c2 : [W] blah, c3 :[W] alpha ~ Int After processing the first two, we get c1: [G] beta ~ [alpha], c2 : [W] blah -Now, c3 does not interact with the the given c1, so when we spontaneously +Now, c3 does not interact with the given c1, so when we spontaneously solve c3, we must re-react it with the inert set. So we can attempt a reaction between inert c2 [W] and work-item c3 [G]. 
@@ -670,7 +670,7 @@ f2 :: (?x :: Int, ?x :: Char) => Int f2 = ?x Both of these are actually wrong: when we try to use either one, -we'll get two incompatible wnated constraints (?x :: Int, ?x :: Char), +we'll get two incompatible wanted constraints (?x :: Int, ?x :: Char), which would lead to an error. I can think of two ways to fix this: @@ -805,7 +805,7 @@ Notice the orientation (xi_w ~ xi_i) NOT (xi_i ~ xi_w): new_work :: xi_w ~ xi_i cw := ci ; sym new_work Why? Consider the simplest case when xi1 is a type variable. If -we generate xi1~xi2, porcessing that constraint will kick out 'ci'. +we generate xi1~xi2, processing that constraint will kick out 'ci'. If we generate xi2~xi1, there is less chance of that happening. Of course it can and should still happen if xi1=a, xi1=Int, say. But we want to avoid it happening needlessly. @@ -1113,7 +1113,7 @@ Note [Kick out insolubles] ~~~~~~~~~~~~~~~~~~~~~~~~~~ Suppose we have an insoluble alpha ~ [alpha], which is insoluble because an occurs check. And then we unify alpha := [Int]. -Then we really want to rewrite the insouluble to [Int] ~ [[Int]]. +Then we really want to rewrite the insoluble to [Int] ~ [[Int]]. Now it can be decomposed. Otherwise we end up with a "Can't match [Int] ~ [[Int]]" which is true, but a bit confusing because the outer type constructors match. @@ -1143,7 +1143,7 @@ Note [Superclasses and recursive dictionaries] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Overlaps with Note [SUPERCLASS-LOOP 1] Note [SUPERCLASS-LOOP 2] - Note [Recursive instances and superclases] + Note [Recursive instances and superclasses] ToDo: check overlap and delete redundant stuff Right before adding a given into the inert set, we must @@ -1377,7 +1377,7 @@ Solution: Note [SUPERCLASS-LOOP 2] ~~~~~~~~~~~~~~~~~~~~~~~~ -We need to be careful when adding "the constaint we are trying to prove". +We need to be careful when adding "the constraint we are trying to prove". 
Suppose we are *given* d1:Ord a, and want to deduce (d2:C [a]) where class Ord a => C a where @@ -1415,7 +1415,7 @@ first time, but reducible next time. Now we implement the Right Solution, which is to check for loops directly when adding superclasses. It's a bit like the occurs check in unification. -Note [Recursive instances and superclases] +Note [Recursive instances and superclasses] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this code, which arises in the context of "Scrap Your Boilerplate with Class". @@ -1439,7 +1439,7 @@ Using the instance for Data, we therefore need (Sat (Maybe [a], Data Maybe a) But we are given (Foo a), and hence its superclass (Data Maybe a). So that leaves (Sat (Maybe [a])). Using the instance for Sat means -we need (Foo [a]). And that is the very dictionary we are bulding +we need (Foo [a]). And that is the very dictionary we are building an instance for! So we must put that in the "givens". So in this case we have Given: Foo a, Foo [a] @@ -1631,7 +1631,7 @@ doTopReactFunEq work_item@(CFunEqCan { cc_ev = old_ev, cc_fun = fam_tc -- final_co :: fsk ~ rhs_ty ; new_ev <- newGivenEvVar deeper_loc (mkTcEqPred (mkTyVarTy fsk) rhs_ty, EvCoercion final_co) - ; emitWorkNC [new_ev] -- Non-cannonical; that will mean we flatten rhs_ty + ; emitWorkNC [new_ev] -- Non-canonical; that will mean we flatten rhs_ty ; stopWith old_ev "Fun/Top (given)" } | not (fsk `elemVarSet` tyVarsOfType rhs_ty) @@ -1767,7 +1767,7 @@ Note [FunDep and implicit parameter reactions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Currently, our story of interacting two dictionaries (or a dictionary and top-level instances) for functional dependencies, and implicit -paramters, is that we simply produce new Derived equalities. So for example +parameters, is that we simply produce new Derived equalities. So for example class D a b | a -> b where ... Inert: @@ -1898,7 +1898,7 @@ two possibilities: now solvable by the given Q [a]. 
However, this option is restrictive, for instance [Example 3] from - Note [Recursive instances and superclases] will fail to work. + Note [Recursive instances and superclasses] will fail to work. 2. Ignore the problem, hoping that the situations where there exist indeed such multiple strategies are rare: Indeed the cause of the previous @@ -2164,7 +2164,7 @@ Other notes: -} -- | Assumes that we've checked that this is the 'Typeable' class, --- and it was applied to the correc arugment. +-- and it was applied to the correct argument. matchTypeableClass :: Class -> Kind -> Type -> CtLoc -> TcS LookupInstResult matchTypeableClass clas _k t loc diff --git a/compiler/Eta/TypeCheck/TcMType.hs b/compiler/Eta/TypeCheck/TcMType.hs index 795b4d28..53dfc7dc 100644 --- a/compiler/Eta/TypeCheck/TcMType.hs +++ b/compiler/Eta/TypeCheck/TcMType.hs @@ -297,7 +297,7 @@ Then we have to instantiate the kind variables, build a substitution from old variables to the new variables, then instantiate the type variables substituting the original kind. -Exemple: If we want to instantiate +Example: If we want to instantiate [(k1 :: BOX), (k2 :: BOX), (a :: k1 -> k2), (b :: k1)] we want [(?k1 :: BOX), (?k2 :: BOX), (?a :: ?k1 -> ?k2), (?b :: ?k1)] diff --git a/compiler/Eta/TypeCheck/TcMatches.hs b/compiler/Eta/TypeCheck/TcMatches.hs index 6c55ac01..fa443930 100644 --- a/compiler/Eta/TypeCheck/TcMatches.hs +++ b/compiler/Eta/TypeCheck/TcMatches.hs @@ -198,7 +198,7 @@ tcMatch ctxt pat_tys rhs_ty match tc_grhss _ (Just {}) _ _ = panic "tc_ghrss" -- Rejected by renamer - -- For (\x -> e), tcExpr has already said "In the expresssion \x->e" + -- For (\x -> e), tcExpr has already said "In the expression \x->e" -- so we don't want to add "In the lambda abstraction \x->e" add_match_ctxt match thing_inside = case mc_what ctxt of @@ -327,7 +327,7 @@ tcStmtsAndThen ctxt stmt_chk (L loc (LetStmt binds) : stmts) res_ty thing_inside -- Don't set the error context for an ApplicativeStmt. 
It ought to be -- possible to do this with a popErrCtxt in the tcStmt case for --- ApplicativeStmt, but it did someting strange and broke a test (ado002). +-- ApplicativeStmt, but it did something strange and broke a test (ado002). tcStmtsAndThen ctxt stmt_chk (L loc stmt : stmts) res_ty thing_inside | ApplicativeStmt{} <- stmt = do { (stmt', (stmts', thing)) <- @@ -633,7 +633,7 @@ tcMcStmt ctxt (TransStmt { trS_stmts = stmts, trS_bndrs = bindersMap ; using' <- tcPolyExpr using using_poly_ty ; let final_using = fmap (HsWrap (WpTyApp tup_ty)) using' - --------------- Bulding the bindersMap ---------------- + --------------- Building the bindersMap ---------------- ; let mk_n_bndr :: Name -> TcId -> TcId mk_n_bndr n_bndr_name bndr_id = mkLocalId n_bndr_name (n_app (idType bndr_id)) @@ -853,7 +853,7 @@ When typechecking we want to typecheck 'bar' in the knowledge that it should be an IO thing, pushing info from the context into the RHS. To do this, we check the rebindable syntax first, and push that information into (tcMonoExprNC rhs). -Otherwise the error shows up when cheking the rebindable syntax, and +Otherwise the error shows up when checking the rebindable syntax, and the expected/inferred stuff is back to front (see Trac #3613). Note [typechecking ApplicativeStmt] diff --git a/compiler/Eta/TypeCheck/TcPat.hs b/compiler/Eta/TypeCheck/TcPat.hs index 7c1a9d0e..a8f8d928 100644 --- a/compiler/Eta/TypeCheck/TcPat.hs +++ b/compiler/Eta/TypeCheck/TcPat.hs @@ -123,7 +123,7 @@ data LetBndrSpec = LetLclBndr -- The binder is just a local one; -- an AbsBinds will provide the global version - | LetGblBndr TcPragFun -- Genrealisation plan is NoGen, so there isn't going + | LetGblBndr TcPragFun -- Generalisation plan is NoGen, so there isn't going -- to be an AbsBinds; So we must bind the global version -- of the binder right away. 
-- Oh, and dhhere is the inline-pragma information @@ -359,10 +359,10 @@ Two cases, dealt with by the LetPat case of tcPatBndr we want to bind a cloned, local version of the variable, with the type given by the pattern context, *not* by the signature (even if there is one; see Trac #7268). The mkExport part of the - generalisation step will do the checking and impedence matching + generalisation step will do the checking and impedance matching against the signature. - * If for some some reason we are not generalising (plan = NoGen), the + * If for some reason we are not generalising (plan = NoGen), the LetBndrSpec will be LetGblBndr. In that case we must bind the global version of the Id, and do so with precisely the type given in the signature. (Then we unify with the type from the pattern @@ -506,7 +506,7 @@ tc_pat penv (ViewPat expr pat _) overall_pat_ty thing_inside -- NOTE: this forces pat_ty to be a monotype (because we use a unification -- variable to find it). this means that in an example like -- (view -> f) where view :: _ -> forall b. b - -- we will only be able to use view at one instantation in the + -- we will only be able to use view at one instantiation in the -- rest of the view ; (expr_co, pat_ty) <- tcInfer $ \ pat_ty -> unifyType expr'_inferred (mkFunTy overall_pat_ty pat_ty) @@ -661,7 +661,7 @@ are not made available in the RHS of the match. For example f :: T a -> Int -> a f ~(T1 i) y = y It's obviously not sound to refine a to Int in the right -hand side, because the arugment might not match T1 at all! +hand side, because the argument might not match T1 at all! 
Finally, a lazy pattern should not bind any existential type variables because they won't be in scope when we do the desugaring diff --git a/compiler/Eta/TypeCheck/TcRnDriver.hs b/compiler/Eta/TypeCheck/TcRnDriver.hs index 996c0b55..8ec796c7 100644 --- a/compiler/Eta/TypeCheck/TcRnDriver.hs +++ b/compiler/Eta/TypeCheck/TcRnDriver.hs @@ -218,7 +218,7 @@ tcRnModuleTcRnM hsc_env hsc_src -- If the whole module is warned about or deprecated -- (via mod_deprec) record that in tcg_warns. If we do thereby add - -- a WarnAll, it will override any subseqent depracations added to tcg_warns + -- a WarnAll, it will override any subsequent deprecations added to tcg_warns let { tcg_env1 = case mod_deprec of Just (L _ txt) -> tcg_env { tcg_warns = WarnAll txt } Nothing -> tcg_env @@ -588,7 +588,7 @@ tcRnHsBootDecls hsc_src decls ; mapM_ (badBootDecl hsc_src "rule") rule_decls ; mapM_ (badBootDecl hsc_src "vect") vect_decls - -- Typecheck type/class/isntance decls + -- Typecheck type/class/instance decls ; traceTc "Tc2 (boot)" empty ; (tcg_env, inst_infos, _deriv_binds) <- tcTyClsInstDecls tycl_decls inst_decls deriv_decls @@ -1618,7 +1618,7 @@ tcUserStmt (L loc (BodyStmt expr _ _ _)) ; when (isUnitTy $ it_ty) failM ; return stuff }, - -- Plan B; a naked bind statment + -- Plan B; a naked bind statement tcGhciStmts [bind_stmt], -- Plan C; check that the let-binding is typeable all by itself. @@ -1804,7 +1804,7 @@ isGHCiMonad hsc_env ty _ <- tcLookupInstance ghciClass [userTy] return name - Just _ -> failWithTc $ text "Ambigous type!" + Just _ -> failWithTc $ text "Ambiguous type!" 
Nothing -> failWithTc $ text ("Can't find type:" ++ ty) -- tcRnExpr just finds the type of an expression @@ -2081,7 +2081,7 @@ loadUnqualIfaces hsc_env ictxt {- ************************************************************************ * * - Degugging output + Debugging output * * ************************************************************************ -} diff --git a/compiler/Eta/TypeCheck/TcRnMonad.hs b/compiler/Eta/TypeCheck/TcRnMonad.hs index 8e015abf..c2cf39c8 100644 --- a/compiler/Eta/TypeCheck/TcRnMonad.hs +++ b/compiler/Eta/TypeCheck/TcRnMonad.hs @@ -877,7 +877,7 @@ tryTc m ----------------------- tryTcErrs :: TcRn a -> TcRn (Messages, Maybe a) -- Run the thing, returning --- Just r, if m succceeds with no error messages +-- Just r, if m succeeds with no error messages -- Nothing, if m fails, or if it succeeds but has error messages -- Either way, the messages are returned; even in the Just case -- there might be warnings @@ -1362,7 +1362,7 @@ constraints might be "variable out of scope" Hole constraints, and that might have been the actual original cause of the exception! For example (Trac #12529): f = p @ Int -Here 'p' is out of scope, so we get an insolube Hole constraint. But +Here 'p' is out of scope, so we get an insoluble Hole constraint. But the visible type application fails in the monad (thows an exception). We must not discard the out-of-scope error. @@ -1371,7 +1371,7 @@ Hence: - insolublesOnly in tryCaptureConstraints - emitConstraints in the Left case of captureConstraints -Hover note that fresly-generated constraints like (Int ~ Bool), or +However, note that freshly-generated constraints like (Int ~ Bool), or ((a -> b) ~ Int) are all CNonCanonical, and hence won't be flagged as insoluble. The constraint solver does that. So they'll be discarded. 
That's probably ok; but see th/5358 as a not-so-good example: diff --git a/compiler/Eta/TypeCheck/TcRnTypes.hs b/compiler/Eta/TypeCheck/TcRnTypes.hs index f3fb1bfd..b966e2cd 100644 --- a/compiler/Eta/TypeCheck/TcRnTypes.hs +++ b/compiler/Eta/TypeCheck/TcRnTypes.hs @@ -157,7 +157,7 @@ import Eta.REPL.RemoteTypes -- 'ns_module_name' @A@, defines a mapping from @{A.T}@ -- (for some 'OccName' @T@) to some arbitrary other 'Name'. -- --- The most intruiging thing about a 'NameShape', however, is +-- The most intriguing thing about a 'NameShape', however, is -- how it's constructed. A 'NameShape' is *implied* by the -- exported 'AvailInfo's of the implementor of an interface: -- if an implementor of signature @@ exports @M.T@, you implicitly @@ -209,7 +209,7 @@ type RnM = TcRn -- | Historical "type-checking monad" (now it's just 'TcRn'). type TcM = TcRn --- We 'stack' these envs through the Reader like monad infastructure +-- We 'stack' these envs through the Reader like monad infrastructure -- as we move into an expression (although the change is focused in -- the lcl type). data Env gbl lcl @@ -219,7 +219,7 @@ data Env gbl lcl -- BangPattern is to fix leak, see #15111 env_us :: {-# UNPACK #-} !(IORef UniqSupply), - -- Unique supply for local varibles + -- Unique supply for local variables env_gbl :: gbl, -- Info about things defined at the top level -- of the module being compiled @@ -325,7 +325,7 @@ data DsGblEnv -- iff '-fvectorise' flag was given as well as -- exported entities of 'Data.Array.Parallel' iff -- '-XParallelArrays' was given; otherwise, empty - , ds_parr_bi :: PArrBuiltin -- desugarar names for '-XParallelArrays' + , ds_parr_bi :: PArrBuiltin -- desugarer names for '-XParallelArrays' , ds_static_binds :: IORef [(Fingerprint, (Id,CoreExpr))] -- ^ Bindings resulted from floating static forms } @@ -427,7 +427,7 @@ data TcGblEnv tcg_imports :: ImportAvails, -- ^ Information about what was imported from where, including -- things bound in this module. 
Also store Safe Haskell info - -- here about transative trusted packaage requirements. + -- here about transitive trusted package requirements. tcg_dus :: DefUses, -- ^ What is defined in this module and what is used. tcg_used_rdrnames :: TcRef (Set RdrName), @@ -613,7 +613,7 @@ We gather two sorts of usage information Used only to report unused import declarations Notice that they are RdrNames, not Names, so we can tell whether the reference was qualified or unqualified, which - is esssential in deciding whether a particular import decl + is essential in deciding whether a particular import decl is unnecessary. This info isn't present in Names. @@ -660,7 +660,7 @@ data TcLclEnv -- Changes as we move inside an expression -- Does *not* include global name envt; may shadow it -- Includes both ordinary variables and type variables; -- they are kept distinct because tyvar have a different - -- occurrence contructor (Name.TvOcc) + -- occurrence constructor (Name.TvOcc) -- We still need the unsullied global name env so that -- we can look up record field names @@ -697,7 +697,7 @@ type ThBindEnv = NameEnv (TopLevelFlag, ThLevel) data TcIdBinder = TcIdBndr TcId - TopLevelFlag -- Tells whether the bindind is syntactically top-level + TopLevelFlag -- Tells whether the binding is syntactically top-level -- (The monomorphic Ids for a recursive group count -- as not-top-level for this purpose.) 
@@ -869,7 +869,7 @@ data TcTyThing -- Name in the domain of the envt | AThing TcKind -- Used temporarily, during kind checking, for the - -- tycons and clases in this recursive group + -- tycons and classes in this recursive group -- Can be a mono-kind or a poly-kind; in TcTyClsDcls see -- Note [Type checking recursive type and class declarations] @@ -884,7 +884,7 @@ data PromotionErr -- See Note [AFamDataCon: not promoting data family constructors] in TcRnDriver | RecDataConPE -- Data constructor in a recursive loop - -- See Note [ARecDataCon: recusion and promoting data constructors] in TcTyClsDecls + -- See Note [ARecDataCon: recursion and promoting data constructors] in TcTyClsDecls | NoDataKinds -- -XDataKinds not enabled instance Outputable TcTyThing where -- Debugging only @@ -1230,7 +1230,7 @@ data HoleSort = ExprHole -- ^ A hole in an expression (TypedHoles) Note [Kind orientation for CTyEqCan] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Given an equality (t:* ~ s:Open), we can't solve it by updating t:=s, -ragardless of how touchable 't' is, because the kinds don't work. +regardless of how touchable 't' is, because the kinds don't work. Instead we absolutely must re-orient it. Reason: if that gets into the inert set we'll start replacing t's by s's, and that might make a @@ -1285,7 +1285,7 @@ Example 1: (c Int), where c :: * -> Constraint. We can't do anything Example 2: a ~ b, where a :: *, b :: k, where k is a kind variable We don't want to use this to substitute 'b' for 'a', in case - 'k' is subequently unifed with (say) *->*, because then + 'k' is subsequently unified with (say) *->*, because then we'd have ill-kinded types floating about. Rather we want to defer using the equality altogether until 'k' get resolved. 
@@ -1656,7 +1656,7 @@ data Implication -- False <=> ic_givens might have equalities ic_env :: TcLclEnv, -- Gives the source location and error context - -- for the implicatdion, and hence for all the + -- for the implication, and hence for all the -- given evidence variables ic_wanted :: WantedConstraints, -- The wanted @@ -1687,7 +1687,7 @@ Note [Shadowing in a constraint] We assume NO SHADOWING in a constraint. Specifically * The unification variables are all implicitly quantified at top level, and are all unique - * The skolem varibles bound in ic_skols are all freah when the + * The skolem variables bound in ic_skols are all fresh when the implication is created. So we can safely substitute. For example, if we have forall a. a~Int => ...(forall b. ...a...)... @@ -1869,7 +1869,7 @@ ctEvFlavour (CtDerived {}) = Derived Note [SubGoalDepth] ~~~~~~~~~~~~~~~~~~~ The 'SubGoalCounter' takes care of stopping the constraint solver from looping. -Because of the different use-cases of regular constaints and type function +Because of the different use-cases of regular constraints and type function applications, there are two independent counters. Therefore, this datatype is abstract. See Note [WorkList] @@ -1888,7 +1888,7 @@ Each counter starts at zero and increases. depth removes a type constructor from the type, so the depth never gets big; i.e. is bounded by the structural depth of the type. - The flag -fcontext-stack=n (not very well named!) fixes the maximium + The flag -fcontext-stack=n (not very well named!) fixes the maximum level. * The "type function reduction counter" does the same thing when resolving @@ -1904,7 +1904,7 @@ Each counter starts at zero and increases. different maximum, as we expect possibly many more type function reductions in sensible programs than type class constraints. - The flag -ftype-function-depth=n fixes the maximium level. + The flag -ftype-function-depth=n fixes the maximum level. 
-} data SubGoalCounter = CountConstraints | CountTyFunApps @@ -1966,7 +1966,7 @@ which initializes it to initialSubGoalDepth (i.e. 0); but when requesting a Coercible instance (requestCoercible in TcInteract), we bump the current depth by one and use that. -There are two spots where wanted contraints attempted to be solved +There are two spots where wanted constraints attempted to be solved using existing constraints: lookupInertDict and lookupSolvedDict in TcSMonad. Both use ctEvCheckDepth to make the check. That function ensures that a Given constraint can always be used to solve a goal @@ -2080,7 +2080,7 @@ data SkolemInfo | RuleSkol RuleName -- The LHS of a RULE | InferSkol [(Name,TcType)] - -- We have inferred a type for these (mutually-recursivive) + -- We have inferred a type for these (mutually-recursive) -- polymorphic Ids, and are now checking that their RHS -- constraints are satisfied. @@ -2115,7 +2115,7 @@ pprSkolInfo (PatSkol cl mc) = case cl of RealDataCon dc -> sep [ ptext (sLit "a pattern with constructor") , nest 2 $ ppr dc <+> dcolon <+> pprType (dataConUserType dc) <> comma - -- pprType prints forall's regardless of -fprint-explict-foralls + -- pprType prints forall's regardless of -fprint-explicit-foralls -- which is what we want here, since we might be saying -- type variable 't' is bound by ... , ptext (sLit "in") <+> pprMatchContext mc ] @@ -2343,7 +2343,7 @@ data TcPluginResult = TcPluginContradiction [Ct] -- ^ The plugin found a contradiction. -- The returned constraints are removed from the inert set, - -- and recorded as insoluable. + -- and recorded as insoluble. | TcPluginOk [(EvTerm,Ct)] [Ct] -- ^ The first field is for constraints that were solved. diff --git a/compiler/Eta/TypeCheck/TcRules.hs b/compiler/Eta/TypeCheck/TcRules.hs index 968bf58f..25f04a65 100644 --- a/compiler/Eta/TypeCheck/TcRules.hs +++ b/compiler/Eta/TypeCheck/TcRules.hs @@ -100,7 +100,7 @@ Consider f b True = ... 
#-} Here we *must* solve the wanted (Eq a) from the given (Eq a) -resulting from skolemising the agument type of g. So we +resulting from skolemising the argument type of g. So we revert to SimplCheck when going under an implication. @@ -159,7 +159,7 @@ tcRule (HsRule name act hs_bndrs lhs fv_lhs rhs fv_rhs) -- RULE: forall v. fst (ss v) = fst v -- The type of the rhs of the rule is just a, but v::(a,(b,c)) -- - -- We also need to get the completely-uconstrained tyvars of + -- We also need to get the completely-unconstrained tyvars of -- the LHS, lest they otherwise get defaulted to Any; but we do that -- during zonking (see TcHsSyn.zonkRule) @@ -223,7 +223,7 @@ tcRuleBndrs (L _ (RuleBndrSig (L _ name) rn_ty) : rule_bndrs) ; let id = mkLocalId name id_ty tvs = map snd tv_prs -- tcHsPatSigType returns (Name,TyVar) pairs - -- for for RuleSigCtxt their Names are not + -- for RuleSigCtxt their Names are not -- cloned, so we get (n, tv-with-name-n) pairs -- See Note [Pattern signature binders] in TcHsType diff --git a/compiler/Eta/TypeCheck/TcSMonad.hs b/compiler/Eta/TypeCheck/TcSMonad.hs index 0765b9be..ec6e7c44 100644 --- a/compiler/Eta/TypeCheck/TcSMonad.hs +++ b/compiler/Eta/TypeCheck/TcSMonad.hs @@ -594,7 +594,7 @@ implications. Consider a ~ F b, forall c. b~Int => blah If we have F b ~ fsk in the flat-cache, and we push that into the nested implication, we might miss that F b can be rewritten to F Int, -and hence perhpas solve it. Moreover, the fsk from outside is +and hence perhaps solve it. Moreover, the fsk from outside is flattened out after solving the outer level, but and we don't do that flattening recursively. @@ -613,7 +613,7 @@ could help to solve a constraint. For example If we pushed the (C Int alpha) inwards, as a given, it can produce a fundep (alpha~a) and this can float out again and be used to fix alpha. (In general we can't float class constraints out just in case -(C d blah) might help to solve (C Int a).) But we ignore this possiblity. 
+(C d blah) might help to solve (C Int a).) But we ignore this possibility. For Derived constraints we don't have evidence, so we do not turn them into Givens. There can *be* deriving CFunEqCans; see Trac #8129. @@ -685,7 +685,7 @@ getNoGivenEqs tclvl skol_tvs where eqs_given_here :: VarSet -> EqualCtList -> Bool eqs_given_here local_fsks [CTyEqCan { cc_tyvar = tv, cc_ev = ev }] - -- Givens are always a sigleton + -- Givens are always a singleton = not (skolem_bound_here local_fsks tv) && ev_given_here ev eqs_given_here _ _ = False @@ -1525,7 +1525,7 @@ sites. Not only do we want an error message for each, but with *derived* insolubles, we only want to report each one once. Why? (a) A constraint (C r s t) where r -> s, say, may generate the same fundep - equality many times, as the original constraint is sucessively rewritten. + equality many times, as the original constraint is successively rewritten. (b) Ditto the successive iterations of the main solver itself, as it traverses the constraint tree. See example below. diff --git a/compiler/Eta/TypeCheck/TcSimplify.hs b/compiler/Eta/TypeCheck/TcSimplify.hs index c9fca41c..98b001e4 100644 --- a/compiler/Eta/TypeCheck/TcSimplify.hs +++ b/compiler/Eta/TypeCheck/TcSimplify.hs @@ -337,7 +337,7 @@ simplifyInfer rhs_tclvl apply_mr name_taus wanteds ] -- Historical note: Before step 2 we used to have a - -- HORRIBLE HACK described in Note [Avoid unecessary + -- HORRIBLE HACK described in Note [Avoid unnecessary -- constraint simplification] but, as described in Trac -- #4361, we have taken in out now. That's why we start -- with step 2! 
@@ -509,8 +509,8 @@ quantifyPred qtvs pred IrredPred ty -> tyVarsOfType ty `intersectsVarSet` qtvs TuplePred {} -> False where - -- Only quantify over (F tys ~ ty) if tys mentions a quantifed variable - -- In particular, quanitifying over (F Int ~ ty) is a bit like quantifying + -- Only quantify over (F tys ~ ty) if tys mentions a quantified variable + -- In particular, quantifying over (F Int ~ ty) is a bit like quantifying -- over (Eq Int); the instance should kick in right here quant_fun ty = case tcSplitTyConApp_maybe ty of @@ -637,7 +637,7 @@ mkMinimalBySCs does. Then, simplifyInfer uses the minimal constraint to check the original wanted. -Note [Avoid unecessary constraint simplification] +Note [Avoid unnecessary constraint simplification] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -------- NB NB NB (Jun 12) ------------- This note not longer applies; see the notes with Trac #4361. @@ -681,8 +681,8 @@ Deciding which equalities to quantify over is tricky: F is a type function. The difficulty is that it's hard to tell what is insoluble! -So we see whether the simplificaiotn step yielded any type errors, -and if so refrain from quantifying over *any* equalites. +So we see whether the simplification step yielded any type errors, +and if so refrain from quantifying over *any* equalities. -} simplifyRule :: RuleName @@ -781,7 +781,7 @@ solveWantedsTcMWithEvBinds :: EvBindsVar -- Returns a *zonked* result -- We zonk when we finish primarily to un-flatten out any -- flatten-skolems etc introduced by canonicalisation of --- types involving type funuctions. Happily the result +-- types involving type functions. Happily the result -- is typically much smaller than the input, indeed it is -- often empty. solveWantedsTcMWithEvBinds ev_binds_var wc tcs_action @@ -988,7 +988,7 @@ of progress. Trac #8474 is a classic example: ?yn:betan => [W] ?x:Int because we'll just get the same [D] again - * If we *do* re-solve, we'll get an ininite loop. 
It is cut off by + * If we *do* re-solve, we'll get an infinite loop. It is cut off by the fixed bound of 10, but solving the next takes 10*10*...*10 (ie exponentially many) iterations! @@ -1015,12 +1015,12 @@ tcCheckSatisfiability :: Bag EvVar -> TcM Bool tcCheckSatisfiability givens = do { lcl_env <- TcM.getLclEnv ; let given_loc = mkGivenLoc topTcLevel UnkSkol lcl_env - ; traceTc "checkSatisfiabilty {" (ppr givens) + ; traceTc "checkSatisfiability {" (ppr givens) ; (res, _ev_binds) <- runTcS $ do { solveSimpleGivens given_loc (bagToList givens) ; insols <- getInertInsols ; return (not (isEmptyBag insols)) } - ; traceTc "checkSatisfiabilty }" (ppr res) + ; traceTc "checkSatisfiability }" (ppr res) ; return (not res) } promoteTyVar :: TcLevel -> TcTyVar -> TcS () @@ -1100,7 +1100,7 @@ the top-level simple constraints are plausible, but we also float constraints out from inside, if they are not captured by skolems. The same function is used when doing type-class defaulting (see the call -to applyDefaultingRules) to extract constraints that that might be defaulted. +to applyDefaultingRules) to extract constraints that might be defaulted. There are two caveats: @@ -1334,7 +1334,7 @@ floatEqualities skols no_given_eqs wanteds@(WC { wc_simple = simples }) (float_eqs, remaining_simples) = partitionBag float_me simples float_me :: Ct -> Bool - float_me ct -- The constraint is un-flattened and de-cannonicalised + float_me ct -- The constraint is un-flattened and de-canonicalised | let pred = ctPred ct , EqPred NomEq ty1 ty2 <- classifyPredType pred , tyVarsOfType pred `disjointVarSet` skol_set @@ -1409,7 +1409,7 @@ to beta[1], and that means the (a ~ beta[1]) will be stuck, as it should be. 
********************************************************************************* * * -* Defaulting and disamgiguation * +* Defaulting and disambiguation * * * ********************************************************************************* -} @@ -1481,7 +1481,7 @@ findDefaultableGroups (default_tys, (ovl_strings, extended_defaults)) wanteds | otherwise = all is_std_class clss && (any is_num_class clss) -- In interactive mode, or with -XExtendedDefaultRules, - -- we default Show a to Show () to avoid graututious errors on "show []" + -- we default Show a to Show () to avoid gratuitous errors on "show []" isInteractiveClass cls = is_num_class cls || (classKey cls `elem` [showClassKey, eqClassKey, ordClassKey]) diff --git a/compiler/Eta/TypeCheck/TcSplice.hs b/compiler/Eta/TypeCheck/TcSplice.hs index 19297f0a..bce4a682 100644 --- a/compiler/Eta/TypeCheck/TcSplice.hs +++ b/compiler/Eta/TypeCheck/TcSplice.hs @@ -736,7 +736,7 @@ runMeta' show_code ppr_hs run_and_convert expr -- including, say, a pattern-match exception in the code we are running -- -- We also do the TH -> HS syntax conversion inside the same - -- exception-cacthing thing so that if there are any lurking + -- exception-catching thing so that if there are any lurking -- exceptions in the data structure returned by hval, we'll -- encounter them inside the try -- @@ -793,7 +793,7 @@ like that. Here's how it's processed: * 'qReport' forces the message to ensure any exception hidden in unevaluated thunk doesn't get into the bag of errors. 
Otherwise the following splice - will triger panic (Trac #8987): + will trigger panic (Trac #8987): $(fail undefined) See also Note [Concealed TH exceptions] diff --git a/compiler/Eta/TypeCheck/TcTyClsDecls.hs b/compiler/Eta/TypeCheck/TcTyClsDecls.hs index dbccb0d3..f6c5c1ad 100644 --- a/compiler/Eta/TypeCheck/TcTyClsDecls.hs +++ b/compiler/Eta/TypeCheck/TcTyClsDecls.hs @@ -146,7 +146,7 @@ tcTyClGroup tyclds -- Populate environment with knot-tied ATyCon for TyCons -- NB: if the decls mention any ill-staged data cons - -- (see Note [Recusion and promoting data constructors] + -- (see Note [Recursion and promoting data constructors] -- we will have failed already in kcTyClGroup, so no worries here ; tcExtendRecEnv (zipRecTyClss names_w_poly_kinds rec_tyclss) $ @@ -255,7 +255,7 @@ Open type families This treatment of type synonyms only applies to Haskell 98-style synonyms. General type functions can be recursive, and hence, appear in `alg_decls'. -The kind of an open type family is solely determinded by its kind signature; +The kind of an open type family is solely determined by its kind signature; hence, only kind signatures participate in the construction of the initial kind environment (as constructed by `getInitialKind'). In fact, we ignore instances of families altogether in the following. However, we need to include @@ -564,7 +564,7 @@ However, during tcTyClDecl of T (above) we will be in a recursive "knot". So we aren't allowed to look at the TyCon T itself; we are only allowed to put it (lazily) in the returned structures. But when kind-checking the RHS of T's decl, we *do* need to know T's kind (so -that we can correctly elaboarate (T k f a). How can we get T's kind +that we can correctly elaborate (T k f a). How can we get T's kind without looking at T? Delicate answer: during tcTyClDecl, we extend *Global* env with T -> ATyCon (the (not yet built) TyCon for T) @@ -579,7 +579,7 @@ Then: the *global* env to get the TyCon. 
But we must be careful not to force the TyCon or we'll get a loop. -This fancy footwork (with two bindings for T) is only necesary for the +This fancy footwork (with two bindings for T) is only necessary for the TyCons or Classes of this recursive group. Earlier, finished groups, live in the global env only. -} @@ -650,7 +650,7 @@ tcTyClDecl1 _parent rec_info ; return (ATyCon (classTyCon clas) : gen_dm_ids ++ class_ats ) } -- NB: Order is important due to the call to `mkGlobalThings' when - -- tying the the type and class declaration type checking knot. + -- tying the type and class declaration type checking knot. where tc_fundep (tvs1, tvs2) = do { tvs1' <- mapM (tc_fd_tyvar . unLoc) tvs1 ; ; tvs2' <- mapM (tc_fd_tyvar . unLoc) tvs2 ; @@ -1503,7 +1503,7 @@ checkValidTyCon tc ts1 = mkVarSet tvs1 fty1 = dataConFieldType con1 label - checkOne (_, con2) -- Do it bothways to ensure they are structurally identical + checkOne (_, con2) -- Do it both ways to ensure they are structurally identical = do { checkFieldCompat label con1 con2 ts1 res1 res2 fty1 fty2 ; checkFieldCompat label con2 con1 ts2 res2 res1 fty2 fty1 } where @@ -2043,7 +2043,7 @@ For naughty selectors we make a dummy binding sel = () for naughty selectors, so that the later type-check will add them to the environment, and they'll be exported. The function is never called, because -the tyepchecker spots the sel_naughty field. +the typechecker spots the sel_naughty field. Note [GADT record selectors] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -2062,7 +2062,7 @@ Details: the "real" types of T1,T2 are: T1 :: forall r a. (r~[a]) => a -> T r T2 :: forall r a b. (r~[a]) => a -> b -> T r -So the selector loooks like this: +So the selector looks like this: f :: forall a. T [a] -> Maybe a f (a:*) (t:T [a]) = case t of @@ -2098,7 +2098,7 @@ The selector we want for fld looks like this: T1 (x::b) -> x The scrutinee of the case has type :R7T (Maybe b), which can be -gotten by appying the eq_spec to the univ_tvs of the data con. 
+gotten by applying the eq_spec to the univ_tvs of the data con. ************************************************************************ * * diff --git a/compiler/Eta/TypeCheck/TcTyDecls.hs b/compiler/Eta/TypeCheck/TcTyDecls.hs index 896e8350..12902159 100644 --- a/compiler/Eta/TypeCheck/TcTyDecls.hs +++ b/compiler/Eta/TypeCheck/TcTyDecls.hs @@ -3,7 +3,7 @@ (c) The GRASP/AQUA Project, Glasgow University, 1992-1999 -Analysis functions over data types. Specficially, detecting recursive types. +Analysis functions over data types. Specifically, detecting recursive types. This stuff is only used for source-code decls; it's recorded in interface files for imported data types. @@ -346,7 +346,7 @@ then we mark S as recursive, just in case. What that means is that if we see then we don't need to look inside S to compute R's recursiveness. Since S is imported (not from an hi-boot file), one cannot get from R back to S except via an hi-boot file, and that means that some data type will be marked recursive along the way. So R is -unconditionly non-recursive (i.e. there'll be a loop breaker elsewhere if necessary) +unconditionally non-recursive (i.e. 
there'll be a loop breaker elsewhere if necessary) This in turn means that we grovel through fewer interface files when computing recursiveness, because we need only look at the type decls in the module being diff --git a/compiler/Eta/TypeCheck/TcType.hs b/compiler/Eta/TypeCheck/TcType.hs index a6238b1a..d1ba87b4 100644 --- a/compiler/Eta/TypeCheck/TcType.hs +++ b/compiler/Eta/TypeCheck/TcType.hs @@ -422,7 +422,7 @@ data UserTypeCtxt ************************************************************************ * * - Untoucable type variables + Untouchable type variables * * ************************************************************************ -} @@ -584,7 +584,7 @@ pprSigCtxt ctxt extra pp_ty ************************************************************************ -} --- | Finds outermost type-family applications occuring in a type, +-- | Finds outermost type-family applications occurring in a type, -- after expanding synonyms. tcTyFamInsts :: Type -> [(TyCon, [Type])] tcTyFamInsts ty @@ -753,7 +753,7 @@ isReturnTyVar tv -- isAmbiguousTyVar is used only when reporting type errors -- It picks out variables that are unbound, namely meta --- type variables and the RuntimUnk variables created by +-- type variables and the RuntimeUnk variables created by -- RtClosureInspect.zonkRTTIType. These are "ambiguous" in -- the sense that they stand for an as-yet-unknown type isAmbiguousTyVar tv @@ -844,7 +844,7 @@ mkTcEqPred :: TcType -> TcType -> Type -- -- But for now we call mkTyConApp, not mkEqPred, because the invariants -- of the latter might not be satisfied during type checking. --- Notably when we form an equalty (a : OpenKind) ~ (Int : *) +-- Notably when we form an equality (a : OpenKind) ~ (Int : *) -- -- But this is horribly delicate: what about type variables -- that turn out to be bound to Int#? 
diff --git a/compiler/Eta/TypeCheck/TcTypeNats.hs b/compiler/Eta/TypeCheck/TcTypeNats.hs index d3f668e3..6f328b5d 100644 --- a/compiler/Eta/TypeCheck/TcTypeNats.hs +++ b/compiler/Eta/TypeCheck/TcTypeNats.hs @@ -641,7 +641,7 @@ rootExact x y = do (z,True) <- genRoot x y -{- | Compute the the n-th root of a natural number, rounded down to +{- | Compute the n-th root of a natural number, rounded down to the closest natural number. The boolean indicates if the result is exact (i.e., True means no rounding was done, False means rounded down). The second argument specifies which root we are computing. -} diff --git a/compiler/Eta/TypeCheck/TcUnify.hs b/compiler/Eta/TypeCheck/TcUnify.hs index 79d55a57..31345799 100644 --- a/compiler/Eta/TypeCheck/TcUnify.hs +++ b/compiler/Eta/TypeCheck/TcUnify.hs @@ -338,7 +338,7 @@ There are a number of wrinkles (below). Notice that Wrinkle 1 and 2 both require eta-expansion, which technically may increase termination. We just put up with this, in exchange for getting -more predicatble type inference. +more predictable type inference. Wrinkle 1: Note [Deep skolemisation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -692,7 +692,7 @@ unifyTypeList (ty1:tys@(ty2:_)) = do { _ <- unifyType ty1 ty2 ************************************************************************ uType is the heart of the unifier. Each arg occurs twice, because -we want to report errors in terms of synomyms if possible. The first of +we want to report errors in terms of synonyms if possible. The first of the pair is used in error messages only; it is always the same as the second, except that if the first is a synonym then the second may be a de-synonym'd version. This way we get better error messages. @@ -877,7 +877,7 @@ We may encounter a unification ty1 ~ ty2 that cannot be performed syntactically, and yet its consistency is undetermined. Previously, there was no way to still make it consistent. So a mismatch error was issued. 
-Now these unfications are deferred until constraint simplification, where type +Now these unifications are deferred until constraint simplification, where type family instances and given equations may (or may not) establish the consistency. Deferred unifications are of the form F ... ~ ... @@ -887,7 +887,7 @@ E.g. id :: x ~ y => x -> y id e = e -involves the unfication x = y. It is deferred until we bring into account the +involves the unification x = y. It is deferred until we bring into account the context x ~ y to establish that it holds. If available, we defer original types (rather than those where closed type @@ -944,7 +944,7 @@ uUnfilledVar origin swapped tv1 details1 non_var_ty2 -- ty2 is not a type varia defer = unSwap swapped (uType_defer origin) (mkTyVarTy tv1) non_var_ty2 -- Occurs check or an untouchable: just defer -- NB: occurs check isn't necessarily fatal: - -- eg tv1 occured in type family parameter + -- eg tv1 occurred in type family parameter ---------------- uUnfilledVars :: CtOrigin @@ -952,7 +952,7 @@ uUnfilledVars :: CtOrigin -> TcTyVar -> TcTyVarDetails -- Tyvar 1 -> TcTyVar -> TcTyVarDetails -- Tyvar 2 -> TcM TcCoercion --- Invarant: The type variables are distinct, +-- Invariant: The type variables are distinct, -- Neither is filled in yet uUnfilledVars origin swapped tv1 details1 tv2 details2 @@ -1205,9 +1205,9 @@ happy to have types of kind Constraint on either end of an arrow. Note [Kind variables can be untouchable] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We must use the careful function lookupTcTyVar to see if a kind -variable is filled or unifiable. It checks for touchablity, and kind +variable is filled or unifiable. It checks for touchability, and kind variables can certainly be untouchable --- for example the variable -might be bound outside an enclosing existental pattern match that +might be bound outside an enclosing existential pattern match that binds an inner kind variable, which we don't want to escape outside. 
This, or something closely related, was the cause of Trac #8985. diff --git a/compiler/Eta/TypeCheck/TcValidity.hs b/compiler/Eta/TypeCheck/TcValidity.hs index 0a56cd59..f7db8be4 100644 --- a/compiler/Eta/TypeCheck/TcValidity.hs +++ b/compiler/Eta/TypeCheck/TcValidity.hs @@ -122,7 +122,7 @@ to a Type, performing kind checking, and then check various things that should be true about it. We don't want to perform these checks at the same time as the initial translation because (a) they are unnecessary for interface-file types and (b) when checking a mutually recursive group of type and class decls, -we can't "look" at the tycons/classes yet. Also, the checks are are rather +we can't "look" at the tycons/classes yet. Also, the checks are rather diverse, and used to really mess up the other code. One thing we check for is 'rank'. @@ -439,7 +439,7 @@ expand S first, then T we get just which is fine. IMPORTANT: suppose T is a type synonym. Then we must do validity -checking on an appliation (T ty1 ty2) +checking on an application (T ty1 ty2) *either* before expansion (i.e. check ty1, ty2) *or* after expansion (i.e. expand T ty1 ty2, and then check) @@ -706,17 +706,17 @@ and fail. So in fact we use this as our *definition* of ambiguity. We use a very similar test for *inferred* types, to ensure that they are -unambiguous. See Note [Impedence matching] in TcBinds. +unambiguous. See Note [Impedance matching] in TcBinds. This test is very conveniently implemented by calling tcSubType -This neatly takes account of the functional dependecy stuff above, +This neatly takes account of the functional dependency stuff above, and implicit parameter (see Note [Implicit parameters and ambiguity]). What about this, though? g :: C [a] => Int Is every call to 'g' ambiguous? After all, we might have - intance C [a] where ... + instance C [a] where ... at the call site. So maybe that type is ok! 
Indeed even f's quintessentially ambiguous type might, just possibly be callable: with -XFlexibleInstances we could have @@ -725,7 +725,7 @@ and now a call could be legal after all! Well, we'll reject this unless the instance is available *here*. Side note: the ambiguity check is only used for *user* types, not for -types coming from inteface files. The latter can legitimately have +types coming from interface files. The latter can legitimately have ambiguous types. Example class S a where s :: a -> (Int,Int) @@ -743,7 +743,7 @@ Only a *class* predicate can give rise to ambiguity An *implicit parameter* cannot. For example: foo :: (?x :: [a]) => Int foo = length ?x -is fine. The call site will suppply a particular 'x' +is fine. The call site will supply a particular 'x' Furthermore, the type variables fixed by an implicit parameter propagate to the others. E.g. @@ -818,7 +818,7 @@ checkValidInstHead ctxt clas cls_args (instTypeErr clas cls_args abstract_class_msg) -- Check language restrictions; - -- but not for SPECIALISE isntance pragmas + -- but not for SPECIALISE instance pragmas ; let ty_args = dropWhile isKind cls_args ; unless spec_inst_prag $ do { checkTc (xopt LangExt.TypeSynonymInstances dflags || @@ -917,7 +917,7 @@ checkValidInstance ctxt hs_type ty = do { setSrcSpan head_loc (checkValidInstHead ctxt clas inst_tys) ; checkValidTheta ctxt theta - -- The Termination and Coverate Conditions + -- The Termination and Coverage Conditions -- Check that instance inference will terminate (if we care) -- For Haskell 98 this will already have been done by checkValidTheta, -- but as we may be using other extensions we need to check. @@ -952,7 +952,7 @@ checkValidInstance ctxt hs_type ty Note [Paterson conditions] ~~~~~~~~~~~~~~~~~~~~~~~~~~ Termination test: the so-called "Paterson conditions" (see Section 5 of -"Understanding functionsl dependencies via Constraint Handling Rules, +"Understanding functional dependencies via Constraint Handling Rules, JFP Jan 2007). 
We check that each assertion in the context satisfies: @@ -1048,11 +1048,11 @@ So we * Look at the tyvars a,x,b of the type family constructor T (it shares tyvars with the class C) - * Apply the mini-evnt to them, and check that the result is + * Apply the mini-env to them, and check that the result is consistent with the instance types [p] y Int -We do *not* assume (at this point) the the bound variables of -the assoicated type instance decl are the same as for the parent +We do *not* assume (at this point) the bound variables of +the associated type instance decl are the same as for the parent instance decl. So, for example, instance C [p] Int @@ -1121,7 +1121,7 @@ checkConsistentFamInst (Just (clas, mini_env)) fam_tc at_tvs at_tys Just subst | all_distinct subst -> return subst _ -> failWithTc $ wrongATArgErr at_ty inst_ty -- No need to instantiate here, because the axiom - -- uses the same type variables as the assocated class + -- uses the same type variables as the associated class | otherwise = return subst -- Allow non-type-variable instantiation -- See Note [Associated type instances] @@ -1327,7 +1327,7 @@ sizeTypes xs = sum (map sizeType tys) -- -- We are considering whether class constraints terminate. -- Equality constraints and constraints for the implicit --- parameter class always termiante so it is safe to say "size 0". +-- parameter class always terminate so it is safe to say "size 0". -- (Implicit parameter constraints always terminate because -- there are no instances for them---they are only solved by -- "local instances" in expressions). diff --git a/compiler/Eta/Types/CoAxiom.hs b/compiler/Eta/Types/CoAxiom.hs index 6fdd0e64..efdf90e0 100644 --- a/compiler/Eta/Types/CoAxiom.hs +++ b/compiler/Eta/Types/CoAxiom.hs @@ -340,7 +340,7 @@ enclosing class or instance.  Consider type F Int [z] = ...
-- Second param must be [z] In the CoAxBranch in the instance decl (F Int [z]) we use the -same 'z', so that it's easy to check that that type is the same +same 'z', so that it's easy to check that the type is the same as that in the instance header. Similarly in the CoAxBranch for the default decl for F in the diff --git a/compiler/Eta/Types/Coercion.hs b/compiler/Eta/Types/Coercion.hs index 97f13773..2b75ba6f 100644 --- a/compiler/Eta/Types/Coercion.hs +++ b/compiler/Eta/Types/Coercion.hs @@ -225,7 +225,7 @@ Note [Refl invariant] Coercions have the following invariant Refl is always lifted as far as possible. -You might think that a consequencs is: +You might think that a consequence is: Every identity coercions has Refl at the root But that's not quite true because of coercion variables. Consider @@ -589,7 +589,7 @@ tyCoVarsOfCosAcc (co:cos) fv_cand in_scope acc = tyCoVarsOfCosAcc [] fv_cand in_scope acc = emptyFV fv_cand in_scope acc coVarsOfCo :: Coercion -> VarSet --- Extract *coerction* variables only. Tiresome to repeat the code, but easy. +-- Extract *coercion* variables only. Tiresome to repeat the code, but easy. coVarsOfCo (Refl _ _) = emptyVarSet coVarsOfCo (TyConAppCo _ _ cos) = coVarsOfCos cos coVarsOfCo (AppCo co1 co2) = coVarsOfCo co1 `unionVarSet` coVarsOfCo co2 @@ -944,7 +944,7 @@ mkReflCo :: Role -> Type -> Coercion mkReflCo = Refl mkAxInstCo :: Role -> CoAxiom br -> BranchIndex -> [Type] -> Coercion --- mkAxInstCo can legitimately be called over-staturated; +-- mkAxInstCo can legitimately be called over-saturated; -- i.e.
with more type arguments than the coercion requires mkAxInstCo role ax index tys | arity == n_tys = downgradeRole role ax_role $ AxiomInstCo ax_br index rtys @@ -1193,7 +1193,7 @@ applyRoles tc cos = zipWith applyRole (tyConRolesX Representational tc) cos -- the Role parameter is the Role of the TyConAppCo --- defined here because this is intimiately concerned with the implementation +-- defined here because this is intimately concerned with the implementation -- of TyConAppCo tyConRolesX :: Role -> TyCon -> [Role] tyConRolesX Representational tc = tyConRoles tc ++ repeat Nominal @@ -1707,7 +1707,7 @@ ty_co_subst subst role ty Note [liftCoSubstTyVar] ~~~~~~~~~~~~~~~~~~~~~~~ This function can fail (i.e., return Nothing) for two separate reasons: - 1) The variable is not in the substutition + 1) The variable is not in the substitution 2) The coercion found is of too low a role liftCoSubstTyVar is called from two places: in liftCoSubst (naturally), and diff --git a/compiler/Eta/Types/FamInstEnv.hs b/compiler/Eta/Types/FamInstEnv.hs index 25caf9f2..4692e680 100644 --- a/compiler/Eta/Types/FamInstEnv.hs +++ b/compiler/Eta/Types/FamInstEnv.hs @@ -272,10 +272,10 @@ A FamInstEnv maps a family name to the list of known instances for that family. The same FamInstEnv includes both 'data family' and 'type family' instances. Type families are reduced during type inference, but not data families; -the user explains when to use a data family instance by using contructors +the user explains when to use a data family instance by using constructors and pattern matching. -Neverthless it is still useful to have data families in the FamInstEnv: +Nevertheless it is still useful to have data families in the FamInstEnv: - For finding overlaps and conflicts @@ -301,7 +301,7 @@ Then we get a data type for each instance, and an axiom: axiom ax8 a :: T Bool [a] ~ TBoolList a These two axioms for T, one with one pattern, one with two. 
The reason -for this eta-reduction is decribed in TcInstDcls +for this eta-reduction is described in TcInstDcls Note [Eta reduction for data family axioms] -} @@ -789,7 +789,7 @@ reduceTyFamApp_maybe :: FamInstEnvs -- but *not* newtypes -- Works on type-synonym families always; data-families only if -- the role we seek is representational --- It does *not* normlise the type arguments first, so this may not +-- It does *not* normalise the type arguments first, so this may not -- go as far as you want. If you want normalised type arguments, -- use normaliseTcArgs first. -- @@ -951,7 +951,7 @@ normaliseType :: FamInstEnvs -- environment with family instances -- Normalise the input type, by eliminating *all* type-function redexes -- but *not* newtypes (which are visible to the programmer) -- Returns with Refl if nothing happens --- Try to not to disturb type syonyms if possible +-- Try to not to disturb type synonyms if possible normaliseType env role (TyConApp tc tys) = normaliseTcApp env role tc tys diff --git a/compiler/Eta/Types/InstEnv.hs b/compiler/Eta/Types/InstEnv.hs index 9d92197e..e9b138fd 100644 --- a/compiler/Eta/Types/InstEnv.hs +++ b/compiler/Eta/Types/InstEnv.hs @@ -438,10 +438,10 @@ deleteFromInstEnv inst_env ins_item@(ClsInst { is_cls_nm = cls_nm }) adjust (ClsIE items) = ClsIE (filterOut (identicalClsInstHead ins_item) items) identicalClsInstHead :: ClsInst -> ClsInst -> Bool --- ^ True when when the instance heads are the same +-- ^ True when the instance heads are the same -- e.g. both are Eq [(a,b)] -- Used for overriding in GHCi --- Obviously should be insenstive to alpha-renaming +-- Obviously should be insensitive to alpha-renaming identicalClsInstHead (ClsInst { is_cls_nm = cls_nm1, is_tcs = rough1, is_tvs = tvs1, is_tys = tys1 }) (ClsInst { is_cls_nm = cls_nm2, is_tcs = rough2, is_tvs = tvs2, is_tys = tys2 }) = cls_nm1 == cls_nm2 @@ -926,7 +926,7 @@ not incoherent, but we still want this to compile. Hence the "all-but-one-logic". 
The implementation is in insert_overlapping, where we remove matching -incoherent instances as long as there are are others. +incoherent instances as long as there are others. diff --git a/compiler/Eta/Types/Kind.hs b/compiler/Eta/Types/Kind.hs index 9625f0cd..9bcb734b 100644 --- a/compiler/Eta/Types/Kind.hs +++ b/compiler/Eta/Types/Kind.hs @@ -211,7 +211,7 @@ isKind k = isSuperKind (typeKind k) isSubKind :: Kind -> Kind -> Bool -- ^ @k1 \`isSubKind\` k2@ checks that @k1@ <: @k2@ -- Sub-kinding is extremely simple and does not look --- under arrrows or type constructors +-- under arrows or type constructors -- If you edit this function, you may need to update the GHC formalism -- See Note [GHC Formalism] in coreSyn/CoreLint.lhs @@ -248,7 +248,7 @@ isSubKindCon kc1 kc2 -- a sub-kind of OpenTypeKind. It must be a sub-kind of OpenTypeKind -- *after* the typechecker -- a) So that (Ord a -> Eq a) is a legal type --- b) So that the simplifer can generate (error (Eq a) "urk") +-- b) So that the simplifier can generate (error (Eq a) "urk") -- Moreover, after the type checker, Constraint and * -- are identical; see Note [Kind Constraint and kind *] -- diff --git a/compiler/Eta/Types/OptCoercion.hs b/compiler/Eta/Types/OptCoercion.hs index 0e645a7c..cce068be 100644 --- a/compiler/Eta/Types/OptCoercion.hs +++ b/compiler/Eta/Types/OptCoercion.hs @@ -40,7 +40,7 @@ import Control.Monad ( zipWithM ) Note [Subtle shadowing in coercions] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Supose we optimising a coercion +Suppose we are optimising a coercion optCoercion (forall (co_X5:t1~t2). ...co_B1...) The co_X5 is a wild-card; the bound variable of a coercion for-all should never appear in the body of the forall.
Indeed we often @@ -288,7 +288,7 @@ opt_univ env prov role oty1 oty2 , Just (l2, r2) <- splitAppTy_maybe oty2 , typeKind l1 `eqType` typeKind l2 -- kind(r1) == kind(r2) by consequence = let role' = if role == Phantom then Phantom else Nominal in - -- role' is to comform to mkAppCo's precondition + -- role' is to conform to mkAppCo's precondition mkAppCo (opt_univ env prov role l1 l2) (opt_univ env prov role' r1 r2) | Just (tv1, ty1) <- splitForAllTy_maybe oty1 diff --git a/compiler/Eta/Types/TyCon.hs b/compiler/Eta/Types/TyCon.hs index c5fb3f28..a0fa1e07 100644 --- a/compiler/Eta/Types/TyCon.hs +++ b/compiler/Eta/Types/TyCon.hs @@ -191,7 +191,7 @@ See also Note [Wrappers for data instance tycons] in MkId.lhs * The axiom ax_ti may be eta-reduced; see Note [Eta reduction for data family axioms] in TcInstDcls -* The data contructor T2 has a wrapper (which is what the +* The data constructor T2 has a wrapper (which is what the source-level "T2" invokes): $WT2 :: Bool -> T Int @@ -694,7 +694,7 @@ data TyConParent [Type] -- Argument types (mentions the tyConTyVars of this TyCon) -- Match in length the tyConTyVars of the family TyCon - -- E.g. data intance T [a] = ... + -- E.g. data instance T [a] = ... -- gives a representation tycon: -- data R:TList a = ... -- axiom co a :: T [a] ~ R:TList a @@ -1034,7 +1034,7 @@ So we compromise, and move their Kind calculation to the call site. -} -- | Given the name of the function type constructor and it's kind, create the --- corresponding 'TyCon'. It is reccomended to use 'TypeRep.funTyCon' if you want +-- corresponding 'TyCon'. 
It is recommended to use 'TypeRep.funTyCon' if you want -- this functionality mkFunTyCon :: Name -> Kind -> TyCon mkFunTyCon name kind @@ -1087,7 +1087,7 @@ mkClassTyCon :: Name -> Kind -> [TyVar] -> [Role] -> AlgTyConRhs -> Class mkClassTyCon name kind tyvars roles rhs clas is_rec = mkAlgTyCon name kind tyvars roles Nothing [] rhs (ClassTyCon clas) is_rec False - Nothing -- Class TyCons are not pormoted + Nothing -- Class TyCons are not promoted mkTupleTyCon :: Name -> Kind -- ^ Kind of the resulting 'TyCon' @@ -1427,8 +1427,8 @@ isDataFamilyTyCon :: TyCon -> Bool isDataFamilyTyCon (AlgTyCon {algTcRhs = DataFamilyTyCon {}}) = True isDataFamilyTyCon _ = False --- | Are we able to extract informationa 'TyVar' to class argument list --- mappping from a given 'TyCon'? +-- | Are we able to extract information 'TyVar' to class argument list +-- mapping from a given 'TyCon'? isTyConAssoc :: TyCon -> Bool isTyConAssoc tc = isJust (tyConAssoc_maybe tc) diff --git a/compiler/Eta/Types/Type.hs b/compiler/Eta/Types/Type.hs index eda2de82..74d28d5c 100644 --- a/compiler/Eta/Types/Type.hs +++ b/compiler/Eta/Types/Type.hs @@ -360,7 +360,7 @@ Note [Decomposing fat arrow c=>t] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Can we unify (a b) with (Eq a => ty)? If we do so, we end up with a partial application like ((=>) Eq a) which doesn't make sense in -source Haskell. In constrast, we *can* unify (a b) with (t1 -> t2). +source Haskell. In contrast, we *can* unify (a b) with (t1 -> t2). 
Here's an example (Trac #9858) of how you might do it: i :: (Typeable a, Typeable b) => Proxy (a b) -> TypeRep i p = typeRep p @@ -500,7 +500,7 @@ pprUserTypeErrorTy ty = | tyConName tc == typeErrorVAppendDataConName -> pprUserTypeErrorTy t1 $$ pprUserTypeErrorTy t2 - -- An uneavaluated type function + -- An unevaluated type function _ -> ppr ty @@ -940,7 +940,7 @@ splitForAllTy_maybe ty = splitFAT_m ty splitFAT_m _ = Nothing -- | Attempts to take a forall type apart, returning all the immediate such bound --- type variables and the remainder of the type. Always suceeds, even if that means +-- type variables and the remainder of the type. Always succeeds, even if that means -- returning an empty list of 'TyVar's splitForAllTys :: Type -> ([TyVar], Type) splitForAllTys ty = split ty ty [] diff --git a/compiler/Eta/Types/TypeRep.hs b/compiler/Eta/Types/TypeRep.hs index bd09e9fc..1782fb09 100644 --- a/compiler/Eta/Types/TypeRep.hs +++ b/compiler/Eta/Types/TypeRep.hs @@ -111,7 +111,7 @@ data Type | TyConApp -- See Note [AppTy invariant] TyCon [KindOrType] -- ^ Application of a 'TyCon', including newtypes /and/ synonyms. - -- Invariant: saturated appliations of 'FunTyCon' must + -- Invariant: saturated applications of 'FunTyCon' must -- use 'FunTy' and saturated synonyms must use their own -- constructors. However, /unsaturated/ 'FunTyCon's -- do appear as 'TyConApp's. @@ -380,7 +380,7 @@ as ATyCon. You can tell the difference, and get to the class, with The Class and its associated TyCon have the same Name. -} -- | A typecheckable-thing, essentially anything that has a name data TyThing = AnId Id | AConLike ConLike @@ -509,9 +509,9 @@ works just by setting the initial context precedence very high. Note [Precedence in types] ~~~~~~~~~~~~~~~~~~~~~~~~~~ We don't keep the fixity of type operators in the operator. 
So the pretty printer -operates the following precedene structre: +operates the following precedence structure: Type constructor application binds more tightly than - Oerator applications which bind more tightly than + Operator applications which bind more tightly than Function arrow So we might see a :+: T b -> c @@ -522,7 +522,7 @@ Maybe operator applications should bind a bit less tightly? Anyway, that's the current story, and it is used consistently for Type and HsType -} -data TyPrec -- See Note [Prededence in types] +data TyPrec -- See Note [Precedence in types] = TopPrec -- No parens | FunPrec -- Function args; no parens for tycon apps @@ -701,7 +701,7 @@ So I'm trying out this rule: print explicit foralls if b) Any of the quantified type variables has a kind that mentions a kind variable -This catches common situations, such as a type siguature +This catches common situations, such as a type signature f :: m a which means f :: forall k. forall (m :: k->*) (a :: k). m a @@ -787,7 +787,7 @@ pprTupleApp p pp tc tys tupleParens (tupleTyConSort tc) (sep (punctuate comma (map (pp TopPrec) tys))) pprTcApp_help :: TyPrec -> (TyPrec -> a -> SDoc) -> TyCon -> [a] -> DynFlags -> SDoc --- This one has accss to the DynFlags +-- This one has access to the DynFlags pprTcApp_help p pp tc tys dflags | not (isSymOcc (nameOccName (tyConName tc))) = pprPrefixApp p (ppr tc) (map (pp TyConPrec) tys_wo_kinds) diff --git a/compiler/Eta/Types/Unify.hs b/compiler/Eta/Types/Unify.hs index 858b3fbc..85e122b5 100644 --- a/compiler/Eta/Types/Unify.hs +++ b/compiler/Eta/Types/Unify.hs @@ -415,7 +415,7 @@ Note [Unifying with skolems] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If we discover that two types unify if and only if a skolem variable is substituted, we can't properly unify the types. But, that skolem variable -may later be instantiated with a unifyable type. So, we return maybeApart +may later be instantiated with a unifiable type. So, we return maybeApart in these cases. 
Note [Lists of different lengths are MaybeApart] @@ -505,7 +505,7 @@ To make it idempotent we don't want to get just We also want to substitute inside f's kind, to get [ k -> * , g -> H k (f:*) ] -If we don't do this, we may apply the substitition to something, +If we don't do this, we may apply the substitution to something, and get an ill-formed type, i.e. one where typeKind will fail. This happened, for example, in Trac #9106. diff --git a/compiler/Eta/Utils/Bag.hs b/compiler/Eta/Utils/Bag.hs index 699b1c2b..764503c2 100644 --- a/compiler/Eta/Utils/Bag.hs +++ b/compiler/Eta/Utils/Bag.hs @@ -278,7 +278,7 @@ mapAccumBagL f s (ListBag xs) = let (s', xs') = mapAccumL f s xs in (s', ListBag xs') mapAccumBagLM :: Monad m - => (acc -> x -> m (acc, y)) -- ^ combining funcction + => (acc -> x -> m (acc, y)) -- ^ combining function -> acc -- ^ initial state -> Bag x -- ^ inputs -> m (acc, Bag y) -- ^ final state, outputs diff --git a/compiler/Eta/Utils/Binary.hs b/compiler/Eta/Utils/Binary.hs index 116f855d..4e484a46 100644 --- a/compiler/Eta/Utils/Binary.hs +++ b/compiler/Eta/Utils/Binary.hs @@ -270,7 +270,7 @@ getByte :: BinHandle -> IO Word8 getByte = getWord8 -- ----------------------------------------------------------------------------- --- Primitve Word writes +-- Primitive Word writes instance Binary Word8 where put_ = putWord8 @@ -331,7 +331,7 @@ instance Binary Word64 where (fromIntegral w8)) -- ----------------------------------------------------------------------------- --- Primitve Int writes +-- Primitive Int writes instance Binary Int8 where put_ h w = put_ h (fromIntegral w :: Word8) @@ -463,7 +463,7 @@ instance Binary DiffTime where -- -- TODO This instance is not architecture portable. GMP stores numbers as -- arrays of machine sized words, so the byte format is not portable across --- architectures with different endianess and word size. +-- architectures with different endianness and word size. 
-- -- This makes it hard (impossible) to make an equivalent instance -- with code that is compilable with non-GHC. Do we need any instance @@ -788,7 +788,7 @@ lazyGet bh = do data UserData = UserData { - -- for *deserialising* only: + -- for *deserializing* only: ud_get_name :: BinHandle -> IO Name, ud_get_fs :: BinHandle -> IO FastString, diff --git a/compiler/Eta/Utils/BooleanFormula.hs b/compiler/Eta/Utils/BooleanFormula.hs index d3b1bd96..3db3588e 100644 --- a/compiler/Eta/Utils/BooleanFormula.hs +++ b/compiler/Eta/Utils/BooleanFormula.hs @@ -87,7 +87,7 @@ The smart constructors (`mkAnd` and `mkOr`) do some attempt to simplify expressi Implemented by mkAnd' / mkOr' 3. Conjunction with false, disjunction with true is simplified, i.e. `mkAnd [mkFalse,x]` becomes `mkFalse`. - 4. Common subexpresion elimination: + 4. Common subexpression elimination: `mkAnd [x,x,y]` is reduced to just `mkAnd [x,y]`. This simplification is not exhaustive, in the sense that it will not produce diff --git a/compiler/Eta/Utils/FastString.hs b/compiler/Eta/Utils/FastString.hs index 65aaebce..545e807b 100644 --- a/compiler/Eta/Utils/FastString.hs +++ b/compiler/Eta/Utils/FastString.hs @@ -19,7 +19,7 @@ -- -- * Just a wrapper for the @Addr#@ of a C string (@Ptr CChar@). -- * Practically no operations. --- * Outputing them is fast. +-- * Outputting them is fast. -- * Generated by 'sLit'. -- * Turn into 'Outputable.SDoc' with 'Outputable.ptext' -- @@ -68,7 +68,7 @@ module Eta.Utils.FastString consFS, nilFS, - -- ** Outputing + -- ** Outputting hPutFS, -- ** Internal diff --git a/compiler/Eta/Utils/FastTypes.hs b/compiler/Eta/Utils/FastTypes.hs index 03c7d5df..9b7e9916 100644 --- a/compiler/Eta/Utils/FastTypes.hs +++ b/compiler/Eta/Utils/FastTypes.hs @@ -69,7 +69,7 @@ type FastInt = Int# --in case it's a macro, don't lexically feed an argument! --e.g. 
#define _ILIT(x) (x#) , #define _ILIT(x) (x :: FastInt) _ILIT = \(I# x) -> x ---perhaps for accomodating caseless-leading-underscore treatment, +--perhaps for accommodating caseless-leading-underscore treatment, --something like _iLIT or iLIT would be better? iBox x = I# x diff --git a/compiler/Eta/Utils/GraphBase.hs b/compiler/Eta/Utils/GraphBase.hs index 3b54182a..a1b20dfa 100644 --- a/compiler/Eta/Utils/GraphBase.hs +++ b/compiler/Eta/Utils/GraphBase.hs @@ -85,7 +85,7 @@ data Node k cls color -- | Colors that cannot be used by this node. , nodeExclusions :: UniqSet color - -- | Colors that this node would prefer to be, in decending order. + -- | Colors that this node would prefer to be, in descending order. , nodePreference :: [color] -- | Neighbors that this node would like to be colored the same as. diff --git a/compiler/Eta/Utils/GraphColor.hs b/compiler/Eta/Utils/GraphColor.hs index bf8f58b1..710ccdea 100644 --- a/compiler/Eta/Utils/GraphColor.hs +++ b/compiler/Eta/Utils/GraphColor.hs @@ -27,7 +27,7 @@ import Data.List -- | Try to color a graph with this set of colors. -- Uses Chaitin's algorithm to color the graph. --- The graph is scanned for nodes which are deamed 'trivially colorable'. These nodes +-- The graph is scanned for nodes which are deemed 'trivially colorable'. These nodes -- are pushed onto a stack and removed from the graph. -- Once this process is complete the graph can be colored by removing nodes from -- the stack (ie in reverse order) and assigning them colors different to their neighbors. @@ -74,7 +74,7 @@ colorGraph iterative spinCount colors triv spill graph0 -- We need to apply all the coalescences found by the scanner to the original -- graph before doing assignColors. -- - -- Because we've got the whole, non-pruned graph here we turn on aggressive coalecing + -- Because we've got the whole, non-pruned graph here we turn on aggressive coalescing -- to force all the (conservative) coalescences found during scanning. 
-- (graph_scan_coalesced, _) @@ -114,7 +114,7 @@ colorGraph iterative spinCount colors triv spill graph0 -- | Scan through the conflict graph separating out trivially colorable and -- potentially uncolorable (problem) nodes. -- --- Checking whether a node is trivially colorable or not is a resonably expensive operation, +-- Checking whether a node is trivially colorable or not is a reasonably expensive operation, -- so after a triv node is found and removed from the graph it's no good to return to the 'start' -- of the graph and recheck a bunch of nodes that will probably still be non-trivially colorable. -- diff --git a/compiler/Eta/Utils/GraphOps.hs b/compiler/Eta/Utils/GraphOps.hs index 4d5e31da..ac66ebe9 100644 --- a/compiler/Eta/Utils/GraphOps.hs +++ b/compiler/Eta/Utils/GraphOps.hs @@ -231,8 +231,8 @@ addExclusions u getClass colors graph = foldr (addExclusion u getClass) graph colors --- | Add a coalescence edge to the graph, creating nodes if requried. --- It is considered adventageous to assign the same color to nodes in a coalesence. +-- | Add a coalescence edge to the graph, creating nodes if required. +-- It is considered advantageous to assign the same color to nodes in a coalescence. addCoalesce :: Uniquable k => (k, cls) -> (k, cls) @@ -470,7 +470,7 @@ freezeNode k = if elementOfUniqSet k (nodeCoalesce node) then node { nodeCoalesce = delOneFromUniqSet (nodeCoalesce node) k } else node -- panic "GraphOps.freezeNode: edge to freeze wasn't in the coalesce set" - -- If the edge isn't actually in the coelesce set then just ignore it. + -- If the edge isn't actually in the coalesce set then just ignore it. 
fm2 = nonDetFoldUniqSet (adjustUFM_C (freezeEdge k)) fm1 -- It's OK to use nonDetFoldUFM here because the operation @@ -567,7 +567,7 @@ validateGraph doc isColored graph , badEdges <- minusUniqSet edges nodes , not $ isEmptyUniqSet badEdges = pprPanic "GraphOps.validateGraph" - ( text "Graph has edges that point to non-existent nodes" + ( text "Graph has edges that point to nonexistent nodes" $$ text " bad edges: " <> pprUFM (getUniqSet badEdges) (vcat . map ppr) $$ doc ) diff --git a/compiler/Eta/Utils/IOEnv.hs b/compiler/Eta/Utils/IOEnv.hs index 0a5012b1..db45be1e 100644 --- a/compiler/Eta/Utils/IOEnv.hs +++ b/compiler/Eta/Utils/IOEnv.hs @@ -122,7 +122,7 @@ instance ContainsModule env => HasModule (IOEnv env) where return $ extractModule env ---------------------------------------------------------------------- --- Fundmantal combinators specific to the monad +-- Fundamental combinators specific to the monad ---------------------------------------------------------------------- diff --git a/compiler/Eta/Utils/JAR.hs b/compiler/Eta/Utils/JAR.hs index c9b209b1..100da6a0 100644 --- a/compiler/Eta/Utils/JAR.hs +++ b/compiler/Eta/Utils/JAR.hs @@ -16,8 +16,8 @@ been created, files can be added to it using the `addByteStringToJar` and When adding multiple files to a Jar, be sure to use the `addMultiByteStringsToJar` function, as it writes all of the file changes in one action, while mapping over a list of files with `addByteStringToJar` would -perform the file write actions all seperately. -Here is a quick exampe of how to create a Jar and add a file into it. +perform the file write actions all separately. +Here is a quick example of how to create a Jar and add a file into it. 
@ -- Create the empty jar let jarLocation = "build/Hello.jar" diff --git a/compiler/Eta/Utils/MonadUtils.hs b/compiler/Eta/Utils/MonadUtils.hs index 02ddf5ff..f316d7b2 100644 --- a/compiler/Eta/Utils/MonadUtils.hs +++ b/compiler/Eta/Utils/MonadUtils.hs @@ -104,7 +104,7 @@ mapAndUnzip4M f (x:xs) = do -- | Monadic version of mapAccumL mapAccumLM :: Monad m - => (acc -> x -> m (acc, y)) -- ^ combining funcction + => (acc -> x -> m (acc, y)) -- ^ combining function -> acc -- ^ initial state -> [x] -- ^ inputs -> m (acc, [y]) -- ^ final state, outputs diff --git a/compiler/Eta/Utils/Outputable.hs b/compiler/Eta/Utils/Outputable.hs index 32bea809..0bf219be 100644 --- a/compiler/Eta/Utils/Outputable.hs +++ b/compiler/Eta/Utils/Outputable.hs @@ -36,7 +36,7 @@ module Eta.Utils.Outputable ( colored, keyword, - -- * Converting 'SDoc' into strings and outputing it + -- * Converting 'SDoc' into strings and outputting it printForC, printForAsm, printForUser, printForUserColored, printForUserPartWay, pprCode, mkCodeStyle, showSDoc, showSDocUnsafe, showSDocOneLine, @@ -161,7 +161,7 @@ data PrintUnqualified = QueryQualify { -- | given an /original/ name, this function tells you which module -- name it should be qualified with when printing for the user, if -- any. For example, given @Control.Exception.catch@, which is in scope --- as @Exception.catch@, this fuction will return @Just "Exception"@. +-- as @Exception.catch@, this function will return @Just "Exception"@. -- Note that the return value is a ModuleName, not a Module, because -- in source code, names are qualified by ModuleNames. type QueryQualifyName = Module -> OccName -> QualifyName @@ -633,7 +633,7 @@ fsep :: [SDoc] -> SDoc -- ^ A paragraph-fill combinator. It's much like sep, only it -- keeps fitting things on one line until it can't fit any more. 
fcat :: [SDoc] -> SDoc --- ^ This behaves like 'fsep', but it uses '<>' for horizontal conposition rather than '<+>' +-- ^ This behaves like 'fsep', but it uses '<>' for horizontal composition rather than '<+>' hcat ds = SDoc $ \sty -> Pretty.hcat [runSDoc d sty | d <- ds] @@ -1084,7 +1084,7 @@ warnPprTrace True file line msg x heading = hsep [text "WARNING: file", text file <> comma, text "line", int line] assertPprPanic :: String -> Int -> SDoc -> a --- ^ Panic with an assertation failure, recording the given file and line number. +-- ^ Panic with an assertion failure, recording the given file and line number. -- Should typically be accessed with the ASSERT family of macros assertPprPanic file line msg = pprPanic "ASSERT failed!" doc diff --git a/compiler/Eta/Utils/Platform.hs b/compiler/Eta/Utils/Platform.hs index 5ce9af41..4721c0e4 100644 --- a/compiler/Eta/Utils/Platform.hs +++ b/compiler/Eta/Utils/Platform.hs @@ -28,7 +28,7 @@ data Platform -- Word size in bytes (i.e. normally 4 or 8, -- for 32bit and 64bit platforms respectively) platformWordSize :: {-# UNPACK #-} !Int, platformUnregisterised :: Bool, platformHasGnuNonexecStack :: Bool, platformHasIdentDirective :: Bool, platformHasSubsectionsViaSymbols :: Bool, diff --git a/compiler/Eta/Utils/Pretty.hs b/compiler/Eta/Utils/Pretty.hs index 45a85f7d..f0fbc163 100644 --- a/compiler/Eta/Utils/Pretty.hs +++ b/compiler/Eta/Utils/Pretty.hs @@ -21,7 +21,7 @@ Version 3.0 28 May 1997 quadratic behaviour with left-associated (++) chains. This is really bad news. One thing a pretty-printer abstraction should - certainly guarantee is insensivity to associativity. It matters: suddenly + certainly guarantee is insensitivity to associativity. It matters: suddenly GHC's compilation times went up by a factor of 100 when I switched to the new pretty printer. 
@@ -101,8 +101,8 @@ Relative to John's original paper, there are the following new features: sep (separate) is either like hsep or like vcat, depending on what fits - cat is behaves like sep, but it uses <> for horizontal conposition - fcat is behaves like fsep, but it uses <> for horizontal conposition + cat behaves like sep, but it uses <> for horizontal composition + fcat behaves like fsep, but it uses <> for horizontal composition These new ones do the obvious things: char, semi, comma, colon, space, @@ -335,7 +335,7 @@ Laws for oneLiner \end{verbatim} -You might think that the following verion of would +You might think that the following version of would be neater: \begin{verbatim} <3 NO> (text s <> x) $$ y = text s <> ((empty <> x)) $$ @@ -448,7 +448,7 @@ The argument of @NilAbove@ is never @Empty@. Therefore a @NilAbove@ occupies at least two lines. \item -The arugment of @TextBeside@ is never @Nest@. +The argument of @TextBeside@ is never @Nest@. \item The layouts of the two arguments of @Union@ both flatten to the same string. @@ -579,7 +579,7 @@ above p@(Beside _ _ _) g q = aboveNest (reduceDoc p) g (_ILIT(0)) (reduceDoc q above p g q = aboveNest p g (_ILIT(0)) (reduceDoc q) aboveNest :: RDoc -> Bool -> FastInt -> RDoc -> RDoc --- Specfication: aboveNest p g k q = p $g$ (nest k q) +-- Specification: aboveNest p g k q = p $g$ (nest k q) aboveNest NoDoc _ _ _ = NoDoc aboveNest (p1 `Union` p2) g k q = aboveNest p1 g k q `union_` diff --git a/compiler/Eta/Utils/UnVarGraph.hs b/compiler/Eta/Utils/UnVarGraph.hs index 39af1585..4ec557ec 100644 --- a/compiler/Eta/Utils/UnVarGraph.hs +++ b/compiler/Eta/Utils/UnVarGraph.hs @@ -8,7 +8,7 @@ A data structure for undirected graphs of variables This is very specifically tailored for the use in CallArity. In particular it stores the graph as a union of complete and complete bipartite graph, which -would be very expensive to store as sets of edges or as adjanceny lists. 
+would be very expensive to store as sets of edges or as adjacency lists. It does not normalize the graphs. This means that g `unionUnVarGraph` g is equal to g, but twice as expensive and large. @@ -42,7 +42,7 @@ import qualified Data.IntSet as S -- at hand, and we do not have that when we turn the domain of a VarEnv into a UnVarSet. -- Therefore, use a IntSet directly (which is likely also a bit more efficient). --- Set of uniques, i.e. for adjancet nodes +-- Set of uniques, i.e. for adjacent nodes newtype UnVarSet = UnVarSet (S.IntSet) deriving Eq diff --git a/compiler/Eta/Utils/UniqDFM.hs b/compiler/Eta/Utils/UniqDFM.hs index 969299bb..7b5a79e0 100644 --- a/compiler/Eta/Utils/UniqDFM.hs +++ b/compiler/Eta/Utils/UniqDFM.hs @@ -375,7 +375,7 @@ instance Monoid (UniqDFM a) where mempty = emptyUDFM mappend = plusUDFM --- This should not be used in commited code, provided for convenience to +-- This should not be used in committed code, provided for convenience to -- make ad-hoc conversions when developing alwaysUnsafeUfmToUdfm :: UniqFM elt -> UniqDFM elt alwaysUnsafeUfmToUdfm = listToUDFM_Directly . nonDetUFMToList diff --git a/compiler/Eta/Utils/UniqFM.hs b/compiler/Eta/Utils/UniqFM.hs index 8785e3e5..e5098b9e 100644 --- a/compiler/Eta/Utils/UniqFM.hs +++ b/compiler/Eta/Utils/UniqFM.hs @@ -194,7 +194,7 @@ plusUFM_C :: (elt -> elt -> elt) -> UniqFM elt -> UniqFM elt -> UniqFM elt plusUFM_C f (UFM x) (UFM y) = UFM (M.unionWith f x y) -- | `plusUFM_CD f m1 d1 m2 d2` merges the maps using `f` as the --- combinding function and `d1` resp. `d2` as the default value if +-- combining function and `d1` resp. `d2` as the default value if -- there is no entry in `m1` reps. `m2`. The domain is the union of -- the domains of `m1` and `m2`. 
-- diff --git a/compiler/Eta/Utils/Util.hs b/compiler/Eta/Utils/Util.hs index 3aef69bb..ca3096c4 100644 --- a/compiler/Eta/Utils/Util.hs +++ b/compiler/Eta/Utils/Util.hs @@ -293,7 +293,7 @@ splitEithers (e : es) = case e of where (xs,ys) = splitEithers es chkAppend :: [a] -> [a] -> [a] --- Checks for the second arguemnt being empty +-- Checks for the second argument being empty -- Used in situations where that situation is common chkAppend xs ys | null ys = xs @@ -637,7 +637,7 @@ splitAtList (_:xs) (y:ys) = (y:ys', ys'') -- drop from the end of a list dropTail :: Int -> [a] -> [a] -- Specification: dropTail n = reverse . drop n . reverse --- Better implemention due to Joachim Breitner +-- Better implementation due to Joachim Breitner -- http://www.joachim-breitner.de/blog/archives/600-On-taking-the-last-n-elements-of-a-list.html dropTail n xs = go (drop n xs) xs @@ -834,16 +834,16 @@ fuzzyMatch key vals = fuzzyLookup key [(v,v) | v <- vals] -- | Search for possible matches to the users input in the given list, -- returning a small number of ranked results fuzzyLookup :: String -> [(String,a)] -> [a] -fuzzyLookup user_entered possibilites +fuzzyLookup user_entered possibilities = map fst $ take mAX_RESULTS $ sortBy (comparing snd) - [ (poss_val, distance) | (poss_str, poss_val) <- possibilites + [ (poss_val, distance) | (poss_str, poss_val) <- possibilities , let distance = restrictedDamerauLevenshteinDistance poss_str user_entered , distance <= fuzzy_threshold ] where - -- Work out an approriate match threshold: + -- Work out an appropriate match threshold: -- We report a candidate if its edit distance is <= the threshold, - -- The threshhold is set to about a quarter of the # of characters the user entered + -- The threshold is set to about a quarter of the # of characters the user entered -- Length Threshold -- 1 0 -- Don't suggest *any* candidates -- 2 1 -- for single-char identifiers diff --git a/docker/README.md b/docker/README.md index 62044312..19c5a801 
100644 --- a/docker/README.md +++ b/docker/README.md @@ -11,7 +11,7 @@ By default the target repository will be inferred from the GIT repository name, This behavior can be changed by setting `DOCKER_REPO` env variable to override the default setting. For example, setting `DOCKER_REPO` to `quay.io/jdoe/myrepo` will result pushing to that docker repository. ## Results and tags -The contaner is pushed as: +The container is pushed as: - `:` - is pushed always - `:` - is pushed if the commit that is being built is tagged diff --git a/docs/0-user-guides/0-eta-user-guide/4-functions/3-application.md b/docs/0-user-guides/0-eta-user-guide/4-functions/3-application.md index 31b53d8c..5bc1faf1 100644 --- a/docs/0-user-guides/0-eta-user-guide/4-functions/3-application.md +++ b/docs/0-user-guides/0-eta-user-guide/4-functions/3-application.md @@ -48,4 +48,4 @@ Note that we were able to define `applyOne` solely by composing existing functio ## Next Section -We will explore even futher methods of re-use by exploring higher-order functions. +We will explore even further methods of re-use by exploring higher-order functions. diff --git a/docs/0-user-guides/1-etlas-user-guide/2-basics/1-the-build-process.md b/docs/0-user-guides/1-etlas-user-guide/2-basics/1-the-build-process.md index c394489b..7dd83708 100644 --- a/docs/0-user-guides/1-etlas-user-guide/2-basics/1-the-build-process.md +++ b/docs/0-user-guides/1-etlas-user-guide/2-basics/1-the-build-process.md @@ -4,7 +4,7 @@ In the last section, we ran the `etlas run` command which runs the `etlas build` command for you and executes your program's launcher script as a final step. In this section, we will cover in detail what happens when you build a project. -## Teminology +## Terminology A **project** is a group of packages that may have inter-dependencies. 
diff --git a/docs/0-user-guides/1-etlas-user-guide/3-configuration/1-dependency-management.md b/docs/0-user-guides/1-etlas-user-guide/3-configuration/1-dependency-management.md index 053d29db..8083c08c 100644 --- a/docs/0-user-guides/1-etlas-user-guide/3-configuration/1-dependency-management.md +++ b/docs/0-user-guides/1-etlas-user-guide/3-configuration/1-dependency-management.md @@ -60,7 +60,7 @@ source-repository-package commit: acbbe10b68f22f8f3f8ac21c82f12bb811a2fa7e ``` -If the package `eta-spark-core` with the exact same version as in the commit specified can be found in Hacakge or Etlas Index, the GitHub location will override the other search locations. This makes it easy to depend on forked versions of existing libraries when the released versions of those libraries don't contain the functionality required for your project. +If the package `eta-spark-core` with the exact same version as in the commit specified can be found in Hackage or Etlas Index, the GitHub location will override the other search locations. This makes it easy to depend on forked versions of existing libraries when the released versions of those libraries don't contain the functionality required for your project. ## JVM Dependency diff --git a/docs/0-user-guides/2-gradle-user-guide/0-introduction/0-what-is-gradle.md b/docs/0-user-guides/2-gradle-user-guide/0-introduction/0-what-is-gradle.md index 1b67478c..b3ada123 100644 --- a/docs/0-user-guides/2-gradle-user-guide/0-introduction/0-what-is-gradle.md +++ b/docs/0-user-guides/2-gradle-user-guide/0-introduction/0-what-is-gradle.md @@ -8,7 +8,7 @@ Gradle is an advanced build automation system that uses a [Groovy DSL](https://d We chose to add full support for Eta in Gradle through the [Eta Gradle Plugin](https://github.com/typelead/gradle-eta) for a few reasons we outline below: -- **Extensiblility**: Gradle is very extensible and allows you to specify concise configuration for highly complex builds. 
+- **Extensibility**: Gradle is very extensible and allows you to specify concise configuration for highly complex builds. - **Reproducibility**: Gradle allows you to generate a [wrapper script](https://docs.gradle.org/current/userguide/gradle_wrapper.html) that will let anyone build your project without having Gradle installed before hand, ensuring that your project can build on every configuration reliably. - **JVM Support**: Gradle makes it very easy to setup polyglot JVM language projects allowing Eta to be easily plugged in to any JVM setup. - **Caching**: Gradle offers [build caching](https://docs.gradle.org/current/userguide/build_cache.html) which will cache a build based on the inputs and outputs. This works great for organizations which can setup a remote build cache to save time in building large internal projects. diff --git a/docs/0-user-guides/2-gradle-user-guide/2-advanced-configuration/0-plugin-management.md b/docs/0-user-guides/2-gradle-user-guide/2-advanced-configuration/0-plugin-management.md index 02791467..c13d4e60 100644 --- a/docs/0-user-guides/2-gradle-user-guide/2-advanced-configuration/0-plugin-management.md +++ b/docs/0-user-guides/2-gradle-user-guide/2-advanced-configuration/0-plugin-management.md @@ -19,7 +19,7 @@ There are three plugins which the Eta Gradle plugin provides: ### Eta Base Plugin -This plugin is used for configuring your Eta and Etlas versions and other global parametrs for all the projects in your build. It will activate the `eta` extension block that will allow you to do so. +This plugin is used for configuring your Eta and Etlas versions and other global parameters for all the projects in your build. It will activate the `eta` extension block that will allow you to do so. **NOTE:** Both the Eta Plugin and the Eta Android Plugin apply the Eta Base Plugin by default, so if you apply either one, it is not required to apply this one! 
diff --git a/docs/0-user-guides/2-gradle-user-guide/2-advanced-configuration/2-dependency-management.md b/docs/0-user-guides/2-gradle-user-guide/2-advanced-configuration/2-dependency-management.md index 0f0783a4..3ccffd49 100644 --- a/docs/0-user-guides/2-gradle-user-guide/2-advanced-configuration/2-dependency-management.md +++ b/docs/0-user-guides/2-gradle-user-guide/2-advanced-configuration/2-dependency-management.md @@ -30,13 +30,13 @@ dependencies { | Notation | Description | | -------- | -------------------------------------- | | `[a,b]` | Matches all versions greater than or equal to `a` and less than or equal to `b` | -| `[a,b[` | Matches all versions greater than or equal to `a` and less than than `b` | +| `[a,b[` | Matches all versions greater than or equal to `a` and less than `b` | | `]a,b]` | Matches all versions greater than `a` and less than or equal to `b` | -| `]a,b[` | Matches all versions greater than `a` and less than than `b` | +| `]a,b[` | Matches all versions greater than `a` and less than `b` | | `[a,)` | Matches all versions greater than or equal to `a` | | `]a,)` | Matches all versions greater than `a` | | `(,b]` | Matches all versions less than or equal to `b` | -| `(,b[` | Matches all versions less than than `b` | +| `(,b[` | Matches all versions less than `b` | **Example** diff --git a/docs/0-user-guides/2-gradle-user-guide/2-advanced-configuration/3-compiler-configuration.md b/docs/0-user-guides/2-gradle-user-guide/2-advanced-configuration/3-compiler-configuration.md index 05af5662..fbdf7718 100644 --- a/docs/0-user-guides/2-gradle-user-guide/2-advanced-configuration/3-compiler-configuration.md +++ b/docs/0-user-guides/2-gradle-user-guide/2-advanced-configuration/3-compiler-configuration.md @@ -18,7 +18,7 @@ The following table lists the properties that are available for the `options` pr ## language -This specifies the language to use for the build. 
A language consists of a set of predefined language extensions are are defined by the compiler. Currently, there are only two values you can put for this field: +This specifies the language to use for the build. A language consists of a set of predefined language extensions are defined by the compiler. Currently, there are only two values you can put for this field: - `'Haskell98'` - `'Haskell2010'` diff --git a/docs/0-user-guides/3-eta-repl-user-guide/0-introduction/0-what-is-eta-repl.md b/docs/0-user-guides/3-eta-repl-user-guide/0-introduction/0-what-is-eta-repl.md index 5617b555..35029d0f 100644 --- a/docs/0-user-guides/3-eta-repl-user-guide/0-introduction/0-what-is-eta-repl.md +++ b/docs/0-user-guides/3-eta-repl-user-guide/0-introduction/0-what-is-eta-repl.md @@ -15,7 +15,7 @@ The Eta REPL is based on Haskell's [GHCi](https://downloads.haskell.org/~ghc/8.4 ## Motivation -Interacting with your programs in realtime is a great way to quickly solidfy your understanding of how your application is pieced together and also quickly learn about how new libraries work. +Interacting with your programs in realtime is a great way to quickly solidify your understanding of how your application is pieced together and also quickly learn about how new libraries work. ## Next Section diff --git a/docs/0-user-guides/4-sbt-user-guide/0-introduction/0-what-is-sbt.md b/docs/0-user-guides/4-sbt-user-guide/0-introduction/0-what-is-sbt.md index 1b67478c..b3ada123 100644 --- a/docs/0-user-guides/4-sbt-user-guide/0-introduction/0-what-is-sbt.md +++ b/docs/0-user-guides/4-sbt-user-guide/0-introduction/0-what-is-sbt.md @@ -8,7 +8,7 @@ Gradle is an advanced build automation system that uses a [Groovy DSL](https://d We chose to add full support for Eta in Gradle through the [Eta Gradle Plugin](https://github.com/typelead/gradle-eta) for a few reasons we outline below: -- **Extensiblility**: Gradle is very extensible and allows you to specify concise configuration for highly complex builds. 
+- **Extensibility**: Gradle is very extensible and allows you to specify concise configuration for highly complex builds. - **Reproducibility**: Gradle allows you to generate a [wrapper script](https://docs.gradle.org/current/userguide/gradle_wrapper.html) that will let anyone build your project without having Gradle installed before hand, ensuring that your project can build on every configuration reliably. - **JVM Support**: Gradle makes it very easy to setup polyglot JVM language projects allowing Eta to be easily plugged in to any JVM setup. - **Caching**: Gradle offers [build caching](https://docs.gradle.org/current/userguide/build_cache.html) which will cache a build based on the inputs and outputs. This works great for organizations which can setup a remote build cache to save time in building large internal projects. diff --git a/docs/0-user-guides/4-sbt-user-guide/2-advanced-configuration/2-dependency-management.md b/docs/0-user-guides/4-sbt-user-guide/2-advanced-configuration/2-dependency-management.md index 6a3ee5cd..fe04ee5c 100644 --- a/docs/0-user-guides/4-sbt-user-guide/2-advanced-configuration/2-dependency-management.md +++ b/docs/0-user-guides/4-sbt-user-guide/2-advanced-configuration/2-dependency-management.md @@ -54,13 +54,13 @@ libraryDependencies in EtaLib += eta("array", "1.2.3.0") | Notation | Description | | -------- | -------------------------------------- | | `[a,b]` | Matches all versions greater than or equal to `a` and less than or equal to `b` | -| `[a,b[` | Matches all versions greater than or equal to `a` and less than than `b` | +| `[a,b[` | Matches all versions greater than or equal to `a` and less than `b` | | `]a,b]` | Matches all versions greater than `a` and less than or equal to `b` | -| `]a,b[` | Matches all versions greater than `a` and less than than `b` | +| `]a,b[` | Matches all versions greater than `a` and less than `b` | | `[a,)` | Matches all versions greater than or equal to `a` | | `]a,)` | Matches all versions 
greater than `a` | | `(,b]` | Matches all versions less than or equal to `b` | -| `(,b[` | Matches all versions less than than `b` | +| `(,b[` | Matches all versions less than `b` | **Example** diff --git a/docs/0-user-guides/4-sbt-user-guide/2-advanced-configuration/3-compiler-configuration.md b/docs/0-user-guides/4-sbt-user-guide/2-advanced-configuration/3-compiler-configuration.md index 45919d2c..0fd977da 100644 --- a/docs/0-user-guides/4-sbt-user-guide/2-advanced-configuration/3-compiler-configuration.md +++ b/docs/0-user-guides/4-sbt-user-guide/2-advanced-configuration/3-compiler-configuration.md @@ -18,7 +18,7 @@ The following table lists the settings that are available for the `Eta[Type]` co ## language -This specifies the language to use for the build. A language consists of a set of predefined language extensions are are defined by the compiler. Currently, there are only two values you can put for this field: +This specifies the language to use for the build. A language consists of a set of predefined language extensions that are defined by the compiler. Currently, there are only two values you can put for this field: - `"Haskell98"` - `"Haskell2010"` diff --git a/eta/Eta/REPL/UI.hs b/eta/Eta/REPL/UI.hs index ca8fc11d..cbb9df74 100644 --- a/eta/Eta/REPL/UI.hs +++ b/eta/Eta/REPL/UI.hs @@ -722,8 +722,8 @@ runGHCiInput f = do nextInputLine :: Bool -> Bool -> InputT GHCi (Maybe String) nextInputLine show_prompt is_tty | is_tty = do - prmpt <- if show_prompt then lift mkPrompt else return "" - r <- getInputLine prmpt + prompt <- if show_prompt then lift mkPrompt else return "" + r <- getInputLine prompt incrementLineNo return r | otherwise = do @@ -1193,7 +1193,7 @@ runStmt stmt step = do run_stmt stmt' = do -- In the new IO library, read handles buffer data even if the Handle -- is set to NoBuffering. This causes problems for GHCi where there - -- are really two stdin Handles. 
So we flush any buffered data in -- GHCi's stdin Handle here (only relevant if stdin is attached to -- a file, otherwise the read buffer can't be flushed). _ <- liftIO $ tryIO $ hFlushAll stdin @@ -1548,10 +1548,10 @@ editFile str = -- or otherwise the first target. -- -- XXX: Can we figure out what happened if the depndecy analysis fails --- (e.g., because the porgrammeer mistyped the name of a module)? +-- (e.g., because the programmer mistyped the name of a module)? -- XXX: Can we figure out the location of an error to pass to the editor? --- XXX: if we could figure out the list of errors that occured during the --- last load/reaload, then we could start the editor focused on the first +-- XXX: if we could figure out the list of errors that occurred during the +-- last load/reload, then we could start the editor focused on the first -- of those. chooseEditFile :: GHCi String chooseEditFile = diff --git a/eta/Main.hs b/eta/Main.hs index 95ed54a7..af172955 100644 --- a/eta/Main.hs +++ b/eta/Main.hs @@ -150,7 +150,7 @@ main' postLoadMode dflags0 args flagWarnings = do } -- turn on -fimplicit-import-qualified for GHCi now, so that it - -- can be overriden from the command-line + -- can be overridden from the command-line -- XXX: this should really be in the interactive DynFlags, but -- we don't set that until later in interactiveUI dflags3 | DoInteractive <- postLoadMode = imp_qual_enabled @@ -179,7 +179,7 @@ main' postLoadMode dflags0 args flagWarnings = do -- into forward slashes. normal_fileish_paths = map (normalise . unLoc) fileish_args -- TODO: Clean this up - (srcs', objs) = parititionArgs normal_fileish_paths [] [] + (srcs', objs) = partitionArgs normal_fileish_paths [] [] srcs = srcs' ++ map (\o -> (o, Nothing)) objs dflags5 = dflags4 @@ -236,19 +236,19 @@ etaReplUI = interactiveUI defaultEtaReplSettings -- interpret the -x option, and attach a (Maybe Phase) to each source -- file indicating the phase specified by the -x option in force, if any. 
-parititionArgs :: [String] -> [(String, Maybe Phase)] -> [String] +partitionArgs :: [String] -> [(String, Maybe Phase)] -> [String] -> ([(String, Maybe Phase)], [String]) -parititionArgs [] srcs objs = (reverse srcs, reverse objs) -parititionArgs ("-x":suff:args) srcs objs - | "none" <- suff = parititionArgs args srcs objs - | StopLn <- phase = parititionArgs args srcs (slurp ++ objs) - | otherwise = parititionArgs rest (these_srcs ++ srcs) objs +partitionArgs [] srcs objs = (reverse srcs, reverse objs) +partitionArgs ("-x":suff:args) srcs objs - | "none" <- suff = partitionArgs args srcs objs - | StopLn <- phase = partitionArgs args srcs (slurp ++ objs) - | otherwise = partitionArgs rest (these_srcs ++ srcs) objs where phase = startPhase suff (slurp,rest) = break (== "-x") args these_srcs = zip slurp (repeat (Just phase)) -parititionArgs (arg:args) srcs objs - | looksLikeAnInput arg = parititionArgs args ((arg,Nothing):srcs) objs - | otherwise = parititionArgs args srcs (arg:objs) +partitionArgs (arg:args) srcs objs + | looksLikeAnInput arg = partitionArgs args ((arg,Nothing):srcs) objs + | otherwise = partitionArgs args srcs (arg:objs) {- We split out the object files (.o, .dll) and add them @@ -524,7 +524,7 @@ modeFlags = "Object splitting supported", "Have native code generator", "Support SMP", - "Unregisterised", + "Unregisterised", "Tables next to code", "RTS ways", "Leading underscore", @@ -797,7 +797,7 @@ options may be necessary in order to find the .hi files. This is used by Cabal for generating the InstalledPackageId for a package. The InstalledPackageId must change when the visible ABI of -the package chagnes, so during registration Cabal calls ghc --abi-hash +the package changes, so during registration Cabal calls ghc --abi-hash to get a hash of the package's ABI. 
-} diff --git a/libraries/base/Control/Concurrent.hs b/libraries/base/Control/Concurrent.hs index 3c823977..a9035cbc 100644 --- a/libraries/base/Control/Concurrent.hs +++ b/libraries/base/Control/Concurrent.hs @@ -537,7 +537,7 @@ threadWaitWriteSTM fd = GHC.Conc.threadWaitWriteSTM fd GHC implements pre-emptive multitasking: the execution of threads are interleaved in a random fashion. More specifically, - a thread may be pre-empted whenever it allocates some memory, + a thread may be preempted whenever it allocates some memory, which unfortunately means that tight loops which do no allocation tend to lock out other threads (this only seems to happen with pathological benchmark-style code, however). @@ -545,7 +545,7 @@ threadWaitWriteSTM fd = GHC.Conc.threadWaitWriteSTM fd The rescheduling timer runs on a 20ms granularity by default, but this may be altered using the @-i\@ RTS option. After a rescheduling - \"tick\" the running thread is pre-empted as soon as + \"tick\" the running thread is preempted as soon as possible. One final note: the diff --git a/libraries/base/Control/Concurrent/QSem.hs b/libraries/base/Control/Concurrent/QSem.hs index bae31057..e164920a 100644 --- a/libraries/base/Control/Concurrent/QSem.hs +++ b/libraries/base/Control/Concurrent/QSem.hs @@ -29,7 +29,7 @@ import Control.Concurrent.MVar ( MVar, newEmptyMVar, takeMVar, tryTakeMVar import Control.Exception import Data.Maybe --- | 'QSem' is a quantity semaphore in which the resource is aqcuired +-- | 'QSem' is a quantity semaphore in which the resource is acquired -- and released in units of one. It provides guaranteed FIFO ordering -- for satisfying blocked `waitQSem` calls. 
-- diff --git a/libraries/base/Control/Concurrent/QSemN.hs b/libraries/base/Control/Concurrent/QSemN.hs index 7043ff06..7d3b404d 100644 --- a/libraries/base/Control/Concurrent/QSemN.hs +++ b/libraries/base/Control/Concurrent/QSemN.hs @@ -31,7 +31,7 @@ import Control.Concurrent.MVar ( MVar, newEmptyMVar, takeMVar, tryTakeMVar import Control.Exception import Data.Maybe --- | 'QSemN' is a quantity semaphore in which the resource is aqcuired +-- | 'QSemN' is a quantity semaphore in which the resource is acquired -- and released in units of one. It provides guaranteed FIFO ordering -- for satisfying blocked `waitQSemN` calls. -- diff --git a/libraries/base/Control/Exception/Base.hs b/libraries/base/Control/Exception/Base.hs index 462504cb..82ceee29 100644 --- a/libraries/base/Control/Exception/Base.hs +++ b/libraries/base/Control/Exception/Base.hs @@ -167,7 +167,7 @@ mapException f v = unsafePerformIO (catch (evaluate v) -- | Similar to 'catch', but returns an 'Either' result which is -- @('Right' a)@ if no exception of type @e@ was raised, or @('Left' ex)@ -- if an exception of type @e@ was raised and its value is @ex@. --- If any other type of exception is raised than it will be propogated +-- If any other type of exception is raised than it will be propagated -- up to the next enclosing exception handler. -- -- > try a = catch (Right `liftM` a) (return . Left) diff --git a/libraries/base/Data/Maybe.hs b/libraries/base/Data/Maybe.hs index e81cdf7a..b8808b1f 100644 --- a/libraries/base/Data/Maybe.hs +++ b/libraries/base/Data/Maybe.hs @@ -147,7 +147,7 @@ fromJust :: Maybe a -> a fromJust Nothing = errorWithoutStackTrace "Maybe.fromJust: Nothing" -- yuck fromJust (Just x) = x --- | The 'fromMaybe' function takes a default value and and 'Maybe' +-- | The 'fromMaybe' function takes a default value and 'Maybe' -- value. If the 'Maybe' is 'Nothing', it returns the default values; -- otherwise, it returns the value contained in the 'Maybe'. 
-- diff --git a/libraries/base/Data/OldList.hs b/libraries/base/Data/OldList.hs index b9b98f2a..9233d453 100644 --- a/libraries/base/Data/OldList.hs +++ b/libraries/base/Data/OldList.hs @@ -907,7 +907,7 @@ permutations xs0 = xs0 : perms xs0 [] -- It is a special case of 'sortBy', which allows the programmer to supply -- their own comparison function. -- --- Elements are arranged from from lowest to highest, keeping duplicates in +-- Elements are arranged from lowest to highest, keeping duplicates in -- the order they appeared in the input. -- -- >>> sort [1,6,4,3,2,5] @@ -1080,7 +1080,7 @@ rqpart cmp x (y:ys) rle rgt r = -- input list. This is called the decorate-sort-undecorate paradigm, or -- Schwartzian transform. -- --- Elements are arranged from from lowest to highest, keeping duplicates in +-- Elements are arranged from lowest to highest, keeping duplicates in -- the order they appeared in the input. -- -- >>> sortOn fst [(2, "world"), (4, "!"), (1, "Hello")] diff --git a/libraries/base/Data/Semigroup/Internal.hs b/libraries/base/Data/Semigroup/Internal.hs index 7484608c..9b7c494b 100644 --- a/libraries/base/Data/Semigroup/Internal.hs +++ b/libraries/base/Data/Semigroup/Internal.hs @@ -4,7 +4,7 @@ {-# LANGUAGE PolyKinds #-} {-# LANGUAGE ScopedTypeVariables #-} --- | Auxilary definitions for 'Semigroup' +-- | Auxiliary definitions for 'Semigroup' -- -- This module provides some @newtype@ wrappers and helpers which are -- reexported from the "Data.Semigroup" module or imported directly diff --git a/libraries/base/Data/Typeable/Internal.hs b/libraries/base/Data/Typeable/Internal.hs index e2caaecc..03fc4b25 100644 --- a/libraries/base/Data/Typeable/Internal.hs +++ b/libraries/base/Data/Typeable/Internal.hs @@ -108,7 +108,7 @@ mkTyCon :: Word64# -> Word64# -> String -> String -> String -> TyCon mkTyCon high# low# pkg modl name = TyCon (Fingerprint (W64# high#) (W64# low#)) pkg modl name --- | Applies a polymorhic type constructor to a sequence of kinds and types 
+-- | Applies a polymorphic type constructor to a sequence of kinds and types mkPolyTyConApp :: TyCon -> [KindRep] -> [TypeRep] -> TypeRep mkPolyTyConApp tc@(TyCon tc_k _ _ _) [] [] = TypeRep tc_k tc [] [] mkPolyTyConApp tc@(TyCon tc_k _ _ _) kinds types = @@ -126,7 +126,7 @@ mkFunTy :: TypeRep -> TypeRep -> TypeRep mkFunTy f a = mkTyConApp funTc [f,a] -- | Splits a type constructor application. --- Note that if the type construcotr is polymorphic, this will +-- Note that if the type constructor is polymorphic, this will -- not return the kinds that were used. -- See 'splitPolyTyConApp' if you need all parts. splitTyConApp :: TypeRep -> (TyCon,[TypeRep]) diff --git a/libraries/base/Foreign/Marshal/Alloc.hs b/libraries/base/Foreign/Marshal/Alloc.hs index 252e3e2a..e59b4ecf 100644 --- a/libraries/base/Foreign/Marshal/Alloc.hs +++ b/libraries/base/Foreign/Marshal/Alloc.hs @@ -104,7 +104,7 @@ calloc = doCalloc undefined mallocBytes :: Int -> IO (Ptr a) mallocBytes size = failWhenNULL "malloc" (_malloc (fromIntegral size)) --- |Llike 'mallocBytes' but memory is filled with bytes of value zero. +-- |Like 'mallocBytes' but memory is filled with bytes of value zero. 
-- callocBytes :: Int -> IO (Ptr a) callocBytes size = failWhenNULL "calloc" $ _calloc 1 (fromIntegral size) @@ -198,7 +198,7 @@ free :: Ptr a -> IO () free = _free --- auxilliary routines +-- auxiliary routines -- ------------------- -- asserts that the pointer returned from the action in the second argument is diff --git a/libraries/base/Foreign/Marshal/Utils.hs b/libraries/base/Foreign/Marshal/Utils.hs index 73d00c78..9e4b1b88 100644 --- a/libraries/base/Foreign/Marshal/Utils.hs +++ b/libraries/base/Foreign/Marshal/Utils.hs @@ -177,7 +177,7 @@ fillBytes dest char size = do _ <- memset dest (fromIntegral char) (fromIntegral size) return () --- auxilliary routines +-- auxiliary routines -- ------------------- -- |Basic C routines needed for memory copying diff --git a/libraries/base/GHC/Arr.hs b/libraries/base/GHC/Arr.hs index adfd602d..4a054ef3 100644 --- a/libraries/base/GHC/Arr.hs +++ b/libraries/base/GHC/Arr.hs @@ -162,7 +162,7 @@ For 1-d, 2-d, and 3-d arrays of Int we have specialised instances to avoid this. Note [Out-of-bounds error messages] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The default method for 'index' generates hoplelessIndexError, because +The default method for 'index' generates hopelessIndexError, because Ix doesn't have Show as a superclass. For particular base types we can do better, so we override the default method for index. -} diff --git a/libraries/base/GHC/Base.hs b/libraries/base/GHC/Base.hs index eca4d780..0b2a8002 100644 --- a/libraries/base/GHC/Base.hs +++ b/libraries/base/GHC/Base.hs @@ -1032,7 +1032,7 @@ augment g xs = g (:) xs -- when we disable the rule that expands (++) into foldr -- The foldr/cons rule looks nice, but it can give disastrously --- bloated code when commpiling +-- bloated code when compiling -- array (a,b) [(1,2), (2,2), (3,2), ...very long list... ] -- i.e. when there are very very long literal lists -- So I've disabled it for now. 
We could have special cases diff --git a/libraries/base/GHC/Conc/Sync.hs b/libraries/base/GHC/Conc/Sync.hs index 28e27a2d..b9461b97 100644 --- a/libraries/base/GHC/Conc/Sync.hs +++ b/libraries/base/GHC/Conc/Sync.hs @@ -884,7 +884,7 @@ modifyMVar_ m io = -- Thread waiting ----------------------------------------------------------------------------- --- Machinery needed to ensureb that we only have one copy of certain +-- Machinery needed to ensure that we only have one copy of certain -- CAFs in this module even when the base package is present twice, as -- it is when base is dynamically loaded into GHCi. The RTS keeps -- track of the single true value of the CAF, so even when the CAFs in diff --git a/libraries/base/GHC/Float.hs b/libraries/base/GHC/Float.hs index 9142808e..8f19c554 100644 --- a/libraries/base/GHC/Float.hs +++ b/libraries/base/GHC/Float.hs @@ -14,7 +14,7 @@ -- | -- Module : GHC.Float -- Copyright : (c) The University of Glasgow 1994-2002 --- Portions obtained from hbc (c) Lennart Augusstson +-- Portions obtained from hbc (c) Lennart Augustsson -- License : see libraries/base/LICENSE -- -- Maintainer : cvs-ghc@haskell.org @@ -232,7 +232,7 @@ class (RealFrac a, Floating a) => RealFloat a where -- to wrong results, hence we clamp the -- scaling parameter. -- If n + k would be larger than h, - -- n + clamp b k must be too, simliar + -- n + clamp b k must be too, similar -- for smaller than l - d. -- Add a little extra to keep clear -- from the boundary cases. @@ -1225,7 +1225,7 @@ word2Float (W# w) = F# (word2Float# w) {- Note [realToFrac int-to-float] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Don found that the RULES for realToFrac/Int->Double and simliarly +Don found that the RULES for realToFrac/Int->Double and similarly Float made a huge difference to some stream-fusion programs. 
Here's an example @@ -1287,7 +1287,7 @@ clamp bd k = max (-bd) (min bd k) Note [Casting from integral to floating point types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To implement something like `reinterpret_cast` from C++ to go from a -floating-point type to an integral type one might niavely think that the +floating-point type to an integral type one might naively think that the following should work: cast :: Float -> Word32 @@ -1300,7 +1300,7 @@ that performs an integer/word operation on a floating-point register, which results in a compile error. The correct way of implementing `reinterpret_cast` to implement a primpop, but -that requires a unique implementation for all supported archetectures. The next +that requires a unique implementation for all supported architectures. The next best solution is to write the value from the source register to memory and then read it from memory into the destination register and the best way to do that is using CMM. diff --git a/libraries/base/GHC/Float/RealFracMethods.hs b/libraries/base/GHC/Float/RealFracMethods.hs index 28242bce..e2e809c4 100644 --- a/libraries/base/GHC/Float/RealFracMethods.hs +++ b/libraries/base/GHC/Float/RealFracMethods.hs @@ -286,7 +286,7 @@ int2Float (I# i) = F# (int2Float# i) -- -- Note: Since the value is integral, the exponent can't be less than -- (-TYP_MANT_DIG), so we need not check the validity of the shift --- distance for the right shfts here. +-- distance for the right shifts here. 
{-# INLINE double2Integer #-} double2Integer :: Double -> Integer diff --git a/libraries/base/GHC/Generics.hs b/libraries/base/GHC/Generics.hs index b3fad977..f3ec5873 100644 --- a/libraries/base/GHC/Generics.hs +++ b/libraries/base/GHC/Generics.hs @@ -33,7 +33,7 @@ module GHC.Generics ( -- -- | -- --- Datatype-generic functions are are based on the idea of converting values of +-- Datatype-generic functions are based on the idea of converting values of -- a datatype @T@ into corresponding values of a (nearly) isomorphic type @'Rep' T@. -- The type @'Rep' T@ is -- built from a limited set of type constructors, all provided by this module. A diff --git a/libraries/base/GHC/IO.hs b/libraries/base/GHC/IO.hs index 88f5914d..bd43dc7d 100644 --- a/libraries/base/GHC/IO.hs +++ b/libraries/base/GHC/IO.hs @@ -31,7 +31,7 @@ module GHC.IO ( unsafeDupablePerformIO, unsafeDupableInterleaveIO, noDuplicate, - -- To and from from ST + -- To and from ST stToIO, ioToST, unsafeIOToST, unsafeSTToIO, FilePath, @@ -175,7 +175,7 @@ catchException !io handler = catch io handler -- might catch either. If you are calling @catch@ with type -- @IO Int -> (ArithException -> IO Int) -> IO Int@ then the handler may -- get run with @DivideByZero@ as an argument, or an @ErrorCall \"urk\"@ --- exception may be propogated further up. If you call it again, you +-- exception may be propagated further up. If you call it again, you -- might get a the opposite behaviour. This is ok, because 'catch' is an -- 'IO' computation. -- diff --git a/libraries/base/GHC/IO/Buffer.hs b/libraries/base/GHC/IO/Buffer.hs index b92ac430..8b77167e 100644 --- a/libraries/base/GHC/IO/Buffer.hs +++ b/libraries/base/GHC/IO/Buffer.hs @@ -141,7 +141,7 @@ charSize = 4 -- -- The "live" elements of the buffer are those between the 'bufL' and -- 'bufR' offsets. 
In an empty buffer, 'bufL' is equal to 'bufR', but --- they might not be zero: for exmaple, the buffer might correspond to +-- they might not be zero: for example, the buffer might correspond to -- a memory-mapped file and in which case 'bufL' will point to the -- next location to be written, which is not necessarily the beginning -- of the file. diff --git a/libraries/base/GHC/IO/Encoding/Failure.hs b/libraries/base/GHC/IO/Encoding/Failure.hs index e6a56bab..a3395755 100644 --- a/libraries/base/GHC/IO/Encoding/Failure.hs +++ b/libraries/base/GHC/IO/Encoding/Failure.hs @@ -61,7 +61,7 @@ data CodingFailureMode -- Roundtripping is based on the ideas of PEP383. -- -- We used to use the range of private-use characters from 0xEF80 to --- 0xEFFF designated for "encoding hacks" by the ConScript Unicode Registery +-- 0xEFFF designated for "encoding hacks" by the ConScript Unicode Registry -- to encode these characters. -- -- However, people didn't like this because it means we don't get @@ -177,7 +177,7 @@ recoverEncode cfm input@Buffer{ bufRaw=iraw, bufL=ir, bufR=_ } -- The "if" test above ensures we skip if the encoding fails -- to deal with the ?, though this should never happen in -- practice as all encodings are in fact capable of - -- reperesenting all ASCII characters. + -- representing all ASCII characters. _ir' <- writeCharBuf iraw ir '?' return (input, output) diff --git a/libraries/base/GHC/IO/Handle.hs b/libraries/base/GHC/IO/Handle.hs index f458d6f5..e82d0b1f 100644 --- a/libraries/base/GHC/IO/Handle.hs +++ b/libraries/base/GHC/IO/Handle.hs @@ -306,7 +306,7 @@ hFlush handle = wantWritableHandle "hFlush" handle flushWriteBuffer -- | The action 'hFlushAll' @hdl@ flushes all buffered data in @hdl@, -- including any buffered read data. 
Buffered read data is flushed --- by seeking the file position back to the point before the bufferred +-- by seeking the file position back to the point before the buffered -- data was read, and hence only works if @hdl@ is seekable (see -- 'hIsSeekable'). -- diff --git a/libraries/base/GHC/IO/Handle/Internals.hs b/libraries/base/GHC/IO/Handle/Internals.hs index 985fc2b9..648f6a49 100644 --- a/libraries/base/GHC/IO/Handle/Internals.hs +++ b/libraries/base/GHC/IO/Handle/Internals.hs @@ -359,7 +359,7 @@ ioe_bufsiz n = ioException -- Wrapper for Handle encoding/decoding. -- The interface for TextEncoding changed so that a TextEncoding doesn't raise --- an exception if it encounters an invalid sequnce. Furthermore, encoding +-- an exception if it encounters an invalid sequence. Furthermore, encoding -- returns a reason as to why encoding stopped, letting us know if it was due -- to input/output underflow or an invalid sequence. -- @@ -676,7 +676,7 @@ mkDuplexHandle dev filepath mb_codec tr_newlines = do mkHandle dev filepath WriteHandle Nothing mb_codec tr_newlines (Just handleFinalizer) - Nothing -- no othersie + Nothing -- no otherside read_side@(FileHandle _ read_m) <- mkHandle dev filepath ReadHandle Nothing mb_codec diff --git a/libraries/base/GHC/IO/Handle/Lock.hs b/libraries/base/GHC/IO/Handle/Lock.hs index 12033c05..a7f9f7f6 100644 --- a/libraries/base/GHC/IO/Handle/Lock.hs +++ b/libraries/base/GHC/IO/Handle/Lock.hs @@ -61,7 +61,7 @@ data LockMode = SharedLock | ExclusiveLock -- 2) The implementation uses 'LockFileEx' on Windows and 'flock' otherwise, -- hence all of their caveats also apply here. -- --- 3) On non-Windows plaftorms that don't support 'flock' (e.g. Solaris) this +-- 3) On non-Windows platforms that don't support 'flock' (e.g. Solaris) this -- function throws 'FileLockingNotImplemented'. We deliberately choose to not -- provide fcntl based locking instead because of its broken semantics. 
-- diff --git a/libraries/base/GHC/IO/Handle/Types.hs b/libraries/base/GHC/IO/Handle/Types.hs index ee6dfe88..0994903b 100644 --- a/libraries/base/GHC/IO/Handle/Types.hs +++ b/libraries/base/GHC/IO/Handle/Types.hs @@ -176,7 +176,7 @@ isReadWriteHandleType _ = False -- (an unbuffered handle needs a 1 character buffer in order to support -- hLookAhead and hIsEOF). -- * In a read Handle, the byte buffer is always empty (we decode when reading) --- * In a wriite Handle, the Char buffer is always empty (we encode when writing) +-- * In a write Handle, the Char buffer is always empty (we encode when writing) -- checkHandleInvariants :: Handle__ -> IO () #if defined(DEBUG) @@ -373,7 +373,7 @@ nativeNewline :: Newline nativeNewline = if isNewlineCRLF then CRLF else LF -- | Map '\r\n' into '\n' on input, and '\n' to the native newline --- represetnation on output. This mode can be used on any platform, and +-- representation on output. This mode can be used on any platform, and -- works with text files using any newline convention. The downside is -- that @readFile >>= writeFile@ might yield a different file. -- diff --git a/libraries/base/GHC/List.hs b/libraries/base/GHC/List.hs index af502134..dc5f6eb3 100644 --- a/libraries/base/GHC/List.hs +++ b/libraries/base/GHC/List.hs @@ -86,7 +86,7 @@ last [] = errorEmptyList "last" #else -- Use foldl to make last a good consumer. -- This will compile to good code for the actual GHC.List.last. --- (At least as long it is eta-expaned, otherwise it does not, #10260.) +-- (At least as long it is eta-expanded, otherwise it does not, #10260.) last xs = foldl (\_ x -> x) lastError xs {-# INLINE last #-} -- The inline pragma is required to make GHC remember the implementation via diff --git a/libraries/base/GHC/PArr.hs b/libraries/base/GHC/PArr.hs index 5f9d7590..3e0769eb 100644 --- a/libraries/base/GHC/PArr.hs +++ b/libraries/base/GHC/PArr.hs @@ -18,7 +18,7 @@ -- the desugarer does not load 'Data.Array.Parallel' into its global state. 
(Hence, -- the present module may not use any other piece of '-XParallelArray' syntax.) -- --- This will be cleaned up when we change the internal represention of '[::]' to not +-- This will be cleaned up when we change the internal representation of '[::]' to not -- rely on a wired-in type constructor. module GHC.PArr where diff --git a/libraries/base/GHC/Ptr.hs b/libraries/base/GHC/Ptr.hs index 8a2479da..b1e10f83 100644 --- a/libraries/base/GHC/Ptr.hs +++ b/libraries/base/GHC/Ptr.hs @@ -37,7 +37,7 @@ import Numeric ( showHex ) -- Data pointers. -- The role of Ptr's parameter is phantom, as there is no relation between --- the Haskell representation and whathever the user puts at the end of the +-- the Haskell representation and whatever the user puts at the end of the -- pointer. And phantom is useful to implement castPtr (see #9163) -- redundant role annotation checks that this doesn't change diff --git a/libraries/base/GHC/Read.hs b/libraries/base/GHC/Read.hs index 86532acc..edcb9d08 100644 --- a/libraries/base/GHC/Read.hs +++ b/libraries/base/GHC/Read.hs @@ -407,11 +407,11 @@ readSymField fieldName readVal = do -- Note [Why readField] -- --- Previousy, the code for automatically deriving Read instance (in +-- Previously, the code for automatically deriving Read instance (in -- typecheck/TcGenDeriv.hs) would generate inline code for parsing fields; -- this, however, turned out to produce massive amounts of intermediate code, -- and produced a considerable performance hit in the code generator. --- Since Read instances are not generally supposed to be perfomance critical, +-- Since Read instances are not generally supposed to be performance critical, -- the readField and readSymField functions have been factored out, and the -- code generator now just generates calls rather than manually inlining the -- parsers. For large record types (e.g. 
500 fields), this produces a diff --git a/libraries/base/GHC/StaticPtr.hs b/libraries/base/GHC/StaticPtr.hs index af837e9c..48f8fc70 100644 --- a/libraries/base/GHC/StaticPtr.hs +++ b/libraries/base/GHC/StaticPtr.hs @@ -28,7 +28,7 @@ -- table is known as the Static Pointer Table. The reference can then be -- dereferenced to obtain the value. -- --- The various communicating processes need to aggree on the keys used to refer +-- The various communicating processes need to agree on the keys used to refer -- to the values in the Static Pointer Table, or lookups will fail. Only -- processes launched from the same program binary are guaranteed to use the -- same set of keys. @@ -82,7 +82,7 @@ unsafeLookupStaticPtr :: StaticKey -> IO (Maybe (StaticPtr a)) unsafeLookupStaticPtr (Fingerprint w1 w2) = errorWithoutStackTrace "unsafeLookupStaticPtr: Static pointers not implemented in the Eta RTS." --- | Miscelaneous information available for debugging purposes. +-- | Miscellaneous information available for debugging purposes. data StaticPtrInfo = StaticPtrInfo { -- | Package key of the package where the static pointer is defined spInfoPackageKey :: String diff --git a/libraries/base/GHC/TypeLits.hs b/libraries/base/GHC/TypeLits.hs index 788afac2..f2cebe4e 100644 --- a/libraries/base/GHC/TypeLits.hs +++ b/libraries/base/GHC/TypeLits.hs @@ -158,7 +158,7 @@ infixl 6 :<>: -- -- The polymorphic kind of this type allows it to be used in several settings. -- For instance, it can be used as a constraint, e.g. to provide a better error --- message for a non-existent instance, +-- message for a nonexistent instance, -- -- @ -- -- in a context diff --git a/libraries/base/GHC/Weak.hs b/libraries/base/GHC/Weak.hs index 8f886a6d..139fdb16 100644 --- a/libraries/base/GHC/Weak.hs +++ b/libraries/base/GHC/Weak.hs @@ -53,7 +53,7 @@ addition to finalizers. 
References from the finalizer to the key are treated in the same way as references from the value to the key: they do not keep the key -alive. A finalizer may therefore ressurrect the key, perhaps by +alive. A finalizer may therefore resurrect the key, perhaps by storing it in the same data structure. The finalizer, and the relationship between the key and the value, diff --git a/libraries/base/Java/Exception.hs b/libraries/base/Java/Exception.hs index 7b377953..4ce49532 100644 --- a/libraries/base/Java/Exception.hs +++ b/libraries/base/Java/Exception.hs @@ -4,7 +4,7 @@ BangPatterns #-} ----------------------------------------------------------------------------- -- | --- Module : Java.Execption +-- Module : Java.Exception -- Copyright : (c) Jyothsna Srinivas 2017 -- -- License : BSD-style (see the file libraries/base/LICENSE) @@ -13,7 +13,7 @@ -- Stability : provisional -- Portability : portable -- --- Bindings for Java Execption utilities +-- Bindings for Java Exception utilities -- ----------------------------------------------------------------------------- diff --git a/libraries/base/System/Console/GetOpt.hs b/libraries/base/System/Console/GetOpt.hs index b69612b1..1f6406b1 100644 --- a/libraries/base/System/Console/GetOpt.hs +++ b/libraries/base/System/Console/GetOpt.hs @@ -124,7 +124,7 @@ data OptKind a -- kind of cmd line arg (internal use only): -- second argument. usageInfo :: String -- header -> [OptDescr a] -- option descriptors - -> String -- nicely formatted decription of options + -> String -- nicely formatted description of options usageInfo header optDescr = unlines (header:table) where (ss,ls,ds) = (unzip3 . concatMap fmtOpt) optDescr table = zipWith3 paste (sameLen ss) (sameLen ls) ds diff --git a/libraries/base/System/IO.hs b/libraries/base/System/IO.hs index 0330a69f..4228982f 100644 --- a/libraries/base/System/IO.hs +++ b/libraries/base/System/IO.hs @@ -202,7 +202,7 @@ module System.IO ( -- as '\r\n'. 
-- -- A text-mode 'Handle' has an associated 'NewlineMode' that - -- specifies how to transate newline characters. The + -- specifies how to translate newline characters. The -- 'NewlineMode' specifies the input and output translation -- separately, so that for instance you can translate '\r\n' -- to '\n' on input, but leave newlines as '\n' on output. diff --git a/libraries/base/Text/Printf.hs b/libraries/base/Text/Printf.hs index cc8c4626..670863d1 100644 --- a/libraries/base/Text/Printf.hs +++ b/libraries/base/Text/Printf.hs @@ -62,7 +62,7 @@ module Text.Printf( -- ** Standard Formatters -- -- | These formatters for standard types are provided for --- convenience in writting new type-specific formatters: +-- convenience in writing new type-specific formatters: -- a common pattern is to throw to 'formatString' or -- 'formatInteger' to do most of the format handling for -- a new type. diff --git a/libraries/eta-boot/Eta/PackageDb.hs b/libraries/eta-boot/Eta/PackageDb.hs index 7687e2f6..37e200a9 100644 --- a/libraries/eta-boot/Eta/PackageDb.hs +++ b/libraries/eta-boot/Eta/PackageDb.hs @@ -30,12 +30,12 @@ -- the internal package format which is specialised just for Eta. -- -- Eta the compiler only needs some of the information which is kept about --- registerd packages, such as module names, various paths etc. On the other +-- registered packages, such as module names, various paths etc. On the other -- hand eta-pkg has to keep all the information from Etlas packages and be able -- to regurgitate it for users and other tools. -- -- The first trick is that we duplicate some of the information in the package --- database. We essentially keep two versions of the datbase in one file, one +-- database. 
We essentially keep two versions of the database in one file, one -- version used only by eta-pkg which keeps the full information (using the -- serialised form of the 'InstalledPackageInfo' type defined by the Etlas -- library); and a second version written by eta-pkg and read by Eta which has diff --git a/libraries/eta-boot/Eta/Serialized.hs b/libraries/eta-boot/Eta/Serialized.hs index ef4a77e4..bc07c89a 100644 --- a/libraries/eta-boot/Eta/Serialized.hs +++ b/libraries/eta-boot/Eta/Serialized.hs @@ -10,7 +10,7 @@ -- Maintainer : typeleadhq@gmail.com -- Portability : portable -- --- Serialization for IPC architecture for external interpeter +-- Serialization for IPC architecture for external interpreter module Eta.Serialized ( -- * Main Serialized data type diff --git a/libraries/eta-meta/Language/Eta/Meta/Lib/Map.hs b/libraries/eta-meta/Language/Eta/Meta/Lib/Map.hs index 9dd9d711..d4676bfb 100644 --- a/libraries/eta-meta/Language/Eta/Meta/Lib/Map.hs +++ b/libraries/eta-meta/Language/Eta/Meta/Lib/Map.hs @@ -3,7 +3,7 @@ -- This is a non-exposed internal module -- -- The code in this module has been ripped from containers-0.5.5.1:Data.Map.Base [1] almost --- verbatimely to avoid a dependency of 'template-haskell' on the containers package. +-- verbatim to avoid a dependency of 'template-haskell' on the containers package.
-- -- [1] see https://hackage.haskell.org/package/containers-0.5.5.1 -- diff --git a/libraries/eta-meta/Language/Eta/Meta/Ppr.hs b/libraries/eta-meta/Language/Eta/Meta/Ppr.hs index 3302a89b..6e34b4f6 100644 --- a/libraries/eta-meta/Language/Eta/Meta/Ppr.hs +++ b/libraries/eta-meta/Language/Eta/Meta/Ppr.hs @@ -135,7 +135,7 @@ pprExp i (LamCaseE ms) = parensIf (i > noPrec) $ text "\\case" $$ nest nestDepth (ppr ms) pprExp _ (TupE es) = parens (commaSep es) pprExp _ (UnboxedTupE es) = hashParens (commaSep es) --- Nesting in Cond is to avoid potential problems in do statments +-- Nesting in Cond is to avoid potential problems in do statements pprExp i (CondE guard true false) = parensIf (i > noPrec) $ sep [text "if" <+> ppr guard, nest 1 $ text "then" <+> ppr true, @@ -653,7 +653,7 @@ instance Ppr Type where = text "forall" <+> hsep (map ppr tvars) <+> text "." <+> sep [pprCxt ctxt, ppr ty] ppr ty = pprTyApp (split ty) - -- Works, in a degnerate way, for SigT, and puts parens round (ty :: kind) + -- Works, in a degenerate way, for SigT, and puts parens round (ty :: kind) -- See Note [Pretty-printing kind signatures] {- Note [Pretty-printing kind signatures] diff --git a/libraries/eta-meta/Language/Eta/Meta/Syntax.hs b/libraries/eta-meta/Language/Eta/Meta/Syntax.hs index 5e39de4d..ece17952 100644 --- a/libraries/eta-meta/Language/Eta/Meta/Syntax.hs +++ b/libraries/eta-meta/Language/Eta/Meta/Syntax.hs @@ -1281,7 +1281,7 @@ type Arity = Int -- | In 'PrimTyConI', is the type constructor unlifted? type Unlifted = Bool --- | 'InstanceDec' desribes a single instance of a class or type function. +-- | 'InstanceDec' describes a single instance of a class or type function. 
-- It is just a 'Dec', but guaranteed to be one of the following: -- -- * 'InstanceD' (with empty @['Dec']@) @@ -1566,7 +1566,7 @@ data Overlap = Overlappable -- ^ May be overlapped by more specific instances deriving( Show, Eq, Ord, Data, Typeable, Generic ) -- | Common elements of 'OpenTypeFamilyD' and 'ClosedTypeFamilyD'. --- By analogy with with "head" for type classes and type class instances as +-- By analogy with "head" for type classes and type class instances as -- defined in /Type classes: an exploration of the design space/, the -- @TypeFamilyHead@ is defined to be the elements of the declaration between -- @type family@ and @where@. diff --git a/libraries/eta-meta/eta-meta.cabal b/libraries/eta-meta/eta-meta.cabal index 80e6f6e2..51118b66 100644 --- a/libraries/eta-meta/eta-meta.cabal +++ b/libraries/eta-meta/eta-meta.cabal @@ -2,7 +2,7 @@ name: eta-meta version: 0.8.6.5 license: BSD3 license-file: LICENSE -category: Metaprogamming +category: Metaprogramming maintainer: typeleadhq@gmail.com bug-reports: http://github.com/typelead/eta/issues synopsis: Support library for Template Metaprogramming in Eta diff --git a/libraries/eta-repl/Eta/REPL/Map.hs b/libraries/eta-repl/Eta/REPL/Map.hs index 384c19e1..5a581fe9 100644 --- a/libraries/eta-repl/Eta/REPL/Map.hs +++ b/libraries/eta-repl/Eta/REPL/Map.hs @@ -3,7 +3,7 @@ -- This is a non-exposed internal module -- -- The code in this module has been ripped from containers-0.5.5.1:Data.Map.Base [1] almost --- verbatimely to avoid a dependency of 'template-haskell' on the containers package. +-- verbatim to avoid a dependency of 'template-haskell' on the containers package.
-- -- [1] see https://hackage.haskell.org/package/containers-0.5.5.1 -- diff --git a/libraries/eta-repl/Eta/REPL/RemoteTypes.hs b/libraries/eta-repl/Eta/REPL/RemoteTypes.hs index 27b0651d..54c5cdc8 100644 --- a/libraries/eta-repl/Eta/REPL/RemoteTypes.hs +++ b/libraries/eta-repl/Eta/REPL/RemoteTypes.hs @@ -40,7 +40,7 @@ import Control.Monad -- Static pointers only; don't use this for heap-resident pointers. -- Instead use HValueRef. We will fix the remote pointer to be 64 bits. This -- should cover 64 and 32bit systems, and permits the exchange of remote ptrs --- between machines of different word size. For exmaple, when connecting to +-- between machines of different word size. For example, when connecting to -- an iserv instance on a different architecture with different word size via -- -fexternal-interpreter. newtype RemotePtr a = RemotePtr Word64 diff --git a/libraries/ghc-prim/GHC/Classes.hs b/libraries/ghc-prim/GHC/Classes.hs index 5b0716ec..60380941 100644 --- a/libraries/ghc-prim/GHC/Classes.hs +++ b/libraries/ghc-prim/GHC/Classes.hs @@ -30,7 +30,7 @@ ----------------------------------------------------------------------------- module GHC.Classes( - -- * Implicit paramaters + -- * Implicit parameters IP(..), -- * Equality and ordering diff --git a/libraries/integer/GHC/Integer/Type.hs b/libraries/integer/GHC/Integer/Type.hs index 27617ca1..7a1cf9aa 100644 --- a/libraries/integer/GHC/Integer/Type.hs +++ b/libraries/integer/GHC/Integer/Type.hs @@ -13,7 +13,7 @@ -- @mpz_*()@ functions using @long@ types, which is smaller than -- @mp_limb_t@ on IL32P64. The @mpn_*()@ functions are often safe to -- use, as they use @mb_limb_t@ instead of @long@. --- (look out for @#if SIZEOF_HSWORD == SIZEOF_LONG@ occurences) +-- (look out for @#if SIZEOF_HSWORD == SIZEOF_LONG@ occurrences) -- #define INT_MINBOUND (-2147483648#) @@ -240,7 +240,7 @@ operations are more efficient. See Trac #8638. 'smartJ#' is the smart constructor for J# that performs the necessary tests. 
When returning a nested result, we always use smartJ# strictly, thus - let !r = smartJ# a b in (# r, somthing_else #) + let !r = smartJ# a b in (# r, something_else #) to avoid creating a thunk that is subsequently evaluated to a J#. smartJ# itself does a pretty small amount of work, so it's not worth thunking it. @@ -732,7 +732,7 @@ recipModInteger j@(S# _) m@(J# _) = recipModInteger (toBig j) m recipModInteger j@(J# _) m@(S# _) = recipModInteger j (toBig m) recipModInteger (J# o1#) (J# o2#) = smartJ# (recipModInteger# o1# o2#) --- | Probalistic Miller-Rabin primality test. +-- | Probabilistic Miller-Rabin primality test. -- -- \"@'testPrimeInteger' /n/ /k/@\" determines whether @/n/@ is prime -- and returns one of the following results: @@ -755,7 +755,7 @@ testPrimeInteger :: Integer -> Int# -> Int# testPrimeInteger j@(S# _) reps = testPrimeInteger (toBig j) reps testPrimeInteger (J# o#) reps = jbool2int# (testPrimeInteger# o# reps) --- | Compute next prime greater than @/n/@ probalistically. +-- | Compute next prime greater than @/n/@ probabilistically. -- -- According to the GMP documentation, the underlying function -- @mpz_nextprime()@ \"uses a probabilistic algorithm to identify @@ -775,7 +775,7 @@ nextPrimeInteger (J# o#) = smartJ# (nextPrimeInteger# o#) -- integers in order to call @mpz_sizeinbase()@. -- -- This function wraps @mpz_sizeinbase()@ which has some --- implementation pecularities to take into account: +-- implementation peculiarities to take into account: -- -- * \"@'sizeInBaseInteger' 0 /base/ = 1@\" (see also comment in 'exportIntegerToMutableByteArray'). 
-- diff --git a/rts/src/main/java/eta/runtime/Runtime.java b/rts/src/main/java/eta/runtime/Runtime.java index 1809dcf7..41d9089b 100644 --- a/rts/src/main/java/eta/runtime/Runtime.java +++ b/rts/src/main/java/eta/runtime/Runtime.java @@ -53,7 +53,7 @@ public static void setMaxGlobalSparks(int newMaxGlobalSparks) { /* Parameter: minTSOIdleTime (int) The minimum amount of time (in ms) the runtime should wait to spawn a new Worker - Capabiliity to handle an idle TSO in the Global Run Queue if the + Capability to handle an idle TSO in the Global Run Queue if the maxWorkerCapabilities requirement is satisfied. */ private static int minTSOIdleTime; diff --git a/rts/src/main/java/eta/runtime/exception/Exception.java b/rts/src/main/java/eta/runtime/exception/Exception.java index 39619dec..6554c69e 100644 --- a/rts/src/main/java/eta/runtime/exception/Exception.java +++ b/rts/src/main/java/eta/runtime/exception/Exception.java @@ -146,7 +146,7 @@ public static Closure catch_(StgContext context, Closure io, Closure handler) { Note that unmasking is not done for asynchronous exceptions. This may be due to the fact that raiseAsync & - maybePeformBlockedExceptions only run after unmasking has + maybePerformBlockedExceptions only run after unmasking has been set. Verify. -RM */ if (!async && (exceptionsBlocked & TSO_BLOCKEX) == 0) { unmask = true; diff --git a/rts/src/main/java/eta/runtime/io/IOManager.java b/rts/src/main/java/eta/runtime/io/IOManager.java index 6c5c55b6..29768333 100644 --- a/rts/src/main/java/eta/runtime/io/IOManager.java +++ b/rts/src/main/java/eta/runtime/io/IOManager.java @@ -99,7 +99,7 @@ keys that will only be removed upon the next select(). */ } } } catch (IOException e) { - /* If the channel is closed or some other anomalie happened, return instantly + /* If the channel is closed or some other anomaly happened, return instantly so that the rest of the code can do appropriate cleanup. 
*/ return e; } diff --git a/rts/src/main/java/eta/runtime/stg/Capability.java b/rts/src/main/java/eta/runtime/stg/Capability.java index 76c35b2c..8885a9ca 100644 --- a/rts/src/main/java/eta/runtime/stg/Capability.java +++ b/rts/src/main/java/eta/runtime/stg/Capability.java @@ -88,7 +88,7 @@ public static boolean singletonCapabilities() { && workerCapabilitiesSize() == 0; } - /* This object is used to sychronize among all the idle worker capabilities. */ + /* This object is used to synchronize among all the idle worker capabilities. */ private static Object blockedLock = new Object(); /* The current number of worker capabilities waiting for work */ @@ -278,7 +278,7 @@ public final Closure schedule(TSO tso) throws java.lang.Exception { /* Thread is done executing, awaken the blocked exception queue. */ awakenBlockedExceptionQueue(t); - /* If an unhandled exception occured, throw it so that the caller + /* If an unhandled exception occurred, throw it so that the caller can handle it if they so choose. */ if (pendingException != null) { /* Cleanup resources in the Runtime before throwing the exception @@ -425,7 +425,7 @@ public final void threadPaused(TSO tso) { } } - /* Asychronous Exceptions */ + /* Asynchronous Exceptions */ public final boolean maybePerformBlockedException(TSO tso) { final Queue blockedExceptions = tso.blockedExceptions; diff --git a/rts/src/main/java/eta/runtime/stg/Closures.java b/rts/src/main/java/eta/runtime/stg/Closures.java index b606acaf..55908995 100644 --- a/rts/src/main/java/eta/runtime/stg/Closures.java +++ b/rts/src/main/java/eta/runtime/stg/Closures.java @@ -19,7 +19,7 @@ import eta.runtime.thunk.Ap4VUpd; import static eta.runtime.stg.TSO.WhatNext.*; -/* - Utilies for working with Closures from the Java side. +/* - Utilities for working with Closures from the Java side. - Standard closures used throughout the runtime system. 
*/ public class Closures { diff --git a/rts/src/main/java/eta/runtime/stg/Stg.java b/rts/src/main/java/eta/runtime/stg/Stg.java index 5d032753..75da712b 100644 --- a/rts/src/main/java/eta/runtime/stg/Stg.java +++ b/rts/src/main/java/eta/runtime/stg/Stg.java @@ -120,7 +120,7 @@ public static Closure trampoline(final StgContext context, final Closure closure } } if (debug) { - debugTailCalls("Exiting trampoline sucessfully with result " + + debugTailCalls("Exiting trampoline successfully with result " + Print.classAndIdentity(ret) + " after " + context.tailCalls + " tail calls."); } diff --git a/rts/src/main/java/eta/runtime/stg/StgContext.java b/rts/src/main/java/eta/runtime/stg/StgContext.java index 27433998..767f8748 100644 --- a/rts/src/main/java/eta/runtime/stg/StgContext.java +++ b/rts/src/main/java/eta/runtime/stg/StgContext.java @@ -386,7 +386,7 @@ public static StgContext acquire() { public final void dump() { System.out.println("Context Dump"); System.out.println("currentTSO: " + currentTSO); - System.out.println("myCapabilitymyCapability: " + myCapability); + System.out.println("myCapability: " + myCapability); } public final Closure R(final int index) { diff --git a/rts/src/main/java/eta/runtime/thunk/CAF.java b/rts/src/main/java/eta/runtime/thunk/CAF.java index 98ae3b2d..f515f8fb 100644 --- a/rts/src/main/java/eta/runtime/thunk/CAF.java +++ b/rts/src/main/java/eta/runtime/thunk/CAF.java @@ -42,7 +42,7 @@ public final Closure evaluate(StgContext context) { } /* By default, if the single-argument constructor is used, it will just redirect - to the indirectee. Normally, it will be overriden by non-trivial top-level + to the indirectee. Normally, it will be overridden by non-trivial top-level thunks. 
*/ @Override public Closure thunkEnter(StgContext context) { diff --git a/rts/src/main/java/eta/runtime/util/MPSCReferenceQueue.java b/rts/src/main/java/eta/runtime/util/MPSCReferenceQueue.java index 7aea78ba..0d268bbf 100644 --- a/rts/src/main/java/eta/runtime/util/MPSCReferenceQueue.java +++ b/rts/src/main/java/eta/runtime/util/MPSCReferenceQueue.java @@ -149,7 +149,7 @@ private boolean createNewBufferRetry(final long iteration, final int index, fina } /* WARNING: This function is unsafe! It may consume elements that were already - consumed if read() calls are made simulatenously to this call. */ + consumed if read() calls are made simultaneously to this call. */ public int forEach(Consumer consumer) { int processed = headBuffer.forEach(consumer); for (RingBuffer buffer: buffers) { @@ -242,7 +242,7 @@ public boolean isReadable(final long iteration, final int i) { } /* WARNING: This function is unsafe! It may consume elements that were already - consumed if read() calls are made simulatenously to this call. */ + consumed if read() calls are made simultaneously to this call. 
*/ public int forEach(Consumer consumer) { int processed = 0; final int len = available.length(); diff --git a/rts/src/test/java/eta/runtime/io/MemoryManagerTest.java b/rts/src/test/java/eta/runtime/io/MemoryManagerTest.java index eca63d04..ad2c3a57 100644 --- a/rts/src/test/java/eta/runtime/io/MemoryManagerTest.java +++ b/rts/src/test/java/eta/runtime/io/MemoryManagerTest.java @@ -106,7 +106,7 @@ public void testGetBoundedBuffer() { b.remaining() >= 1024); touch(b,(byte)1); ByteBuffer b2 = getBoundedBuffer(addr); - assertThat("Two invocations with the same adress ,"+ + assertThat("Two invocations with the same address ,"+ "should return the same buffer", b2, is(b)); b2 = getBoundedBuffer(addr,0,b.remaining()); assertThat("Calling it with the same address, "+ diff --git a/tests/packages/Test.hs b/tests/packages/Test.hs index 4892d41c..320c8ad9 100755 --- a/tests/packages/Test.hs +++ b/tests/packages/Test.hs @@ -126,7 +126,7 @@ verifyScript = do when (not exists) $ mkdir (fromString outPath) procExitOnError Nothing "eta" ["-fforce-recomp", "-o", T.pack outJar, T.pack mainSource] mempty echo "=== ===" - echo "Compiled succesfully." + echo "Compiled successfully." echo "Verifying the bytecode of compiled program..." echo "=== Verify Script Output ===" procExitOnError Nothing "java" ["-cp", T.pack verifyScriptPath, "Verify", T.pack outJar] mempty diff --git a/tests/suite/array/run/Arr016/Arr016.hs b/tests/suite/array/run/Arr016/Arr016.hs index dbda65ea..7dd321f5 100644 --- a/tests/suite/array/run/Arr016/Arr016.hs +++ b/tests/suite/array/run/Arr016/Arr016.hs @@ -120,7 +120,7 @@ rift n xs = comb (drop n xs) (take n xs) comb [] [] = [] --- suffle makes n random rifts. Typically after +-- shuffle makes n random rifts. Typically after -- log n rifts, the list is in a pretty random order. 
-- (where n is the number of elements in the list) diff --git a/tests/suite/typecheck/compile/LoopOfTheDay2.hs b/tests/suite/typecheck/compile/LoopOfTheDay2.hs index b4535a07..72141a0f 100644 --- a/tests/suite/typecheck/compile/LoopOfTheDay2.hs +++ b/tests/suite/typecheck/compile/LoopOfTheDay2.hs @@ -32,7 +32,7 @@ baz = foo (T1b (T1a 3)) -->(I5) C2 () T1 -->(I3) C1 () T1, C1 () Int -->(I1,I2) C0 T1, C0 Int --->(recusive) C0 Int +-->(recursive) C0 Int -->(I6) C2 () Int -->(I4) C1 () Int -->(recursive) {} diff --git a/tests/suite/typecheck/compile/T3018.hs b/tests/suite/typecheck/compile/T3018.hs index bf178e08..0c0c54c2 100644 --- a/tests/suite/typecheck/compile/T3018.hs +++ b/tests/suite/typecheck/compile/T3018.hs @@ -23,7 +23,7 @@ class Subst_A a t t' where data SubstD_A a t t' = SubstD_A {substD_A:: forall m. (Monad m) => a -> t -> t' -> m t'} --- Allow override dictionary verion with implementation of type class Subst +-- Allow override dictionary version with implementation of type class Subst instance Subst_A a t t' => Sat (SubstD_A a t t') where dict = SubstD_A {substD_A = subst_A} @@ -39,7 +39,7 @@ class Subst_B a t t' where data SubstD_B a t t' = SubstD_B {substD_B :: a -> t -> t' -> t'} --- allow override dictionary verion with implementation of type class Subst +-- allow override dictionary version with implementation of type class Subst instance Subst_B a t t' => Sat (SubstD_B a t t') where dict = SubstD_B {substD_B = subst_B} diff --git a/tests/suite/typecheck/compile/T4284.hs b/tests/suite/typecheck/compile/T4284.hs index 2d5164a4..f3a166bd 100644 --- a/tests/suite/typecheck/compile/T4284.hs +++ b/tests/suite/typecheck/compile/T4284.hs @@ -8,7 +8,7 @@ works = id foo fails = (id) foo --- works type checks, but fails fails with the following error +-- works type checks, but fails with the following error -- message: -- -- Cannot match a monotype with `() -> forall b. 
b' diff --git a/tests/suite/typecheck/compile/T6018/T6018.hs b/tests/suite/typecheck/compile/T6018/T6018.hs index b3094fe9..bc24f54e 100644 --- a/tests/suite/typecheck/compile/T6018/T6018.hs +++ b/tests/suite/typecheck/compile/T6018/T6018.hs @@ -174,7 +174,7 @@ barapp2 :: Int barapp2 = bar 1 -- Declarations below test more liberal RHSs of injectivity annotations: --- permiting variables to appear in different order than the one in which they +-- permitting variables to appear in different order than the one in which they -- were declared. type family H a b = r | r -> b a type family Hc a b = r | r -> b a where diff --git a/tests/suite/typecheck/compile/T7147.hs b/tests/suite/typecheck/compile/T7147.hs index 247e5f0f..eb806ed1 100644 --- a/tests/suite/typecheck/compile/T7147.hs +++ b/tests/suite/typecheck/compile/T7147.hs @@ -8,7 +8,7 @@ class AddName i d | d->i where class Rec rec struct | rec->struct where mapRec :: (struct->struct) -> rec -> rec --- We got a very bogus siguature for addNameRec in thc 7.6rc1 +-- We got a very bogus signature for addNameRec in ghc 7.6rc1 -- addNameRec :: forall rec struct. -- Recursive.Rec rec struct -- -> DefinedNames.AddName (GHC.Prim.Any *) struct diff --git a/tests/suite/typecheck/compile/tc109.hs b/tests/suite/typecheck/compile/tc109.hs index 6f3241d0..b7da951e 100644 --- a/tests/suite/typecheck/compile/tc109.hs +++ b/tests/suite/typecheck/compile/tc109.hs @@ -15,4 +15,4 @@ instance (P a,R a b) => P [b] {- GHC 4.08.1 doesn't seem to allow variables in the context that don't appear after the =>, but which are still ok since they are -determined by the functional dependenices. -} +determined by the functional dependencies.
-} diff --git a/tests/suite/typecheck/compile/tc163.hs b/tests/suite/typecheck/compile/tc163.hs index 21d8a729..fb8682bc 100644 --- a/tests/suite/typecheck/compile/tc163.hs +++ b/tests/suite/typecheck/compile/tc163.hs @@ -24,7 +24,7 @@ flop = \m' k -> mkM3' m' (\bm k1 -> error "urk") -- But if we give mkM3' the type -- forall a r. M3' a -> (forall b. ...) -> r --- everthing works fine. Very very delicate. +-- everything works fine. Very very delicate. ---------------- A more complex case ------------- bind :: M3 a -> (a -> M3 b) -> M3 b diff --git a/tests/suite/typecheck/compile/tc165.hs b/tests/suite/typecheck/compile/tc165.hs index 0533c80c..233c677b 100644 --- a/tests/suite/typecheck/compile/tc165.hs +++ b/tests/suite/typecheck/compile/tc165.hs @@ -2,7 +2,7 @@ {-# OPTIONS_GHC -dcore-lint #-} -- Fails GHC 5.04.2 with -dcore-lint --- The issue ariseswhen you have a method that +-- The issue arises when you have a method that -- constrains a class variable module Test where diff --git a/tests/suite/typecheck/compile/tc196.hs b/tests/suite/typecheck/compile/tc196.hs index c34d5e7e..bf72724f 100644 --- a/tests/suite/typecheck/compile/tc196.hs +++ b/tests/suite/typecheck/compile/tc196.hs @@ -1,6 +1,6 @@ -- Test the refined dependency analysis of bindings --- with -fglagow-exts +-- with -fglasgow-exts module ShouldCompile where diff --git a/tests/suite/typecheck/compile/tc199.hs b/tests/suite/typecheck/compile/tc199.hs index 4e65358a..73df16f6 100644 --- a/tests/suite/typecheck/compile/tc199.hs +++ b/tests/suite/typecheck/compile/tc199.hs @@ -5,7 +5,7 @@ -- Hence needing AllowAmbiguousTypes -- -- However, arguably the instance declaration should be accepted, --- beause it's equivalent to +-- because it's equivalent to -- instance Baz Int Int where { foo x = x } -- which *does* typecheck diff --git a/tests/suite/typecheck/compile/tc222.hs b/tests/suite/typecheck/compile/tc222.hs index 4c5717e5..c5cadf4d 100644 --- a/tests/suite/typecheck/compile/tc222.hs +++ 
b/tests/suite/typecheck/compile/tc222.hs @@ -1,6 +1,6 @@ {-# LANGUAGE ImplicitParams, RankNTypes #-} --- Tests impredivative polymorphism with left-to-right +-- Tests impredicative polymorphism with left-to-right -- flow information; see the uses of "$" module TestIP where diff --git a/tests/suite/typecheck/compile/tcfail105.hs b/tests/suite/typecheck/compile/tcfail105.hs index b0efc5b4..68d88b89 100644 --- a/tests/suite/typecheck/compile/tcfail105.hs +++ b/tests/suite/typecheck/compile/tcfail105.hs @@ -2,7 +2,7 @@ -- Existential context should quantify over some new type variables -- --- Jan07: this is now fine, because we've lifted the restrction +-- Jan07: this is now fine, because we've lifted the restriction -- that the context on a constructor should mention -- existential type variables diff --git a/tests/suite/typecheck/compile/tcfail138.hs b/tests/suite/typecheck/compile/tcfail138.hs index 067b1388..e58b506b 100644 --- a/tests/suite/typecheck/compile/tcfail138.hs +++ b/tests/suite/typecheck/compile/tcfail138.hs @@ -3,7 +3,7 @@ FlexibleInstances, UndecidableInstances #-} -- UndecidableInstances because (L a b) is no smaller than (C a b) --- This one shows up another rather subtle functional-dependecy +-- This one shows up another rather subtle functional-dependency -- case. 
The error is: -- -- Could not deduce (C a b') from the context (C a b) diff --git a/tests/suite/typecheck/fail/tcfail129.hs b/tests/suite/typecheck/fail/tcfail129.hs index 1a74749f..271bd527 100644 --- a/tests/suite/typecheck/fail/tcfail129.hs +++ b/tests/suite/typecheck/fail/tcfail129.hs @@ -1,6 +1,6 @@ -- Both blocks are illegal Haskell 98, because of the un-saturated -- type synonym, but (rather obscurely) at one point (GHC 6.3), we --- acceped 'blah', but rejected 'blah1' +-- accepted 'blah', but rejected 'blah1' module ShouldFail where diff --git a/tests/verify/verify.sh b/tests/verify/verify.sh index 15bb2718..a4c77865 100755 --- a/tests/verify/verify.sh +++ b/tests/verify/verify.sh @@ -17,7 +17,7 @@ echo "=== Eta Compiler Output ===" mkdir build eta -fforce-recomp -o build/Out.jar Main.hs echo "=== ===" -echo "Compiled succesfully." +echo "Compiled successfully." # Do bytecode verification on all the core libraries' class files echo "Verifying the bytecode of compiled program..." diff --git a/tests/verify/win-verify.cmd b/tests/verify/win-verify.cmd index aa7820cb..91912adf 100644 --- a/tests/verify/win-verify.cmd +++ b/tests/verify/win-verify.cmd @@ -14,7 +14,7 @@ echo === Eta Compiler Output === mkdir build eta -fforce-recomp -o build/Out.jar Main.hs || exit /b echo === === -echo Compiled succesfully. +echo Compiled successfully. :: Do bytecode verification on all the core libraries' class files echo Verifying the bytecode of compiled program... diff --git a/utils/eta-pkg/Main.hs b/utils/eta-pkg/Main.hs index 39297b9a..b83e9358 100644 --- a/utils/eta-pkg/Main.hs +++ b/utils/eta-pkg/Main.hs @@ -770,7 +770,7 @@ readParseDatabase :: forall mode t. 
Verbosity -> FilePath -> IO (PackageDB mode) readParseDatabase verbosity mb_user_conf mode use_cache path - -- the user database (only) is allowed to be non-existent + -- the user database (only) is allowed to be nonexistent | Just (user_conf,False) <- mb_user_conf, path == user_conf = do lock <- F.forM mode $ \_ -> do createDirectoryIfMissing True path @@ -1894,7 +1894,7 @@ checkModule fieldName db_stack pkg else PackageIndex.lookupUnitId ipix definingPkgId in case mpkg of Nothing - -> verror ForceAll (fieldName ++ " refers to a non-existent " ++ + -> verror ForceAll (fieldName ++ " refers to a nonexistent " ++ "defining package: " ++ display definingPkgId)