diff --git a/libs/contrib/Text/Distance/Levenshtein.idr b/libs/contrib/Text/Distance/Levenshtein.idr
index 83102786e0..903bb51dcc 100644
--- a/libs/contrib/Text/Distance/Levenshtein.idr
+++ b/libs/contrib/Text/Distance/Levenshtein.idr
@@ -10,7 +10,7 @@ import Data.List.Extra
 
 ||| Self-evidently correct but O(3 ^ (min mn)) complexity
 spec : String -> String -> Nat
-spec a b = loop (fastUnpack a) (fastUnpack b) where
+spec a b = loop (unpack a) (unpack b) where
 
   loop : List Char -> List Char -> Nat
   loop [] ys = length ys -- deletions
diff --git a/libs/contrib/Text/Lexer/Core.idr b/libs/contrib/Text/Lexer/Core.idr
index be91d372c4..6fde0660b9 100644
--- a/libs/contrib/Text/Lexer/Core.idr
+++ b/libs/contrib/Text/Lexer/Core.idr
@@ -159,7 +159,7 @@ tokenise pred line col acc tmap str
                Just (tok, rest) =>
                   let line' = line + cast (countNLs tok)
                       col' = getCols tok col in
-                  Just (MkBounded (fn (fastPack (reverse tok))) False (MkBounds line col line' col'),
+                  Just (MkBounded (fn (pack (reverse tok))) False (MkBounds line col line' col'),
                         line', col', rest)
                Nothing => getFirstToken ts str
 
@@ -171,11 +171,11 @@ export
 lex : TokenMap a -> String -> (List (WithBounds a), (Int, Int, String))
 lex tmap str
     = let (ts, (l, c, str')) = tokenise (const False) 0 0 [] tmap (unpack str) in
-          (ts, (l, c, fastPack str'))
+          (ts, (l, c, pack str'))
 
 export
 lexTo : (a -> Bool) -> TokenMap a -> String ->
         (List (WithBounds a), (Int, Int, String))
 lexTo pred tmap str
     = let (ts, (l, c, str')) = tokenise pred 0 0 [] tmap (unpack str) in
-          (ts, (l, c, fastPack str'))
+          (ts, (l, c, pack str'))
diff --git a/libs/contrib/Text/Lexer/Tokenizer.idr b/libs/contrib/Text/Lexer/Tokenizer.idr
index f9d9bc8169..5fb4bb287b 100644
--- a/libs/contrib/Text/Lexer/Tokenizer.idr
+++ b/libs/contrib/Text/Lexer/Tokenizer.idr
@@ -110,7 +110,7 @@ tokenise reject tokenizer line col acc str
                | _ => Nothing
               line' = line + cast (countNLs token)
               col' = getCols token col
-              tokenStr = fastPack $ reverse token
+              tokenStr = pack $ reverse token
            in pure (tokenStr, line', col', rest)
 
     getFirstMatch : Tokenizer a -> List Char ->
@@ -149,8 +149,8 @@ lexTo : Lexer ->
         (List (WithBounds a), (StopReason, Int, Int, String))
 lexTo reject tokenizer str
     = let (ts, reason, (l, c, str')) =
-              tokenise reject tokenizer 0 0 [] (fastUnpack str) in
-          (ts, reason, (l, c, fastPack str'))
+              tokenise reject tokenizer 0 0 [] (unpack str) in
+          (ts, reason, (l, c, pack str'))
 
 ||| Given a tokenizer and an input string, return a list of recognised tokens,
 ||| and the line, column, and remainder of the input at the first point in the string
diff --git a/libs/contrib/Text/Literate.idr b/libs/contrib/Text/Literate.idr
index ca52b9ca31..eecf5fb6ae 100644
--- a/libs/contrib/Text/Literate.idr
+++ b/libs/contrib/Text/Literate.idr
@@ -78,7 +78,7 @@ namespace Compat
 
 ||| Merge the tokens into a single source file.
 reduce : List (WithBounds Token) -> List String -> String
-reduce [] acc = fastConcat (reverse acc)
+reduce [] acc = concat (reverse acc)
 reduce (MkBounded (Any x) _ _ :: rest) acc =
   -- newline will always be tokenized as a single token
   if x == "\n"
diff --git a/libs/contrib/Text/PrettyPrint/Prettyprinter/Render/HTML.idr b/libs/contrib/Text/PrettyPrint/Prettyprinter/Render/HTML.idr
index e772b79e9e..c457474c5f 100644
--- a/libs/contrib/Text/PrettyPrint/Prettyprinter/Render/HTML.idr
+++ b/libs/contrib/Text/PrettyPrint/Prettyprinter/Render/HTML.idr
@@ -6,7 +6,7 @@ import Data.String
 
 export
 htmlEscape : String -> String
-htmlEscape s = fastConcat $ reverse $ go [] s
+htmlEscape s = concat $ reverse $ go [] s
   where
     isSafe : Char -> Bool
     isSafe '"' = False
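
Note on the rewrite: pack, unpack, and concat are the portable Prelude
counterparts of the backend-assisted fastPack, fastUnpack, and fastConcat
exported by base's Data.String, so the substitutions above should be
behaviour-preserving. If I recall correctly, base also installs %transform
rules that rewrite the Prelude versions into the fast ones during code
generation, which would make the swap performance-neutral in compiled code.
A minimal sketch of the claimed equivalence (a hypothetical standalone test,
not part of this patch; it assumes the fast* functions remain exported by
Data.String):

    -- EquivCheck.idr (illustrative only)
    import Data.String

    main : IO ()
    main = do
      let s = "hello\nworld"
      -- pack/unpack agree with their fast* counterparts on any input
      printLn (pack (unpack s) == fastPack (fastUnpack s))
      -- concat on List String agrees with fastConcat
      printLn (concat ["a", "b", "c"] == fastConcat ["a", "b", "c"])

Both checks should print True on any backend.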