Euler problem 17.2

import Data.Char ( digitToInt )

-- English names for 1 to 19
one :: [String]
one =
  [ "one",
    "two",
    "three",
    "four",
    "five",
    "six",
    "seven",
    "eight",
    "nine",
    "ten",
    "eleven",
    "twelve",
    "thirteen",
    "fourteen",
    "fifteen",
    "sixteen",
    "seventeen",
    "eighteen",
    "nineteen"
  ]

-- English names for the tens: 20, 30, ..., 90
ty :: [String]
ty =
  [ "twenty",
    "thirty",
    "forty",
    "fifty",
    "sixty",
    "seventy",
    "eighty",
    "ninety"
  ]

-- Spell out x (for 1..1000) in English words; spaces are stripped later.
english :: Int -> [Char]
english x
  | x == 0 = []
  | x < 20 = one !! (x - 1)
  | x >= 20 && x < 100 =
      -- tens word, then the remaining ones digit
      ty !! (firstDigit x - 2)
        ++ " "
        ++ english (x - firstDigit x * 10)
  | x < 1000 && x `mod` 100 == 0 =
      -- exact hundreds: 100, 200, ..., 900
      one !! (firstDigit x - 1)
        ++ " hundred"
  | x > 100 && x <= 999 =
      -- other three-digit numbers: "<digit> hundred and <rest>"
      one !! (firstDigit x - 1)
        ++ " hundred and "
        ++ english (x - firstDigit x * 100)
  | x == 1000 = "one thousand"
  | otherwise = "error"
  where
    -- leading decimal digit of x
    firstDigit = digitToInt . head . show
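
A quick sanity check in GHCi against the worked examples from the problem statement: 342 should use 23 letters and 115 should use 20, once the spaces are removed.

λ> english 342
"three hundred and forty two"
λ> english 115
"one hundred and fifteen"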

-- drop the spaces, so that only letters get counted
removeSpace :: [Char] -> [Char]
removeSpace = filter (`notElem` " ")

-- concatenate the English names of all the numbers in the list
engCat :: [Int] -> [Char]
engCat = concatMap english

-- total number of letters used to write out 1 to 1000
e17 :: Int
e17 = length . removeSpace $ engCat [1..1000]

— based on Haskell official

λ> e17
21124
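
For comparison, here is a minimal variant of the final count that keeps only letters instead of filtering out spaces. It assumes isLetter is also imported from Data.Char; the name e17' is just an illustrative label, not part of the solution above.

-- assumes: import Data.Char ( digitToInt, isLetter )
e17' :: Int
e17' = length . filter isLetter $ engCat [1..1000]

Since the spelled-out names contain only letters and spaces, this should give the same total.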

— colorized by palette fm

— Me@2023-11-07 11:24:03 PM

.

.

2023.11.08 Wednesday (c) All rights reserved by ACHK