Skip to content

Updating HLint to require using 'pure' instead of 'return' #720

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .hlint.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,12 @@
#
# Will suggest replacing "wibbleMany [myvar]" with "wibbleOne myvar"
# - error: {lhs: "wibbleMany [x]", rhs: wibbleOne x}
# Suggest 'pure' instead of 'return': 'pure' needs only an Applicative
# constraint, whereas 'return' demands the stronger Monad constraint.
- error:
    note: "Prefer pure to reduce Monad constraint"
    lhs: "return x"
    rhs: "pure x"

- error:
name: "Use mkPrimVector"
lhs: "Data.Vector.Primitive.Vector"
Expand Down
6 changes: 3 additions & 3 deletions bench/macro/lsm-tree-bench-bloomfilter.hs
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,7 @@ benchmarks = do
0
#endif

return ()
pure ()

type Alloc = Int

Expand Down Expand Up @@ -178,7 +178,7 @@ benchmark name description action n (subtractTime, subtractAlloc) expectedAlloc
(truncate allocPerKey :: Int)

putStrLn ""
return (timeNet, allocNet)
pure (timeNet, allocNet)

-- | (numEntries, sizeFactor, numBits, numHashFuncs)
type BloomFilterSizeInfo = (Integer, Integer, Integer, Integer)
Expand Down Expand Up @@ -252,7 +252,7 @@ elemManyEnv filterSizes rng0 =
let k :: Word256
(!k, !rng') = uniform rng
MBloom.insert mb (serialiseKey k)
return rng'
pure rng'
)
rng0
(zip [0 .. totalNumEntries filterSizes - 1]
Expand Down
2 changes: 1 addition & 1 deletion bench/macro/lsm-tree-bench-lookups.hs
Original file line number Diff line number Diff line change
Expand Up @@ -266,7 +266,7 @@ benchmark name description action n (subtractTime, subtractAlloc) = do
printStat "Alloc net per key: " allocPerKey "bytes"

putStrLn ""
return (timeNet, allocNet)
pure (timeNet, allocNet)

-- | (numEntries, sizeFactor)
type RunSizeInfo = (Int, Int)
Expand Down
18 changes: 9 additions & 9 deletions bench/macro/lsm-tree-bench-wp8.hs
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ makeKey seed =
case v of
P.MutablePrimArray mba -> do
_ <- P.resizeMutableByteArray (P.MutableByteArray mba) 34
return v
pure v

of (P.PrimArray ba :: P.PrimArray Word64) ->
byteArrayToSBS (P.ByteArray ba)
Expand Down Expand Up @@ -262,7 +262,7 @@ timed action = do
printf "Running time: %.03f sec\n" t
printf "/proc/self/io after vs. before: %s\n" (ppShow p)
printf "RTSStats after vs. before: %s\n" (ppShow s)
return (x, t, s, p)
pure (x, t, s, p)

timed_ :: IO () -> IO (Double, RTSStatsDiff Triple, ProcIODiff)
timed_ action = do
Expand Down Expand Up @@ -479,7 +479,7 @@ doDryRun' gopts opts = do
let (batch1, batch2) = toOperations lookups inserts
_ <- evaluate $ force (batch1, batch2)

return g'
pure g'

when (check opts) $ do
duplicates <- readIORef duplicateRef
Expand Down Expand Up @@ -589,7 +589,7 @@ doRun gopts opts = do
checkvar <- newIORef $ pureReference
(initialSize gopts) (batchSize opts)
(batchCount opts) (seed opts)
let fcheck | not (check opts) = \_ _ -> return ()
let fcheck | not (check opts) = \_ _ -> pure ()
| otherwise = \b y -> do
(x:xs) <- readIORef checkvar
unless (x == y) $
Expand Down Expand Up @@ -642,7 +642,7 @@ sequentialIteration h output !initialSize !batchSize !tbl !b !g =
_ <- timeLatency tref $ LSM.updates tbl is

-- continue to the next batch
return g'
pure g'


sequentialIterations :: LatencyHandle
Expand Down Expand Up @@ -674,7 +674,7 @@ sequentialIterationLO output !initialSize !batchSize !tbl !b !g = do
output b (V.zip ls (fmap (fmap (const ())) results))

-- continue to the next batch
return g'
pure g'

sequentialIterationsLO :: (Int -> LookupResults -> IO ())
-> Int -> Int -> Int -> Word64
Expand Down Expand Up @@ -780,7 +780,7 @@ pipelinedIteration h output !initialSize !batchSize
!delta' = Map.fromList (V.toList is)
putMVar syncTblOut (tbl_n2, delta')

return tbl_n2
pure tbl_n2
where
applyUpdates :: Map K (LSM.Update V a)
-> V.Vector (K, LSM.LookupResult V b)
Expand Down Expand Up @@ -839,7 +839,7 @@ pipelinedIterations h output !initialSize !batchSize !batchCount !seed tbl_0 = d
-- If run with +RTS -N2 then we'll put each thread on a separate core.
withAsyncOn 0 threadA $ \ta ->
withAsyncOn 1 threadB $ \tb ->
waitBoth ta tb >> return ()
waitBoth ta tb >> pure ()

-------------------------------------------------------------------------------
-- Testing
Expand Down Expand Up @@ -1020,7 +1020,7 @@ main = do
-- | Strict monadic left fold over a list. The accumulator is forced
-- (bang patterns) on every step so no thunk chain builds up.
forFoldM_ :: Monad m => s -> [a] -> (a -> s -> m s) -> m s
forFoldM_ !z0 ys0 step = loop z0 ys0
  where
    -- Worker loop; forces the accumulator before each recursive call.
    loop !z []     = pure z
    loop !z (y:ys) = step y z >>= \ !z' -> loop z' ys
Expand Down
10 changes: 5 additions & 5 deletions bench/macro/rocksdb-bench-wp8.hs
Original file line number Diff line number Diff line change
Expand Up @@ -156,14 +156,14 @@ timed action = do
x <- action
t2 <- Clock.getTime Clock.Monotonic
let !t = fromIntegral (Clock.toNanoSecs (Clock.diffTimeSpec t2 t1)) * 1e-9
return (x, t)
pure (x, t)

timed_ :: IO () -> IO Double
timed_ action = do
t1 <- Clock.getTime Clock.Monotonic
action
t2 <- Clock.getTime Clock.Monotonic
return $! fromIntegral (Clock.toNanoSecs (Clock.diffTimeSpec t2 t1)) * 1e-9
pure $! fromIntegral (Clock.toNanoSecs (Clock.diffTimeSpec t2 t1)) * 1e-9

-------------------------------------------------------------------------------
-- setup
Expand Down Expand Up @@ -270,7 +270,7 @@ doDryRun' gopts opts = do
let k = makeKey k'
evaluate k >> evaluate (makeValue k)

return nextG
pure nextG

when (check opts) $ do
duplicates <- readIORef duplicateRef
Expand Down Expand Up @@ -376,7 +376,7 @@ doRun' gopts opts =

RocksDB.write db wopts batch

return nextG
pure nextG
where
initGen = MCG.make
(fromIntegral $ initialSize gopts + batchSize opts * batchCount opts)
Expand Down Expand Up @@ -409,7 +409,7 @@ main = do
-------------------------------------------------------------------------------

-- | Strict monadic left fold over a list; the accumulator is forced
-- at every step to avoid building up thunks.
forFoldM_ :: Monad m => s -> [a] -> (a -> s -> m s) -> m s
forFoldM_ !acc zs g = case zs of
  []     -> pure acc
  (z:zt) -> do
    !acc' <- g z acc
    forFoldM_ acc' zt g
Expand Down
6 changes: 3 additions & 3 deletions bench/micro/Bench/Database/LSMTree/Internal/Index.hs
Original file line number Diff line number Diff line change
Expand Up @@ -37,13 +37,13 @@ benchmarks = bgroup "Bench.Database.LSMTree.Internal.Index" $
= bgroup (indexTypeName ++ " index") $
[
-- Search
env (return $ searchIndex indexType 10000) $ \ index ->
env (return $ searchKeys 1000) $ \ keys ->
env (pure $ searchIndex indexType 10000) $ \ index ->
env (pure $ searchKeys 1000) $ \ keys ->
bench "Search" $
searchBenchmarkable index keys,

-- Incremental construction
env (return $ incrementalConstructionAppends 10000) $ \ appends ->
env (pure $ incrementalConstructionAppends 10000) $ \ appends ->
bench "Incremental construction" $
incrementalConstructionBenchmarkable indexType appends
]
Expand Down
6 changes: 3 additions & 3 deletions blockio/src-sim/System/FS/BlockIO/Sim.hs
Original file line number Diff line number Diff line change
Expand Up @@ -82,14 +82,14 @@ simTryLockFile hfs path lockmode =
mkLockFileHandle
ExclusiveLock | n == 0 -> do writeCount h (-1)
mkLockFileHandle
_ -> return Nothing
_ -> pure Nothing
where
mkLockFileHandle = do
-- A lock file handle keeps open the file in read mode, such that a locked
-- file contributes to the number of open file handles. The mock FS allows
-- multiple readers and up to one writer to open the file concurrently.
h <- API.hOpen hfs path ReadMode
return (Just (LockFileHandle { hUnlock = hUnlock h }))
pure (Just (LockFileHandle { hUnlock = hUnlock h }))

hUnlock h0 =
API.withFile hfs path (ReadWriteMode AllowExisting) $ \h -> do
Expand All @@ -112,7 +112,7 @@ simTryLockFile hfs path lockmode =
writeCount h n = do
API.hSeek hfs h AbsoluteSeek 0
_ <- API.hPutAllStrict hfs h (BS.pack (show n))
return ()
pure ()

countCorrupt =
FsError {
Expand Down
2 changes: 1 addition & 1 deletion blockio/src/System/FS/BlockIO/API.hs
Original file line number Diff line number Diff line change
Expand Up @@ -321,7 +321,7 @@ rethrowFsErrorIO hfs fp action = do
res <- try action
case res of
Left err -> handleError err
Right a -> return a
Right a -> pure a
where
handleError :: HasCallStack => IOError -> IO a
handleError ioErr =
Expand Down
2 changes: 1 addition & 1 deletion bloomfilter/examples/spell.hs
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ import Prelude hiding (notElem)
main :: IO ()
main = do
files <- getArgs
dictionary <- readFile "/usr/share/dict/words" `catchIO` \_ -> return "yes no"
dictionary <- readFile "/usr/share/dict/words" `catchIO` \_ -> pure "yes no"
let !bloom = easyList 0.01 (words dictionary)
forM_ files $ \file -> do
ws <- words <$> readFile file
Expand Down
4 changes: 2 additions & 2 deletions bloomfilter/src/Data/BloomFilter.hs
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ empty :: Int -- ^ number of hash functions to use
-> Word64 -- ^ number of bits in filter
-> Bloom' h a
{-# INLINE [1] empty #-}
empty hash numBits = create hash numBits (\_ -> return ())
empty hash numBits = create hash numBits (\_ -> pure ())

-- | Create a Bloom filter with a single element.
singleton :: (Hashes h, Hashable a)
Expand Down Expand Up @@ -195,7 +195,7 @@ unfold hs numBits f k = create hs numBits (loop k)
where loop :: forall s. b -> MBloom' s h a -> ST s ()
loop j mb = case f j of
Just (a, j') -> insert mb a >> loop j' mb
_ -> return ()
_ -> pure ()

-- | Create an immutable Bloom filter, populating it from a list of
-- values.
Expand Down
4 changes: 2 additions & 2 deletions bloomfilter/src/Data/BloomFilter/BitVec64.hs
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,7 @@ new s
| numWords >= 128 = do
mba <- newPinnedByteArray numBytes
setByteArray mba 0 numBytes (0 :: Word8)
return (MBV64 (VP.MVector 0 numWords mba))
pure (MBV64 (VP.MVector 0 numWords mba))
| otherwise =
MBV64 <$> VPM.new numWords
where
Expand All @@ -90,7 +90,7 @@ unsafeWrite (MBV64 mbv) i x = do
-- | Read one bit of the mutable bit vector. The 'Word64' bit index is
-- split into a word index (upper bits) and a bit offset within that
-- word (lower 6 bits). No bounds checking is performed.
unsafeRead :: MBitVec64 s -> Word64 -> ST s Bool
unsafeRead (MBV64 mbv) i = do
    !word <- VPM.unsafeRead mbv (w2i wordIx)
    pure $! testBit word (w2i bitIx)
  where
    !wordIx = unsafeShiftR i 6 -- i `div` 64
    !bitIx  = i .&. 63         -- i `mod` 64
Expand Down
6 changes: 3 additions & 3 deletions bloomfilter/src/Data/BloomFilter/Mutable.hs
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ insert !mb !x = insertHashes mb (makeHashes x)
-- | Set, for each of the k hash functions, the corresponding bit of
-- the mutable Bloom filter.
insertHashes :: Hashes h => MBloom' s h a -> h a -> ST s ()
insertHashes (MBloom k m v) !h = setFrom 0
  where
    -- Walk the k hash values, setting one bit per hash value
    -- (reduced modulo the filter size m).
    setFrom !i
      | i >= k    = pure ()
      | otherwise = do
          let !idx = evalHashes h i `rem` m
          V.unsafeWrite v idx True
          setFrom (i + 1)

Expand All @@ -90,13 +90,13 @@ elem elt mb = elemHashes (makeHashes elt) mb
-- | Check whether every one of the k probed bits is set, stopping at
-- the first clear bit. A 'True' result may be a false positive; a
-- 'False' result is always definitive.
elemHashes :: forall h s a. Hashes h => h a -> MBloom' s h a -> ST s Bool
elemHashes !ch (MBloom k m v) = probe 0
  where
    probe :: Int -> ST s Bool
    probe !i
      | i >= k    = pure True
      | otherwise = do
          let !idx' = evalHashes ch i
          let !idx  = idx' `rem` m
          hit <- V.unsafeRead v idx
          if hit then probe (i + 1) else pure False

-- | Return the size of a mutable Bloom filter, in bits.
length :: MBloom' s h a -> Word64
Expand Down
4 changes: 2 additions & 2 deletions prototypes/FormatPage.hs
Original file line number Diff line number Diff line change
Expand Up @@ -430,7 +430,7 @@ deserialisePage dpgsz p =
sizeKeys sizeValues

assert (offsetKeyOffsets == offKeyOffsets pageSizesOffsets) $
return PageIntermediate {
pure PageIntermediate {
pageNumKeys = fromIntegral pageNumKeys,
pageNumBlobs = fromIntegral pageNumBlobs,
pageDiskPageSize = dpgsz,
Expand Down Expand Up @@ -700,7 +700,7 @@ genPageContentOverfull dpgsz genkey genval =
kop <- genPageContentSingleSmall genkey genval
case pageSizeAddElem kop sz of
-- include as the /first/ element, the one that will make it overfull:
Nothing -> return (kop:kops) -- not reversed!
Nothing -> pure (kop:kops) -- not reversed!
Just sz' -> go (kop:kops) sz'

genPageContentLargeSmallFits :: DiskPageSize
Expand Down
Loading