Categorize o-1/o-n benchmarks correctly

Harendra Kumar 2023-03-09 16:21:05 +05:30
parent 86cc020aa2
commit 35d888c2f9
10 changed files with 65 additions and 42 deletions

View File

@@ -24,14 +24,12 @@
- ignore: {name: "Use uncurry"}
- ignore: {name: "Redundant $!"}
- ignore: {name: "Use fmap"}
- ignore: {name: "Use ++"}
# Warnings ignored in specific places
- ignore: {name: "Use ++", within: Stream.Transform}
- ignore: {name: "Use mapM", within: Stream.Transform}
- ignore: {name: "Use traverse", within: Stream.Transform}
- ignore: {name: "Redundant <*", within: Stream.Expand}
- ignore: {name: "Use ++", within: Stream.Reduce}
- ignore: {name: "Use ++", within: Stream.Split}
- ignore: {name: "Redundant bracket", within: Stream.Split}
- ignore: {name: "Use isDigit", within: Streamly.Internal.Unicode.Parser}

View File

@@ -306,7 +306,6 @@ o_1_space_serial_elimination value =
, benchIOSink value "drainBy" (Stream.fold (FL.drainBy return))
, benchIOSink value "drainN" (Stream.fold (FL.drainN value))
, benchIOSink value "last" (Stream.fold FL.last)
, benchIOSink value "nub" (Stream.fold FL.nub)
, benchIOSink value "length" (Stream.fold FL.length)
, benchIOSink value "top" (Stream.fold $ FL.top 10)
, benchIOSink value "bottom" (Stream.fold $ FL.bottom 10)
@@ -449,6 +448,7 @@ o_n_heap_serial value =
, benchIOSink value "toStreamRev"
(Stream.fold FL.toStreamRev
:: Stream IO a -> IO (Stream Identity a))
, benchIOSink value "nub" (Stream.fold FL.nub)
]
, bgroup "key-value"
[
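Note: the nub benchmark moves from the o-1-space elimination group into the o-n-heap group above, presumably because a de-duplicating fold has to remember every distinct element it has seen, so its residency grows with the number of distinct inputs. A minimal stand-alone sketch of that cost, using Data.Set rather than streamly's FL.nub:

import qualified Data.Set as Set
import Data.List (foldl')

-- The "seen" set is retained for the whole run, so residency grows with
-- the number of distinct inputs; this is what makes a nub-style fold o-n-heap.
distinctCount :: [Int] -> Int
distinctCount = Set.size . foldl' (flip Set.insert) Set.empty

main :: IO ()
main = print (distinctCount (concat (replicate 2 [1 .. 100000])))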

View File

@@ -12,6 +12,7 @@ import qualified Streamly.Internal.Data.Ring.Unboxed as Ring
import qualified Streamly.Internal.Data.Fold.Window as Window
import qualified Streamly.Internal.Data.Stream.StreamD as Stream
import Streamly.Benchmark.Common
import Gauge
{-# INLINE source #-}
@@ -65,15 +66,9 @@ benchScanWith src len name f =
benchWithPostscan :: Int -> String -> Fold IO Double a -> Benchmark
benchWithPostscan = benchScanWith source
{-# INLINE numElements #-}
numElements :: Int
numElements = 100000
main :: IO ()
main =
defaultMain
[ bgroup
"fold"
o_1_space_folds :: Int -> [Benchmark]
o_1_space_folds numElements =
[ bgroup "fold"
[ benchWithFold numElements "minimum (window size 100)"
(Window.minimum 100)
, benchWithFold numElements "minimum (window size 1000)"
@@ -126,8 +121,11 @@ main =
(Window.cumulative (Window.powerSum 2))
]
, bgroup
"scan"
]
o_1_space_scans :: Int -> [Benchmark]
o_1_space_scans numElements =
[ bgroup "scan"
[ benchWithPostscan numElements "minimum (window size 10)"
(Window.minimum 10)
-- Below window size 30 the linear search based impl performs better
@@ -176,3 +174,18 @@ main =
(Ring.slidingWindow 1000 (Window.powerSum 2))
]
]
moduleName :: String
moduleName = "Data.Fold.Window"
main :: IO ()
main = runWithCLIOpts defaultStreamSize allBenchmarks
where
allBenchmarks value =
[ bgroup (o_1_space_prefix moduleName) $ concat
[ o_1_space_folds value
, o_1_space_scans value
]
]
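The refactor above replaces the flat defaultMain with the size-parameterised groups used elsewhere in the suite: each o_1_space_* function returns a [Benchmark] for a given stream size, and main hands the concatenated groups, tagged with the module's o-1-space prefix, to runWithCLIOpts. A rough stand-alone sketch of the same grouping pattern, using plain Gauge and a hand-rolled prefix in place of the streamly-benchmarks helpers (runWithCLIOpts, o_1_space_prefix):

import Gauge (Benchmark, bench, bgroup, defaultMain, nf)

moduleName :: String
moduleName = "Example.Module"

-- Stand-in for o_1_space_prefix from Streamly.Benchmark.Common.
o1SpacePrefix :: String -> String
o1SpacePrefix name = name ++ "/o-1-space"

-- Each group is a function of the stream size, like o_1_space_folds above.
folds :: Int -> [Benchmark]
folds size = [bench "sum" $ nf (sum :: [Int] -> Int) [1 .. size]]

scans :: Int -> [Benchmark]
scans size = [bench "scanl1 max" $ nf (scanl1 max) [1 .. size :: Int]]

main :: IO ()
main =
    defaultMain
        [ bgroup (o1SpacePrefix moduleName) $ concat
            [ folds 100000
            , scans 100000
            ]
        ]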

View File

@@ -723,14 +723,11 @@ o_1_space_serial value =
, benchIOSink value "dropWhile" $ dropWhile value
, benchIOSink value "takeStartBy" $ takeStartBy value
, benchIOSink value "takeEndBy_" $ takeEndBy_ value
, benchIOSrc sourceEscapedFrames value "takeFramedByEsc_"
$ takeFramedByEsc_ value
, benchIOSink value "groupBy" $ groupBy
, benchIOSink value "groupByRolling" $ groupByRolling
, benchIOSink value "wordBy" $ wordBy value
, benchIOSink value "sepBy (words)" sepByWords
, benchIOSink value "sepByAll (words)" sepByAllWords
, benchIOSink value "sepBy1" sepBy1
, benchIOSink value "sepBy1 (words)" sepByWords1
, benchIOSink value "deintercalate" $ deintercalate value
, benchIOSink value "deintercalate1" $ deintercalate1 value
@@ -748,7 +745,6 @@ o_1_space_serial value =
, benchIOSink value "monad2" $ monad value
, benchIOSink value "monad4" $ monad4 value
, benchIOSink value "monad8" $ monad8 value
, benchIOSink value "monad16" $ monad16 value
-- Alternative
, benchIOSink value "alt2parseMany" $ altSmall value
, benchIOSink value "alt2" $ alt2 value
@@ -783,7 +779,6 @@ o_1_space_serial value =
, benchIOSink value "shortest" $ shortestAllAny value
, benchIOSink value "longest" $ longestAllAny value
-}
, benchIOSink value "listEqBy" (listEqBy value)
, benchIOSink value "streamEqBy" (streamEqBy value)
]
@@ -821,13 +816,21 @@ o_n_heap_serial value =
-- lookahead benchmark holds the entire input till end
benchIOSink value "lookAhead" $ lookAhead value
-- o-n-heap because of backtracking
, benchIOSrc sourceEscapedFrames value "takeFramedByEsc_"
$ takeFramedByEsc_ value
-- non-linear time complexity (parserD)
, benchIOSink value "split_" $ split_ value
-- XXX Takes lot of space when run on a long stream, why?
, benchIOSink value "monad16" $ monad16 value
-- These show non-linear time complexity.
-- They accumulate the results in a list.
, benchIOSink value "sepBy1" sepBy1
, benchIOSink value "manyAlt" manyAlt
, benchIOSink value "someAlt" someAlt
, benchIOSink value "listEqBy" (listEqBy value)
]
-- accumulate results in a list in IO
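The comments above carry the reason for the re-categorisation: these combinators either backtrack over, or collect, everything they consume, so their residency grows with the input. A toy contrast in plain Haskell (not the streamly parsers themselves) of the two space behaviours:

import Data.List (foldl')

-- O(1) residency: only a running count survives each step.
countMatches :: (a -> Bool) -> [a] -> Int
countMatches p = foldl' (\n x -> if p x then n + 1 else n) 0

-- O(n) residency: every match is retained until the whole, strictly
-- built list is handed back, the way sepBy1/manyAlt/someAlt collect
-- their results.
collectMatches :: (a -> Bool) -> [a] -> [a]
collectMatches p = reverse . foldl' (\acc x -> if p x then x : acc else acc) []

main :: IO ()
main = do
    print (countMatches even [1 .. 1000000 :: Int])
    print (length (collectMatches even [1 .. 1000000 :: Int]))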

View File

@ -17,4 +17,4 @@ moduleName = "Data.Stream.Concurrent"
-------------------------------------------------------------------------------
main :: IO ()
main = runWithCLIOpts defaultStreamSize (allBenchmarks moduleName id)
main = runWithCLIOpts defaultStreamSize (allBenchmarks moduleName False id)

View File

@@ -193,15 +193,16 @@ o_1_space_outerProduct value f =
-- Benchmark sets
-------------------------------------------------------------------------------
allBenchmarks :: String -> (Config -> Config) -> Int -> [Benchmark]
allBenchmarks moduleName modifier value =
allBenchmarks :: String -> Bool -> (Config -> Config) -> Int -> [Benchmark]
allBenchmarks moduleName wide modifier value =
[ bgroup (o_1_space_prefix moduleName) $ concat
[ o_1_space_mapping value modifier
, o_1_space_concatFoldable value modifier
, o_1_space_concatMap value modifier
, o_1_space_outerProduct value modifier
, o_1_space_joining value modifier
] ++ if wide then [] else o_1_space_outerProduct value modifier
, bgroup (o_n_heap_prefix moduleName) $ concat
[ if wide then o_1_space_outerProduct value modifier else []
, o_n_heap_buffering value modifier
]
, bgroup (o_n_heap_prefix moduleName)
(o_n_heap_buffering value modifier)
]
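The add/remove markers are not visible here, so the old and new bodies of allBenchmarks read interleaved above. Read together with the callers in the files below, the new definition appears to be roughly the following: wide configurations (eager, interleaved) move the monad-outer-product benchmarks out of the o-1-space group and into the heap-bounded group, while the narrow ones keep them under o-1-space.

allBenchmarks :: String -> Bool -> (Config -> Config) -> Int -> [Benchmark]
allBenchmarks moduleName wide modifier value =
    [ bgroup (o_1_space_prefix moduleName) $ concat
        [ o_1_space_mapping value modifier
        , o_1_space_concatFoldable value modifier
        , o_1_space_concatMap value modifier
        , o_1_space_joining value modifier
        ] ++ (if wide then [] else o_1_space_outerProduct value modifier)
    , bgroup (o_n_heap_prefix moduleName) $ concat
        [ if wide then o_1_space_outerProduct value modifier else []
        , o_n_heap_buffering value modifier
        ]
    ]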

View File

@@ -22,4 +22,4 @@ main :: IO ()
main =
runWithCLIOpts
defaultStreamSize
(allBenchmarks moduleName (Async.eager True))
(allBenchmarks moduleName True (Async.eager True))

View File

@@ -22,4 +22,4 @@ main :: IO ()
main =
runWithCLIOpts
defaultStreamSize
(allBenchmarks moduleName (Async.interleaved True))
(allBenchmarks moduleName True (Async.interleaved True))

View File

@@ -22,4 +22,4 @@ main :: IO ()
main =
runWithCLIOpts
defaultStreamSize
(allBenchmarks moduleName (Async.ordered True))
(allBenchmarks moduleName False (Async.ordered True))

View File

@@ -47,6 +47,16 @@ rtsOpts exeName benchName0 = unwords [general, exeSpecific, benchSpecific]
| "Prelude.WAsync/o-n-space.monad-outer-product." `isPrefixOf` benchName =
"-K4M"
-- This module is dev only now, and can be removed at some point
| "Data.Stream.StreamDK/o-1-space.grouping.classifySessionsOf"
`isPrefixOf` benchName = "-K512K"
| "Data.Stream.StreamDK/o-n-space.foldr.foldrM/"
`isPrefixOf` benchName = "-K4M"
| "Data.Stream.StreamDK/o-n-space.iterated."
`isPrefixOf` benchName = "-K4M -M64M"
| "Data.Stream.StreamDK/o-n-space.traversable."
`isPrefixOf` benchName = "-K2M"
-----------------------------------------------------------------------
| "Data.Stream.StreamD/o-n-space.elimination.toList" == benchName =
@@ -63,19 +73,7 @@ rtsOpts exeName benchName0 = unwords [general, exeSpecific, benchSpecific]
| "Data.Stream/o-n-space.iterated."
`isPrefixOf` benchName = "-K4M"
| "Data.Stream.StreamDK/o-1-space.grouping.classifySessionsOf"
`isPrefixOf` benchName = "-K512K"
| "Data.Stream.StreamDK/o-n-space.foldr.foldrM/"
`isPrefixOf` benchName = "-K4M"
| "Data.Stream.StreamDK/o-n-space.iterated."
`isPrefixOf` benchName = "-K4M -M64M"
| "Data.Stream.StreamDK/o-n-space.traversable."
`isPrefixOf` benchName = "-K2M"
| "Data.Stream.ConcurrentInterleaved/o-1-space.monad-outer-product.toNullAp"
`isPrefixOf` benchName = "-M32M"
| "Data.Stream.ConcurrentEager/o-1-space.monad-outer-product.toNullAp"
| "Data.Stream.ConcurrentEager/o-n-space.monad-outer-product.toNullAp"
`isPrefixOf` benchName = "-M768M"
| "Data.Stream.ConcurrentEager/o-1-space."
`isPrefixOf` benchName = "-M128M"
@@ -89,6 +87,16 @@ rtsOpts exeName benchName0 = unwords [general, exeSpecific, benchSpecific]
| "Data.Array.Generic/o-1-space.transformationX4.map"
`isPrefixOf` benchName = "-M32M"
-- XXX For --long option, need to check why so much heap is required.
-- Note, if we remove the chunked stream module we need to keep the
-- chunked stream benchmarks in the stream module.
| "Data.Array.Stream/o-1-space"
`isPrefixOf` benchName = "-K4M -M512M"
-- XXX Takes up to 160MB heap for --long, we use chunked stream for
-- this, so the reason may be related to chunked streams.
| "Data.Parser.ParserK/o-1-space"
`isPrefixOf` benchName = "-K4M -M256M"
-----------------------------------------------------------------------
| "Unicode.Char/o-1-space." `isPrefixOf` benchName = "-M32M"
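For reference, rtsOpts as a whole is a chain of guards that match benchmark names by prefix (or exact name) and return per-benchmark GHC RTS limits; this commit moves those entries to the corrected o-1/o-n categories. A trimmed-down, stand-alone sketch of that dispatch style; benchRTSOpts and the example input name are illustrative only, though the prefixes and limits are taken from the hunks above:

import Data.List (isPrefixOf)

-- Per-benchmark RTS options keyed by name prefix, mirroring the guard
-- style of rtsOpts above ("-K" bounds the stack, "-M" bounds the heap).
benchRTSOpts :: String -> String
benchRTSOpts benchName
    | "Data.Parser.ParserK/o-1-space" `isPrefixOf` benchName = "-K4M -M256M"
    | "Data.Array.Stream/o-1-space" `isPrefixOf` benchName = "-K4M -M512M"
    | "Unicode.Char/o-1-space." `isPrefixOf` benchName = "-M32M"
    | otherwise = ""

main :: IO ()
main = putStrLn (benchRTSOpts "Data.Array.Stream/o-1-space.example")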