Rename arraysOf to chunksOf

Adithya Kumar 2023-03-06 15:47:34 +05:30
parent ba0d42aec0
commit b5c79693fb
29 changed files with 76 additions and 78 deletions
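For reference, a minimal before/after sketch of a call site affected by this
rename; the module aliases and the example input are illustrative, not taken
from the commit:

import qualified Streamly.Data.Fold as Fold
import qualified Streamly.Data.Stream as Stream

-- Group a 10-element stream into arrays of at most 4 elements and count
-- the resulting arrays: ceiling (10/4) = 3.
main :: IO ()
main = do
    -- was: Stream.arraysOf 4 $ ...
    n <- Stream.fold Fold.length
            $ Stream.chunksOf 4
            $ Stream.fromList ([1 .. 10] :: [Int])
    print n -- prints 3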

View File

@@ -295,8 +295,8 @@ main = do
then return (undefined, undefined)
else
do
-small <- Stream.toList $ Stream.arraysOf 100 $ sourceUnfoldrM value 0
-big <- Stream.toList $ Stream.arraysOf value $ sourceUnfoldrM value 0
+small <- Stream.toList $ Stream.chunksOf 100 $ sourceUnfoldrM value 0
+big <- Stream.toList $ Stream.chunksOf value $ sourceUnfoldrM value 0
return (small, big)
allBenchmarks env arrays value =

View File

@@ -853,7 +853,7 @@ main = do
where
-alloc value = Stream.fold Fold.toList $ Stream.arraysOf 100 $ sourceUnfoldrM value 0
+alloc value = Stream.fold Fold.toList $ Stream.chunksOf 100 $ sourceUnfoldrM value 0
allBenchmarks env arrays value =
[ bgroup (o_1_space_prefix moduleName) (o_1_space_serial value)

View File

@@ -65,7 +65,7 @@ benchIOSink value name f =
bench name $ nfIO $ randomRIO (1,1)
>>= f
. StreamK.fromStream
-. Stream.arraysOf 4000
+. Stream.chunksOf 4000
. sourceUnfoldrM value
-------------------------------------------------------------------------------

View File

@@ -199,7 +199,7 @@ o_1_space_grouping value =
,
#endif
-- XXX parseMany/parseIterate benchmarks are in the Parser/ParserD
--- modules we can bring those here. arraysOf benchmarks are in
+-- modules we can bring those here. chunksOf benchmarks are in
-- Parser/ParserD/Array.Stream/FileSystem.Handle.
benchIOSink value "foldMany" foldMany
, benchIOSink value "foldManyPost" foldManyPost

View File

@@ -247,10 +247,10 @@ inspect $ 'groupsOf `hasNoType` ''AT.ArrayUnsafe -- AT.writeNUnsafe
inspect $ 'groupsOf `hasNoType` ''IUF.ConcatState -- FH.read/UF.many
#endif
-{-# INLINE arraysOf #-}
-arraysOf :: Int -> Handle -> IO Int
-arraysOf n inh =
-S.fold Fold.length $ Stream.arraysOf n (S.unfold FH.reader inh)
+{-# INLINE chunksOf #-}
+chunksOf :: Int -> Handle -> IO Int
+chunksOf n inh =
+S.fold Fold.length $ Stream.chunksOf n (S.unfold FH.reader inh)
o_1_space_reduce_read_grouped :: BenchEnv -> [Benchmark]
o_1_space_reduce_read_grouped env =
@@ -288,13 +288,13 @@ o_1_space_reduce_read_grouped env =
, mkBench "S.groupsOf 1000" env $ \inh _ ->
groupsOf 1000 inh
--- arraysOf may use a different impl than groupsOf
-, mkBenchSmall "S.arraysOf 1" env $ \inh _ ->
-arraysOf 1 inh
-, mkBench "S.arraysOf 10" env $ \inh _ ->
-arraysOf 10 inh
-, mkBench "S.arraysOf 1000" env $ \inh _ ->
-arraysOf 1000 inh
+-- chunksOf may use a different impl than groupsOf
+, mkBenchSmall "S.chunksOf 1" env $ \inh _ ->
+chunksOf 1 inh
+, mkBench "S.chunksOf 10" env $ \inh _ ->
+chunksOf 10 inh
+, mkBench "S.chunksOf 1000" env $ \inh _ ->
+chunksOf 1000 inh
]
]

View File

@@ -110,7 +110,7 @@ readFromBytesNull inh devNull = IFH.putBytes devNull $ S.unfold FH.reader inh
inspect $ hasNoTypeClasses 'readFromBytesNull
inspect $ 'readFromBytesNull `hasNoType` ''Step
inspect $ 'readFromBytesNull `hasNoType` ''MAS.SpliceState
-inspect $ 'readFromBytesNull `hasNoType` ''AT.ArrayUnsafe -- FH.fromBytes/S.arraysOf
+inspect $ 'readFromBytesNull `hasNoType` ''AT.ArrayUnsafe -- FH.fromBytes/S.chunksOf
inspect $ 'readFromBytesNull `hasNoType` ''D.FoldMany
#endif
@@ -124,7 +124,7 @@ readWithFromBytesNull inh devNull =
inspect $ hasNoTypeClasses 'readWithFromBytesNull
inspect $ 'readWithFromBytesNull `hasNoType` ''Step
inspect $ 'readWithFromBytesNull `hasNoType` ''MAS.SpliceState
-inspect $ 'readWithFromBytesNull `hasNoType` ''AT.ArrayUnsafe -- FH.fromBytes/S.arraysOf
+inspect $ 'readWithFromBytesNull `hasNoType` ''AT.ArrayUnsafe -- FH.fromBytes/S.chunksOf
inspect $ 'readWithFromBytesNull `hasNoType` ''D.FoldMany
#endif

View File

@@ -431,7 +431,7 @@ module Streamly.Data.Stream
-- * Repeated Fold
, foldMany -- XXX Rename to foldRepeat
, parseMany
-, Array.arraysOf
+, Array.chunksOf
-- * Buffered Operations
-- | Operations that require buffering of the stream.

View File

@@ -436,7 +436,7 @@ module Streamly.Data.Stream.StreamDK
-- * Repeated Fold
, foldMany -- XXX Rename to foldRepeat
, parseMany
-, arraysOf
+, chunksOf
-- * Buffered Operations
-- | Operations that require buffering of the stream.

View File

@@ -175,7 +175,7 @@ fromStreamN n m = do
-- as efficient when the size is already known.
--
-- Note that if the input stream is too large memory allocation for the array
--- may fail. When the stream size is not known, `arraysOf` followed by
+-- may fail. When the stream size is not known, `chunksOf` followed by
-- processing of individual arrays in the resulting stream should be preferred.
--
-- /Pre-release/
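A sketch of the recommended pattern (public-API aliases assumed; the sum fold
is illustrative): fold each bounded array as it arrives instead of allocating
one array for the whole stream.

import qualified Streamly.Data.Array as Array
import qualified Streamly.Data.Fold as Fold
import qualified Streamly.Data.Stream as Stream

-- Total element count computed chunk by chunk; memory use is bounded by
-- the chunk size (1024 elements here), not by the stream length.
totalLen :: IO Int
totalLen =
    Stream.fold Fold.sum
        $ fmap Array.length
        $ Stream.chunksOf 1024
        $ Stream.fromList ([1 .. 100000] :: [Int])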

View File

@@ -126,7 +126,7 @@ module Streamly.Internal.Data.Array.Generic.Mut.Type
-- multidimensional array representations.
-- ** Construct from streams
--- , arraysOf
+-- , chunksOf
-- , arrayStreamKFromStreamD
-- , writeChunks

View File

@@ -90,7 +90,7 @@ getSlicesFromLen from len =
-- as efficient when the size is already known.
--
-- Note that if the input stream is too large memory allocation for the array
--- may fail. When the stream size is not known, `arraysOf` followed by
+-- may fail. When the stream size is not known, `chunksOf` followed by
-- processing of individual arrays in the resulting stream should be preferred.
--
-- /Pre-release/

View File

@@ -11,7 +11,7 @@
module Streamly.Internal.Data.Array.Mut.Stream
(
-- * Generation
-arraysOf
+chunksOf
-- * Compaction
, packArraysChunksOf
@@ -43,18 +43,18 @@ import qualified Streamly.Internal.Data.Fold.Type as FL
import qualified Streamly.Internal.Data.Stream.StreamD as D
import qualified Streamly.Internal.Data.Parser.ParserD as ParserD
--- | @arraysOf n stream@ groups the elements in the input stream into arrays of
+-- | @chunksOf n stream@ groups the elements in the input stream into arrays of
-- @n@ elements each.
--
-- Same as the following but may be more efficient:
--
--- > arraysOf n = Stream.foldMany (MArray.writeN n)
+-- > chunksOf n = Stream.foldMany (MArray.writeN n)
--
-- /Pre-release/
-{-# INLINE arraysOf #-}
-arraysOf :: (MonadIO m, Unbox a)
+{-# INLINE chunksOf #-}
+chunksOf :: (MonadIO m, Unbox a)
=> Int -> Stream m a -> Stream m (MutArray a)
-arraysOf = MArray.arraysOf
+chunksOf = MArray.chunksOf
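The foldMany equivalence noted above can be spot-checked via the immutable
public API (a sketch; Stream, Fold and Array are streamly's public modules,
and the mutable variant defined here differs only in yielding MutArray):

-- >>> s = Stream.fromList "hello world"
-- >>> viaChunks = Stream.toList $ fmap Array.toList $ Stream.chunksOf 5 s
-- >>> viaFoldMany = Stream.toList $ Stream.foldMany (Fold.take 5 Fold.toList) s
-- >>> (==) <$> viaChunks <*> viaFoldMany
-- True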
-------------------------------------------------------------------------------
-- Compact

View File

@@ -169,7 +169,7 @@ module Streamly.Internal.Data.Array.Mut.Type
-- multidimensional array representations.
-- ** Construct from streams
-, arraysOf
+, chunksOf
, arrayStreamKFromStreamD
, writeChunks
@@ -1285,20 +1285,20 @@ data GroupState s contents start end bound
contents start end bound (GroupState s contents start end bound)
| GroupFinish
--- | @arraysOf n stream@ groups the input stream into a stream of
+-- | @chunksOf n stream@ groups the input stream into a stream of
-- arrays of size n.
--
--- @arraysOf n = StreamD.foldMany (MutArray.writeN n)@
+-- @chunksOf n = StreamD.foldMany (MutArray.writeN n)@
--
-- /Pre-release/
-{-# INLINE_NORMAL arraysOf #-}
-arraysOf :: forall m a. (MonadIO m, Unbox a)
+{-# INLINE_NORMAL chunksOf #-}
+chunksOf :: forall m a. (MonadIO m, Unbox a)
=> Int -> D.Stream m a -> D.Stream m (MutArray a)
-- XXX the idiomatic implementation leads to large regression in the D.reverse'
-- benchmark. It seems it has difficulty producing optimized code when
-- converting to StreamK. Investigate GHC optimizations.
--- arraysOf n = D.foldMany (writeN n)
-arraysOf n (D.Stream step state) =
+-- chunksOf n = D.foldMany (writeN n)
+chunksOf n (D.Stream step state) =
D.Stream step' (GroupStart state)
where
@@ -1307,7 +1307,7 @@ arraysOf n (D.Stream step state) =
step' _ (GroupStart st) = do
when (n <= 0) $
-- XXX we can pass the module string from the higher level API
error $ "Streamly.Internal.Data.MutArray.Mut.Type.arraysOf: "
error $ "Streamly.Internal.Data.MutArray.Mut.Type.chunksOf: "
++ "the size of arrays [" ++ show n
++ "] must be a natural number"
(MutArray contents start end bound :: MutArray a) <- liftIO $ newPinned n
@@ -1343,7 +1343,7 @@ arrayStreamKFromStreamD :: forall m a. (MonadIO m, Unbox a) =>
D.Stream m a -> m (StreamK m (MutArray a))
arrayStreamKFromStreamD =
let n = allocBytesToElemCount (undefined :: a) defaultChunkSize
-in D.foldr K.cons K.nil . arraysOf n
+in D.foldr K.cons K.nil . chunksOf n
-------------------------------------------------------------------------------
-- Streams of arrays - Flattening

View File

@@ -67,7 +67,7 @@ module Streamly.Internal.Data.Array.Type
, write
-- * Streams of arrays
-, arraysOf
+, chunksOf
, bufferChunks
, flattenArrays
, flattenArraysRev
@@ -263,20 +263,20 @@ fromStreamD str = unsafeFreeze <$> MA.fromStreamD str
{-# INLINE bufferChunks #-}
bufferChunks :: (MonadIO m, Unbox a) =>
D.Stream m a -> m (K.StreamK m (Array a))
-bufferChunks m = D.foldr K.cons K.nil $ arraysOf defaultChunkSize m
+bufferChunks m = D.foldr K.cons K.nil $ chunksOf defaultChunkSize m
--- | @arraysOf n stream@ groups the elements in the input stream into arrays of
+-- | @chunksOf n stream@ groups the elements in the input stream into arrays of
-- @n@ elements each.
--
-- Same as the following but may be more efficient:
--
--- >>> arraysOf n = Stream.foldMany (Array.writeN n)
+-- >>> chunksOf n = Stream.foldMany (Array.writeN n)
--
-- /Pre-release/
-{-# INLINE_NORMAL arraysOf #-}
-arraysOf :: forall m a. (MonadIO m, Unbox a)
+{-# INLINE_NORMAL chunksOf #-}
+chunksOf :: forall m a. (MonadIO m, Unbox a)
=> Int -> D.Stream m a -> D.Stream m (Array a)
-arraysOf n str = D.map unsafeFreeze $ MA.arraysOf n str
+chunksOf n str = D.map unsafeFreeze $ MA.chunksOf n str
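A doctest-style usage sketch (assuming the public Stream and Array modules,
which re-export this function per the export-list changes in this commit):

-- >>> import qualified Streamly.Data.Array as Array
-- >>> import qualified Streamly.Data.Stream as Stream
-- >>> Stream.toList $ fmap Array.toList $ Stream.chunksOf 4 $ Stream.fromList [1..10::Int]
-- [[1,2,3,4],[5,6,7,8],[9,10]]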
-- | Use the "read" unfold instead.
--

View File

@@ -26,7 +26,7 @@
-- >>> import qualified Streamly.Data.StreamK as StreamK
--
-- >>> f = ChunkFold.fromFold (Fold.take 7 Fold.toList)
--- >>> s = Stream.arraysOf 5 $ Stream.fromList "hello world"
+-- >>> s = Stream.chunksOf 5 $ Stream.fromList "hello world"
-- >>> ArrayStream.runArrayFold f (StreamK.fromStream s)
-- Right "hello w"
--

View File

@@ -548,7 +548,7 @@ reverse' =
. D.fromStreamK
. K.reverse
. D.toStreamK
-. A.arraysOf defaultChunkSize
+. A.chunksOf defaultChunkSize
. toStreamD
------------------------------------------------------------------------------

View File

@@ -11,7 +11,7 @@
module Streamly.Internal.Data.Stream.Chunked
(
-- * Creation
-arraysOf
+chunksOf
-- * Flattening to elements
, concat
@@ -108,16 +108,16 @@ import qualified Streamly.Internal.Data.Stream.StreamK as K
-- Generation
-------------------------------------------------------------------------------
--- | @arraysOf n stream@ groups the elements in the input stream into arrays of
+-- | @chunksOf n stream@ groups the elements in the input stream into arrays of
-- @n@ elements each.
--
--- > arraysOf n = Stream.groupsOf n (Array.writeN n)
+-- > chunksOf n = Stream.groupsOf n (Array.writeN n)
--
-- /Pre-release/
-{-# INLINE arraysOf #-}
-arraysOf :: (MonadIO m, Unbox a)
+{-# INLINE chunksOf #-}
+chunksOf :: (MonadIO m, Unbox a)
=> Int -> Stream m a -> Stream m (Array a)
-arraysOf = A.arraysOf
+chunksOf = A.chunksOf
-------------------------------------------------------------------------------
-- Append

View File

@@ -37,7 +37,7 @@ module Streamly.Internal.Data.Stream.Reduce
-- ** Chunking
-- | Element unaware grouping.
-, arraysOf
+, chunksOf
-- ** Splitting
-- XXX Implement these as folds or parsers instead.
@@ -430,15 +430,15 @@ parseIterate f i m = fromStreamD $
-- Chunking
------------------------------------------------------------------------------
--- | @arraysOf n stream@ groups the elements in the input stream into arrays of
+-- | @chunksOf n stream@ groups the elements in the input stream into arrays of
-- @n@ elements each.
--
-- Same as the following but may be more efficient:
--
--- >>> arraysOf n = Stream.foldMany (Array.writeN n)
+-- >>> chunksOf n = Stream.foldMany (Array.writeN n)
--
-- /Pre-release/
-{-# INLINE arraysOf #-}
-arraysOf :: (MonadIO m, Unbox a)
+{-# INLINE chunksOf #-}
+chunksOf :: (MonadIO m, Unbox a)
=> Int -> Stream m a -> Stream m (Array a)
-arraysOf n = fromStreamD . Array.arraysOf n . toStreamD
+chunksOf n = fromStreamD . Array.chunksOf n . toStreamD

View File

@@ -1498,7 +1498,7 @@ reverseUnbox =
. fromStreamK
. K.reverse
. toStreamK
-. A.arraysOf defaultChunkSize
+. A.chunksOf defaultChunkSize
-- | Buffer until the next element in sequence arrives. The function argument
-- determines the difference in sequence numbers. This could be useful in

View File

@@ -397,8 +397,8 @@ fromChunksWithBufferOf n h xs = fromChunks h $ AS.compact n xs
-- @since 0.7.0
{-# INLINE fromStreamWithBufferOf #-}
fromStreamWithBufferOf :: MonadIO m => Int -> Handle -> Stream m Word8 -> m ()
-fromStreamWithBufferOf n h m = fromChunks h $ S.arraysOf n m
--- fromStreamWithBufferOf n h m = fromChunks h $ AS.arraysOf n m
+fromStreamWithBufferOf n h m = fromChunks h $ S.chunksOf n m
+-- fromStreamWithBufferOf n h m = fromChunks h $ AS.chunksOf n m
-- > write = 'writeWithBufferOf' A.defaultChunkSize
--

View File

@@ -405,7 +405,7 @@ fromChunks = fromChunksMode WriteMode
{-# INLINE fromBytesWith #-}
fromBytesWith :: (MonadIO m, MonadCatch m)
=> Int -> FilePath -> Stream m Word8 -> m ()
-fromBytesWith n file xs = fromChunks file $ S.arraysOf n xs
+fromBytesWith n file xs = fromChunks file $ S.chunksOf n xs
{-# DEPRECATED fromBytesWithBufferOf "Please use 'fromBytesWith' instead" #-}
{-# INLINE fromBytesWithBufferOf #-}
@@ -502,7 +502,7 @@ appendChunks = fromChunksMode AppendMode
{-# INLINE appendWith #-}
appendWith :: (MonadIO m, MonadCatch m)
=> Int -> FilePath -> Stream m Word8 -> m ()
-appendWith n file xs = appendChunks file $ S.arraysOf n xs
+appendWith n file xs = appendChunks file $ S.chunksOf n xs
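A usage sketch for these file variants (path and buffer sizes illustrative;
File is assumed to alias this module, Streamly.Internal.FileSystem.File):

import Data.Word (Word8)
import qualified Streamly.Data.Stream as Stream
import qualified Streamly.Internal.FileSystem.File as File

-- Write, then append, buffering bytes into 4 KB chunks before each write.
writeTwice :: IO ()
writeTwice = do
    File.fromBytesWith 4096 "/tmp/example.bin" (Stream.replicate 10000 (0 :: Word8))
    File.appendWith 4096 "/tmp/example.bin" (Stream.replicate 10000 (1 :: Word8))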
-- | Append a byte stream to a file. Combines the bytes in chunks of size up to
-- 'A.defaultChunkSize' before writing. If the file exists then the new data

View File

@@ -429,13 +429,11 @@ putChunksWith n h xs = putChunks h $ AS.compact n xs
-- in chunks of @bufsize@. A write is performed to the IO device as soon as we
-- collect the required input size.
--
--- >>> putBytesWith n h m = Handle.putChunks h $ Stream.arraysOf n m
+-- >>> putBytesWith n h m = Handle.putChunks h $ Stream.chunksOf n m
--
{-# INLINE putBytesWith #-}
putBytesWith :: MonadIO m => Int -> Handle -> Stream m Word8 -> m ()
-putBytesWith n h m = putChunks h $ A.arraysOf n m
--- putBytesWith n h m = putChunks h $ AS.arraysOf n m
+putBytesWith n h m = putChunks h $ A.chunksOf n m
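A usage sketch (handle and buffer size illustrative; Handle is assumed to
alias this module, Streamly.Internal.FileSystem.Handle):

import Data.Word (Word8)
import System.IO (IOMode (WriteMode), withFile)
import qualified Streamly.Data.Stream as Stream
import qualified Streamly.Internal.FileSystem.Handle as Handle

-- Stream 100000 zero bytes to a handle, flushing in 32 KB chunks.
writeZeros :: IO ()
writeZeros =
    withFile "/tmp/zeros.bin" WriteMode $ \h ->
        Handle.putBytesWith 32768 h (Stream.replicate 100000 (0 :: Word8))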
-- | Write a byte stream to a file handle. Accumulates the input in chunks of
-- up to 'Streamly.Internal.Data.Array.Type.defaultChunkSize' before writing.
@@ -494,7 +492,7 @@ writeChunksWithBufferOf = writeChunksWith
-- do not want buffering to occur at GHC level as well. Same thing applies to
-- writes as well.
--- XXX Maybe we should have a Fold.arraysOf like we have Stream.arraysOf
+-- XXX Maybe we should have a Fold.chunksOf like we have Stream.chunksOf
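One way the Fold.chunksOf suggested above could be sketched with existing
combinators (purely an illustration of the XXX note, not an existing API;
Fold.many and Array.writeN are existing, chunksOfFold is hypothetical):

-- Group a fold's input into n-element arrays and feed them to an
-- array-consuming fold.
chunksOfFold :: (MonadIO m, Unbox a) => Int -> Fold m (Array a) b -> Fold m a b
chunksOfFold n = Fold.many (Array.writeN n)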
-- | @writeWith reqSize handle@ writes the input stream to @handle@.
-- Bytes in the input stream are collected into a buffer until we have a chunk

View File

@@ -533,7 +533,7 @@ reverse' =
. D.fromStreamK
. K.reverse
. D.toStreamK
-. A.arraysOf defaultChunkSize
+. A.chunksOf defaultChunkSize
. toStreamD
------------------------------------------------------------------------------

View File

@@ -185,7 +185,7 @@ import Streamly.Internal.Data.Unboxed (Unbox)
import qualified Data.Heap as H
import qualified Streamly.Internal.Data.Unfold as Unfold
import qualified Streamly.Internal.Data.Array.Type as A
-(arraysOf, read)
+(chunksOf, read)
import qualified Streamly.Internal.Data.Fold as FL
(Fold, Step(..), takeEndBy_, takeEndBy, catMaybes, take)
import qualified Streamly.Internal.Data.IsMap as IsMap
@@ -996,7 +996,7 @@ chunksOf n f = fromStreamD . D.groupsOf n f . toStreamD
{-# INLINE arraysOf #-}
arraysOf :: (IsStream t, MonadIO m, Unbox a)
=> Int -> t m a -> t m (Array a)
-arraysOf n = fromStreamD . A.arraysOf n . toStreamD
+arraysOf n = fromStreamD . A.chunksOf n . toStreamD
-- XXX we can implement this by repeatedly applying the 'lrunFor' fold.
-- XXX add this example after fixing the serial stream rate control

View File

@@ -410,7 +410,7 @@ _writevArraysPackedUpto n h xs =
-- @since 0.7.0
{-# INLINE writeInChunksOf #-}
writeInChunksOf :: MonadIO m => Int -> Handle -> Stream m Word8 -> m ()
-writeInChunksOf n h m = writeArrays h $ AS.arraysOf n m
+writeInChunksOf n h m = writeArrays h $ AS.chunksOf n m
-- > write = 'writeInChunksOf' A.defaultChunkSize
--

View File

@@ -385,7 +385,7 @@ putBytesWithBufferOf
-> PortNumber
-> Stream m Word8
-> m ()
-putBytesWithBufferOf n addr port m = putChunks addr port $ A.arraysOf n m
+putBytesWithBufferOf n addr port m = putChunks addr port $ A.chunksOf n m
-- | Like 'write' but provides control over the write buffer. Output will
-- be written to the IO device as soon as we collect the specified number of

View File

@@ -97,7 +97,7 @@ import Streamly.Internal.System.IO (defaultChunkSize)
import qualified Streamly.Data.Array as A (reader, length, writeN)
import qualified Streamly.Data.Fold as FL
import qualified Streamly.Internal.Data.Array.Type as A
-(unsafeFreeze, asPtrUnsafe, byteLength, writeNUnsafe, arraysOf)
+(unsafeFreeze, asPtrUnsafe, byteLength, writeNUnsafe, chunksOf)
import qualified Streamly.Internal.Data.Array.Mut as MArray
(MutArray(..), newPinnedBytes, asPtrUnsafe)
import qualified Streamly.Internal.Data.Stream.StreamD as S
@@ -495,7 +495,7 @@ writeChunksWithBufferOf = writeChunksWith
--
{-# INLINE putBytesWith #-}
putBytesWith :: MonadIO m => Int -> Socket -> Stream m Word8 -> m ()
-putBytesWith n h m = putChunks h $ A.arraysOf n m
+putBytesWith n h m = putChunks h $ A.chunksOf n m
-- | Write a byte stream to a socket. Accumulates the input in chunks of
-- specified number of bytes before writing.

View File

@@ -659,7 +659,7 @@ parseUnfold = do
<*> chooseInt (1, len)
<*> chooseInt (1, len)) $ \(ls, clen, tlen) ->
monadicIO $ do
-arrays <- toList $ S.arraysOf clen (S.fromList ls)
+arrays <- toList $ S.chunksOf clen (S.fromList ls)
let src = Source.source (Just (Producer.OuterLoop arrays))
let parser = P.fromFold (FL.take tlen FL.toList)
let readSrc =

View File

@@ -67,7 +67,7 @@
propDecodeEncodeIdArrays =
forAll genUnicode $ \list ->
monadicIO $ do
-let wrds = Stream.arraysOf 8 $ SS.encodeUtf8' $ Stream.fromList list
+let wrds = Stream.chunksOf 8 $ SS.encodeUtf8' $ Stream.fromList list
chrs <- Stream.toList $ IUS.decodeUtf8Chunks wrds
assert (chrs == list)