2022-04-07 17:41:43 +03:00
|
|
|
{-# LANGUAGE DataKinds #-}
|
|
|
|
{-# LANGUAGE QuasiQuotes #-}
|
|
|
|
{-# LANGUAGE RankNTypes #-}
|
|
|
|
|
2023-02-20 20:41:55 +03:00
|
|
|
module Test.Hasura.StreamingSubscriptionSuite (buildStreamingSubscriptionSuite) where
|
2022-04-07 17:41:43 +03:00
|
|
|
|
|
|
|
import Control.Concurrent.Async qualified as Async
|
|
|
|
import Control.Concurrent.MVar qualified as MVar
|
|
|
|
import Control.Concurrent.STM qualified as STM
|
|
|
|
import Control.Immortal qualified as Immortal
|
|
|
|
import Control.Lens ((.~))
|
|
|
|
import Data.Aeson qualified as A
|
|
|
|
import Data.ByteString.Lazy.UTF8 qualified as LBS
|
|
|
|
import Data.HashMap.Strict qualified as Map
|
2022-07-01 14:47:20 +03:00
|
|
|
import Data.Text qualified as T
|
|
|
|
import Data.Text.IO qualified as T
|
2022-04-07 17:41:43 +03:00
|
|
|
import Data.UUID qualified as UUID
|
|
|
|
import Data.UUID.V4 qualified as UUID
|
Import `pg-client-hs` as `PG`
Result of executing the following commands:
```shell
# replace "as Q" imports with "as PG" (in retrospect this didn't need a regex)
git grep -lE 'as Q($|[^a-zA-Z])' -- '*.hs' | xargs sed -i -E 's/as Q($|[^a-zA-Z])/as PG\1/'
# replace " Q." with " PG."
git grep -lE ' Q\.' -- '*.hs' | xargs sed -i 's/ Q\./ PG./g'
# replace "(Q." with "(PG."
git grep -lE '\(Q\.' -- '*.hs' | xargs sed -i 's/(Q\./(PG./g'
# ditto, but for [, |, { and !
git grep -lE '\[Q\.' -- '*.hs' | xargs sed -i 's/\[Q\./\[PG./g'
git grep -l '|Q\.' -- '*.hs' | xargs sed -i 's/|Q\./|PG./g'
git grep -l '{Q\.' -- '*.hs' | xargs sed -i 's/{Q\./{PG./g'
git grep -l '!Q\.' -- '*.hs' | xargs sed -i 's/!Q\./!PG./g'
```
(Doing the `grep -l` before the `sed`, instead of `sed` on the entire codebase, reduces the number of `mtime` updates, and so reduces how many times a file gets recompiled while checking intermediate results.)
Finally, I manually removed a broken and unused `Arbitrary` instance in `Hasura.RQL.Network`. (It used an `import Test.QuickCheck.Arbitrary as Q` statement, which was erroneously caught by the first find-replace command.)
After this PR, `Q` is no longer used as an import qualifier. That was not the goal of this PR, but perhaps it's a useful fact for future efforts.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/5933
GitOrigin-RevId: 8c84c59d57789111d40f5d3322c5a885dcfbf40e
2022-09-20 22:54:43 +03:00
|
|
|
import Database.PG.Query qualified as PG
|
2022-04-07 17:41:43 +03:00
|
|
|
import Hasura.Backends.Postgres.Connection
|
|
|
|
import Hasura.Backends.Postgres.Execute.Subscription (MultiplexedQuery (MultiplexedQuery))
|
|
|
|
import Hasura.Backends.Postgres.Instances.Transport ()
|
|
|
|
import Hasura.Backends.Postgres.SQL.Value (TxtEncodedVal (TELit))
|
2022-07-01 14:47:20 +03:00
|
|
|
import Hasura.Base.Error (showQErr)
|
2022-04-07 17:41:43 +03:00
|
|
|
import Hasura.GraphQL.Execute.Subscription.Options (mkSubscriptionsOptions)
|
|
|
|
import Hasura.GraphQL.Execute.Subscription.Plan
|
|
|
|
import Hasura.GraphQL.Execute.Subscription.Poll.Common
|
|
|
|
import Hasura.GraphQL.Execute.Subscription.Poll.StreamingQuery (pollStreamingQuery)
|
|
|
|
import Hasura.GraphQL.Execute.Subscription.State
|
|
|
|
import Hasura.GraphQL.Execute.Subscription.TMap qualified as TMap
|
|
|
|
import Hasura.GraphQL.ParameterizedQueryHash
|
|
|
|
import Hasura.GraphQL.Transport.WebSocket.Protocol (unsafeMkOperationId)
|
|
|
|
import Hasura.GraphQL.Transport.WebSocket.Server qualified as WS
|
|
|
|
import Hasura.Logging
|
2022-06-23 12:14:24 +03:00
|
|
|
import Hasura.Name qualified as Name
|
2022-04-07 17:41:43 +03:00
|
|
|
import Hasura.Prelude
|
|
|
|
import Hasura.RQL.Types.Backend
|
|
|
|
import Hasura.RQL.Types.Common
|
2022-10-20 04:32:54 +03:00
|
|
|
import Hasura.RQL.Types.ResizePool (ResizePoolStrategy (..))
|
2022-04-07 17:41:43 +03:00
|
|
|
import Hasura.SQL.Backend
|
2022-08-05 03:28:49 +03:00
|
|
|
import Hasura.Server.Init (considerEnv, databaseUrlOption, runWithEnv, _envVar)
|
2022-04-07 17:41:43 +03:00
|
|
|
import Hasura.Server.Metrics (createServerMetrics)
|
2022-07-24 00:18:01 +03:00
|
|
|
import Hasura.Server.Prometheus (makeDummyPrometheusMetrics)
|
2022-10-20 04:32:54 +03:00
|
|
|
import Hasura.Server.Types (RequestId (..))
|
2022-04-07 17:41:43 +03:00
|
|
|
import Hasura.Session (RoleName, mkRoleName)
|
2022-06-23 12:14:24 +03:00
|
|
|
import Language.GraphQL.Draft.Syntax.QQ qualified as G
|
2022-04-07 17:41:43 +03:00
|
|
|
import ListT qualified
|
|
|
|
import StmContainers.Map qualified as STMMap
|
|
|
|
import System.Environment (getEnvironment)
|
|
|
|
import System.Exit (exitFailure)
|
|
|
|
import System.Metrics
|
|
|
|
import Test.Hspec
|
|
|
|
|
2022-07-18 17:14:08 +03:00
|
|
|
-- | Build the streaming-subscriptions test suite.
--
-- Reads the database URL from the process environment (via
-- 'databaseUrlOption'), constructs a one-connection Postgres pool and a
-- 'PGSourceConfig' around it, and returns the polling spec wrapped in a
-- @describe@ group. If the environment variable is absent the process is
-- terminated via 'printErrExit'.
buildStreamingSubscriptionSuite :: IO Spec
buildStreamingSubscriptionSuite = do
  env <- getEnvironment

  pgUrlText :: Text <- flip onLeft (printErrExit . T.pack) $
    runWithEnv env $ do
      let envVar = _envVar databaseUrlOption
      maybeV <- considerEnv envVar
      onNothing maybeV $
        throwError $
          "Expected: " <> envVar

  let pgConnInfo = PG.ConnInfo 1 $ PG.CDDatabaseURI $ txtToBs pgUrlText

  pgPool <- PG.initPGPool pgConnInfo PG.defaultConnParams print

  let pgContext = mkPGExecCtx PG.ReadCommitted pgPool NeverResizePool
      dbSourceConfig = PGSourceConfig pgContext pgConnInfo Nothing (pure ()) defaultPostgresExtensionsSchema mempty ConnTemplate_NotApplicable

  pure $
    describe "Streaming subscriptions polling tests" $
      streamingSubscriptionPollingSpec dbSourceConfig
|
2022-04-07 17:41:43 +03:00
|
|
|
|
|
|
|
-- | Construct a 'RoleName' from a 'Text', raising an 'error' when
-- 'mkRoleName' rejects the input (presumably only for empty text —
-- the error message suggests so).
mkRoleNameE :: Text -> RoleName
mkRoleNameE roleText =
  case mkRoleName roleText of
    Just roleName -> roleName
    Nothing -> error "Use a non empty string"
|
|
|
|
|
|
|
|
-- | CohortStaticSnapshot is a data type to help to compare two cohorts at a specified instant.
-- Note that two Cohorts cannot be directly compared because they have mutable references within
-- them.
data CohortStaticSnapshot = CohortStaticSnapshot
  { -- | id of the cohort the snapshot was taken from
    _cssCohortId :: CohortId,
    -- | ids from the cohort's existing-subscribers map at snapshot time
    _cssExistingSubscribers :: [SubscriberId],
    -- | ids from the cohort's new-subscribers map at snapshot time
    _cssNewSubscribers :: [SubscriberId]
  }
  deriving (Show, Eq)
|
|
|
|
|
|
|
|
-- | Take an immutable snapshot of a cohort inside a single 'STM'
-- transaction. Cohorts hold mutable references and cannot be compared
-- directly, so we read both subscriber maps and keep only the
-- subscriber ids alongside the cohort id.
getStaticCohortSnapshot :: Cohort (STM.TVar CursorVariableValues) -> STM.STM CohortStaticSnapshot
getStaticCohortSnapshot (Cohort cohortId _respRef existingSubsTV newSubsTV _) =
  CohortStaticSnapshot cohortId
    <$> (map fst <$> TMap.toList existingSubsTV)
    <*> (map fst <$> TMap.toList newSubsTV)
|
|
|
|
|
|
|
|
streamingSubscriptionPollingSpec :: SourceConfig ('Postgres 'Vanilla) -> Spec
|
|
|
|
streamingSubscriptionPollingSpec srcConfig = do
|
|
|
|
let setupDDLTx =
|
Import `pg-client-hs` as `PG`
Result of executing the following commands:
```shell
# replace "as Q" imports with "as PG" (in retrospect this didn't need a regex)
git grep -lE 'as Q($|[^a-zA-Z])' -- '*.hs' | xargs sed -i -E 's/as Q($|[^a-zA-Z])/as PG\1/'
# replace " Q." with " PG."
git grep -lE ' Q\.' -- '*.hs' | xargs sed -i 's/ Q\./ PG./g'
# replace "(Q." with "(PG."
git grep -lE '\(Q\.' -- '*.hs' | xargs sed -i 's/(Q\./(PG./g'
# ditto, but for [, |, { and !
git grep -lE '\[Q\.' -- '*.hs' | xargs sed -i 's/\[Q\./\[PG./g'
git grep -l '|Q\.' -- '*.hs' | xargs sed -i 's/|Q\./|PG./g'
git grep -l '{Q\.' -- '*.hs' | xargs sed -i 's/{Q\./{PG./g'
git grep -l '!Q\.' -- '*.hs' | xargs sed -i 's/!Q\./!PG./g'
```
(Doing the `grep -l` before the `sed`, instead of `sed` on the entire codebase, reduces the number of `mtime` updates, and so reduces how many times a file gets recompiled while checking intermediate results.)
Finally, I manually removed a broken and unused `Arbitrary` instance in `Hasura.RQL.Network`. (It used an `import Test.QuickCheck.Arbitrary as Q` statement, which was erroneously caught by the first find-replace command.)
After this PR, `Q` is no longer used as an import qualifier. That was not the goal of this PR, but perhaps it's a useful fact for future efforts.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/5933
GitOrigin-RevId: 8c84c59d57789111d40f5d3322c5a885dcfbf40e
2022-09-20 22:54:43 +03:00
|
|
|
PG.unitQE
|
2022-04-07 17:41:43 +03:00
|
|
|
defaultTxErrorHandler
|
Import `pg-client-hs` as `PG`
Result of executing the following commands:
```shell
# replace "as Q" imports with "as PG" (in retrospect this didn't need a regex)
git grep -lE 'as Q($|[^a-zA-Z])' -- '*.hs' | xargs sed -i -E 's/as Q($|[^a-zA-Z])/as PG\1/'
# replace " Q." with " PG."
git grep -lE ' Q\.' -- '*.hs' | xargs sed -i 's/ Q\./ PG./g'
# replace "(Q." with "(PG."
git grep -lE '\(Q\.' -- '*.hs' | xargs sed -i 's/(Q\./(PG./g'
# ditto, but for [, |, { and !
git grep -lE '\[Q\.' -- '*.hs' | xargs sed -i 's/\[Q\./\[PG./g'
git grep -l '|Q\.' -- '*.hs' | xargs sed -i 's/|Q\./|PG./g'
git grep -l '{Q\.' -- '*.hs' | xargs sed -i 's/{Q\./{PG./g'
git grep -l '!Q\.' -- '*.hs' | xargs sed -i 's/!Q\./!PG./g'
```
(Doing the `grep -l` before the `sed`, instead of `sed` on the entire codebase, reduces the number of `mtime` updates, and so reduces how many times a file gets recompiled while checking intermediate results.)
Finally, I manually removed a broken and unused `Arbitrary` instance in `Hasura.RQL.Network`. (It used an `import Test.QuickCheck.Arbitrary as Q` statement, which was erroneously caught by the first find-replace command.)
After this PR, `Q` is no longer used as an import qualifier. That was not the goal of this PR, but perhaps it's a useful fact for future efforts.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/5933
GitOrigin-RevId: 8c84c59d57789111d40f5d3322c5a885dcfbf40e
2022-09-20 22:54:43 +03:00
|
|
|
[PG.sql|
|
2022-04-07 17:41:43 +03:00
|
|
|
CREATE TABLE "public"."numbers" (id int primary key);
|
|
|
|
|]
|
|
|
|
()
|
|
|
|
False
|
|
|
|
|
|
|
|
setupValueTx =
|
Import `pg-client-hs` as `PG`
Result of executing the following commands:
```shell
# replace "as Q" imports with "as PG" (in retrospect this didn't need a regex)
git grep -lE 'as Q($|[^a-zA-Z])' -- '*.hs' | xargs sed -i -E 's/as Q($|[^a-zA-Z])/as PG\1/'
# replace " Q." with " PG."
git grep -lE ' Q\.' -- '*.hs' | xargs sed -i 's/ Q\./ PG./g'
# replace "(Q." with "(PG."
git grep -lE '\(Q\.' -- '*.hs' | xargs sed -i 's/(Q\./(PG./g'
# ditto, but for [, |, { and !
git grep -lE '\[Q\.' -- '*.hs' | xargs sed -i 's/\[Q\./\[PG./g'
git grep -l '|Q\.' -- '*.hs' | xargs sed -i 's/|Q\./|PG./g'
git grep -l '{Q\.' -- '*.hs' | xargs sed -i 's/{Q\./{PG./g'
git grep -l '!Q\.' -- '*.hs' | xargs sed -i 's/!Q\./!PG./g'
```
(Doing the `grep -l` before the `sed`, instead of `sed` on the entire codebase, reduces the number of `mtime` updates, and so reduces how many times a file gets recompiled while checking intermediate results.)
Finally, I manually removed a broken and unused `Arbitrary` instance in `Hasura.RQL.Network`. (It used an `import Test.QuickCheck.Arbitrary as Q` statement, which was erroneously caught by the first find-replace command.)
After this PR, `Q` is no longer used as an import qualifier. That was not the goal of this PR, but perhaps it's a useful fact for future efforts.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/5933
GitOrigin-RevId: 8c84c59d57789111d40f5d3322c5a885dcfbf40e
2022-09-20 22:54:43 +03:00
|
|
|
PG.unitQE
|
2022-04-07 17:41:43 +03:00
|
|
|
defaultTxErrorHandler
|
Import `pg-client-hs` as `PG`
Result of executing the following commands:
```shell
# replace "as Q" imports with "as PG" (in retrospect this didn't need a regex)
git grep -lE 'as Q($|[^a-zA-Z])' -- '*.hs' | xargs sed -i -E 's/as Q($|[^a-zA-Z])/as PG\1/'
# replace " Q." with " PG."
git grep -lE ' Q\.' -- '*.hs' | xargs sed -i 's/ Q\./ PG./g'
# replace "(Q." with "(PG."
git grep -lE '\(Q\.' -- '*.hs' | xargs sed -i 's/(Q\./(PG./g'
# ditto, but for [, |, { and !
git grep -lE '\[Q\.' -- '*.hs' | xargs sed -i 's/\[Q\./\[PG./g'
git grep -l '|Q\.' -- '*.hs' | xargs sed -i 's/|Q\./|PG./g'
git grep -l '{Q\.' -- '*.hs' | xargs sed -i 's/{Q\./{PG./g'
git grep -l '!Q\.' -- '*.hs' | xargs sed -i 's/!Q\./!PG./g'
```
(Doing the `grep -l` before the `sed`, instead of `sed` on the entire codebase, reduces the number of `mtime` updates, and so reduces how many times a file gets recompiled while checking intermediate results.)
Finally, I manually removed a broken and unused `Arbitrary` instance in `Hasura.RQL.Network`. (It used an `import Test.QuickCheck.Arbitrary as Q` statement, which was erroneously caught by the first find-replace command.)
After this PR, `Q` is no longer used as an import qualifier. That was not the goal of this PR, but perhaps it's a useful fact for future efforts.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/5933
GitOrigin-RevId: 8c84c59d57789111d40f5d3322c5a885dcfbf40e
2022-09-20 22:54:43 +03:00
|
|
|
[PG.sql|
|
2022-04-07 17:41:43 +03:00
|
|
|
INSERT INTO "public"."numbers" values (1),(2),(3),(4),(5);
|
|
|
|
|]
|
|
|
|
()
|
|
|
|
False
|
|
|
|
|
|
|
|
teardownDDLTx =
|
Import `pg-client-hs` as `PG`
Result of executing the following commands:
```shell
# replace "as Q" imports with "as PG" (in retrospect this didn't need a regex)
git grep -lE 'as Q($|[^a-zA-Z])' -- '*.hs' | xargs sed -i -E 's/as Q($|[^a-zA-Z])/as PG\1/'
# replace " Q." with " PG."
git grep -lE ' Q\.' -- '*.hs' | xargs sed -i 's/ Q\./ PG./g'
# replace "(Q." with "(PG."
git grep -lE '\(Q\.' -- '*.hs' | xargs sed -i 's/(Q\./(PG./g'
# ditto, but for [, |, { and !
git grep -lE '\[Q\.' -- '*.hs' | xargs sed -i 's/\[Q\./\[PG./g'
git grep -l '|Q\.' -- '*.hs' | xargs sed -i 's/|Q\./|PG./g'
git grep -l '{Q\.' -- '*.hs' | xargs sed -i 's/{Q\./{PG./g'
git grep -l '!Q\.' -- '*.hs' | xargs sed -i 's/!Q\./!PG./g'
```
(Doing the `grep -l` before the `sed`, instead of `sed` on the entire codebase, reduces the number of `mtime` updates, and so reduces how many times a file gets recompiled while checking intermediate results.)
Finally, I manually removed a broken and unused `Arbitrary` instance in `Hasura.RQL.Network`. (It used an `import Test.QuickCheck.Arbitrary as Q` statement, which was erroneously caught by the first find-replace command.)
After this PR, `Q` is no longer used as an import qualifier. That was not the goal of this PR, but perhaps it's a useful fact for future efforts.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/5933
GitOrigin-RevId: 8c84c59d57789111d40f5d3322c5a885dcfbf40e
2022-09-20 22:54:43 +03:00
|
|
|
PG.unitQE
|
2022-04-07 17:41:43 +03:00
|
|
|
defaultTxErrorHandler
|
Import `pg-client-hs` as `PG`
Result of executing the following commands:
```shell
# replace "as Q" imports with "as PG" (in retrospect this didn't need a regex)
git grep -lE 'as Q($|[^a-zA-Z])' -- '*.hs' | xargs sed -i -E 's/as Q($|[^a-zA-Z])/as PG\1/'
# replace " Q." with " PG."
git grep -lE ' Q\.' -- '*.hs' | xargs sed -i 's/ Q\./ PG./g'
# replace "(Q." with "(PG."
git grep -lE '\(Q\.' -- '*.hs' | xargs sed -i 's/(Q\./(PG./g'
# ditto, but for [, |, { and !
git grep -lE '\[Q\.' -- '*.hs' | xargs sed -i 's/\[Q\./\[PG./g'
git grep -l '|Q\.' -- '*.hs' | xargs sed -i 's/|Q\./|PG./g'
git grep -l '{Q\.' -- '*.hs' | xargs sed -i 's/{Q\./{PG./g'
git grep -l '!Q\.' -- '*.hs' | xargs sed -i 's/!Q\./!PG./g'
```
(Doing the `grep -l` before the `sed`, instead of `sed` on the entire codebase, reduces the number of `mtime` updates, and so reduces how many times a file gets recompiled while checking intermediate results.)
Finally, I manually removed a broken and unused `Arbitrary` instance in `Hasura.RQL.Network`. (It used an `import Test.QuickCheck.Arbitrary as Q` statement, which was erroneously caught by the first find-replace command.)
After this PR, `Q` is no longer used as an import qualifier. That was not the goal of this PR, but perhaps it's a useful fact for future efforts.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/5933
GitOrigin-RevId: 8c84c59d57789111d40f5d3322c5a885dcfbf40e
2022-09-20 22:54:43 +03:00
|
|
|
[PG.sql|
|
2022-04-07 17:41:43 +03:00
|
|
|
DROP TABLE "public"."numbers";
|
|
|
|
|]
|
|
|
|
()
|
|
|
|
False
|
|
|
|
|
|
|
|
let setup = do
|
2023-01-25 10:12:53 +03:00
|
|
|
runPgSourceWriteTx srcConfig InternalRawQuery setupDDLTx >>= (`onLeft` (printErrExit . showQErr))
|
|
|
|
runPgSourceWriteTx srcConfig InternalRawQuery setupValueTx >>= (`onLeft` (printErrExit . showQErr))
|
2022-04-07 17:41:43 +03:00
|
|
|
|
|
|
|
teardown =
|
2023-01-25 10:12:53 +03:00
|
|
|
runPgSourceWriteTx srcConfig InternalRawQuery teardownDDLTx >>= (`onLeft` (printErrExit . showQErr))
|
2022-04-07 17:41:43 +03:00
|
|
|
|
|
|
|
runIO setup
|
|
|
|
|
|
|
|
pollerId <- runIO $ PollerId <$> UUID.nextRandom
|
|
|
|
let defaultSubscriptionOptions = mkSubscriptionsOptions Nothing Nothing -- use default values
|
|
|
|
paramQueryHash = mkUnsafeParameterizedQueryHash "random"
|
|
|
|
-- hardcoded multiplexed query which is generated for the following GraphQL query:
|
|
|
|
--
|
|
|
|
-- { subscription { numbers_stream(cursor: {initial_value: {id: 1}}, batch_size: 1) { id }} }
|
|
|
|
multiplexedQuery =
|
Import `pg-client-hs` as `PG`
Result of executing the following commands:
```shell
# replace "as Q" imports with "as PG" (in retrospect this didn't need a regex)
git grep -lE 'as Q($|[^a-zA-Z])' -- '*.hs' | xargs sed -i -E 's/as Q($|[^a-zA-Z])/as PG\1/'
# replace " Q." with " PG."
git grep -lE ' Q\.' -- '*.hs' | xargs sed -i 's/ Q\./ PG./g'
# replace "(Q." with "(PG."
git grep -lE '\(Q\.' -- '*.hs' | xargs sed -i 's/(Q\./(PG./g'
# ditto, but for [, |, { and !
git grep -lE '\[Q\.' -- '*.hs' | xargs sed -i 's/\[Q\./\[PG./g'
git grep -l '|Q\.' -- '*.hs' | xargs sed -i 's/|Q\./|PG./g'
git grep -l '{Q\.' -- '*.hs' | xargs sed -i 's/{Q\./{PG./g'
git grep -l '!Q\.' -- '*.hs' | xargs sed -i 's/!Q\./!PG./g'
```
(Doing the `grep -l` before the `sed`, instead of `sed` on the entire codebase, reduces the number of `mtime` updates, and so reduces how many times a file gets recompiled while checking intermediate results.)
Finally, I manually removed a broken and unused `Arbitrary` instance in `Hasura.RQL.Network`. (It used an `import Test.QuickCheck.Arbitrary as Q` statement, which was erroneously caught by the first find-replace command.)
After this PR, `Q` is no longer used as an import qualifier. That was not the goal of this PR, but perhaps it's a useful fact for future efforts.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/5933
GitOrigin-RevId: 8c84c59d57789111d40f5d3322c5a885dcfbf40e
2022-09-20 22:54:43 +03:00
|
|
|
[PG.sql|
|
2022-04-07 17:41:43 +03:00
|
|
|
SELECT "_subs"."result_id" , "_fld_resp"."root" AS "result", "_fld_resp"."cursor" AS "cursor" FROM UNNEST(($1)::uuid[], ($2)::json[]) AS "_subs"("result_id", "result_vars")
|
|
|
|
LEFT OUTER JOIN LATERAL
|
|
|
|
(SELECT json_build_object('numbers_stream', "numbers_stream"."root" ) AS "root", to_json("numbers_stream"."cursor" ) AS "cursor"
|
|
|
|
FROM
|
|
|
|
(SELECT coalesce(json_agg("root" ORDER BY "root.pg.id" ASC ), '[]' ) AS "root", json_build_object('id', (MAX("root.pg.id" ))::text ) AS "cursor"
|
|
|
|
FROM
|
|
|
|
(SELECT row_to_json((SELECT "_1_e" FROM (SELECT "_0_root.base"."id" AS "id" ) AS "_1_e" ) ) AS "root", "_0_root.base"."id" AS "root.pg.id"
|
|
|
|
FROM (SELECT * FROM "public"."numbers" WHERE (("public"."numbers"."id") > ((("_subs"."result_vars"#>>ARRAY['cursor', 'id']))::integer)) ORDER BY "id" ASC LIMIT 1 ) AS "_0_root.base"
|
|
|
|
ORDER BY "root.pg.id" ASC ) AS "_2_root" ) AS "numbers_stream" )
|
|
|
|
AS "_fld_resp" ON ('true')
|
|
|
|
|]
|
|
|
|
let pollingAction cohortMap testSyncAction =
|
|
|
|
pollStreamingQuery
|
|
|
|
@('Postgres 'Vanilla)
|
|
|
|
pollerId
|
|
|
|
defaultSubscriptionOptions
|
|
|
|
(SNDefault, srcConfig)
|
|
|
|
(mkRoleNameE "random")
|
|
|
|
paramQueryHash
|
|
|
|
(MultiplexedQuery multiplexedQuery)
|
|
|
|
cohortMap
|
2022-06-23 12:14:24 +03:00
|
|
|
[G.name|randomRootField|]
|
2022-04-07 17:41:43 +03:00
|
|
|
(const $ pure ())
|
|
|
|
testSyncAction
|
|
|
|
|
|
|
|
mkSubscriber sId =
|
|
|
|
let wsId = maybe (error "Invalid UUID") WS.mkUnsafeWSId $ UUID.fromString "ec981f92-8d5a-47ab-a306-80af7cfb1113"
|
|
|
|
requestId = RequestId "randomRequestId"
|
|
|
|
subscriberMetadata = mkSubscriberMetadata wsId (unsafeMkOperationId "randomOperationId") Nothing requestId
|
|
|
|
in Subscriber sId subscriberMetadata requestId Nothing (const (pure ()))
|
|
|
|
|
|
|
|
mkNewCohort cohortId initialCursorValue = do
|
|
|
|
latestCursorValues <- STM.newTVar (CursorVariableValues initialCursorValue)
|
|
|
|
Cohort cohortId <$> STM.newTVar Nothing <*> TMap.new <*> TMap.new <*> pure latestCursorValues
|
|
|
|
|
|
|
|
addSubscriberToCohort subscriber cohort =
|
|
|
|
TMap.insert subscriber (_sId subscriber) $ _cNewSubscribers cohort
|
|
|
|
|
|
|
|
-- The multiplexed query we chose only deals with cursor variables, so we might as
|
|
|
|
-- well create a new function with the other variables being set to `mempty`
|
|
|
|
mkCohortVariables' = mkCohortVariables mempty mempty mempty mempty
|
|
|
|
|
|
|
|
describe "Streaming subcription poll" $ do
|
|
|
|
cohortId1 <- runIO newCohortId
|
|
|
|
(subscriberId1, subscriberId2) <- runIO $ (,) <$> newSubscriberId <*> newSubscriberId
|
|
|
|
let subscriber1 = mkSubscriber subscriberId1
|
|
|
|
subscriber2 = mkSubscriber subscriberId2
|
2022-06-23 12:14:24 +03:00
|
|
|
let initialCursorValue = Map.singleton Name._id (TELit "1")
|
2022-04-07 17:41:43 +03:00
|
|
|
cohort1 <- runIO $
|
|
|
|
liftIO $
|
|
|
|
STM.atomically $ do
|
|
|
|
cohort1' <- mkNewCohort cohortId1 initialCursorValue
|
|
|
|
-- adding a subscriber to the newly created cohort
|
|
|
|
addSubscriberToCohort subscriber1 cohort1'
|
|
|
|
pure cohort1'
|
|
|
|
cohortMap <- runIO $ liftIO $ STM.atomically $ TMap.new
|
|
|
|
let cohortKey1 = mkCohortVariables' (mkUnsafeValidateVariables initialCursorValue)
|
|
|
|
cohortId2 <- runIO newCohortId
|
|
|
|
cohort2 <- runIO $
|
|
|
|
liftIO $
|
|
|
|
STM.atomically $ do
|
|
|
|
cohort2' <- mkNewCohort cohortId2 initialCursorValue
|
|
|
|
addSubscriberToCohort subscriber2 cohort2'
|
|
|
|
pure cohort2'
|
|
|
|
|
2022-06-23 12:14:24 +03:00
|
|
|
let mkCohortKey n = cohortKey1 & cvCursorVariables . unValidatedVariables . ix [G.name|id|] .~ TELit n
|
2022-04-07 17:41:43 +03:00
|
|
|
cohortKey2 = mkCohortKey "2"
|
|
|
|
cohortKey3 = mkCohortKey "3"
|
|
|
|
|
|
|
|
describe "after first poll, the key of the cohort should be updated to contain the next cursor value" $ do
|
|
|
|
runIO $
|
|
|
|
STM.atomically $ do
|
|
|
|
TMap.reset cohortMap
|
|
|
|
TMap.insert cohort1 cohortKey1 cohortMap
|
|
|
|
|
2023-01-25 10:12:53 +03:00
|
|
|
runIO $ pollingAction cohortMap Nothing Nothing
|
2022-04-07 17:41:43 +03:00
|
|
|
currentCohortMap <- runIO $ STM.atomically $ TMap.getMap cohortMap
|
|
|
|
|
|
|
|
it "the key of the cohort1 should have been moved from the cohortKey1 to cohortKey2, so it should not be found anymore at cohortKey1" $ do
|
|
|
|
cohortMappedToCohortKey1 <- STM.atomically $ traverse getStaticCohortSnapshot $ Map.lookup cohortKey1 currentCohortMap
|
|
|
|
cohortMappedToCohortKey1 `shouldBe` Nothing
|
|
|
|
|
|
|
|
it "the key of the cohort1 should have been moved from the cohortKey1 to cohortKey2, so it should be found anymore at cohortKey2" $ do
|
|
|
|
cohortMappedToCohortKey2 <- STM.atomically $ traverse getStaticCohortSnapshot $ Map.lookup cohortKey2 currentCohortMap
|
|
|
|
cohortMappedToCohortKey2 `shouldBe` Just (CohortStaticSnapshot cohortId1 [subscriberId1] mempty)
|
|
|
|
|
|
|
|
describe "manipulating cohorts" $ do
|
|
|
|
it "adding a new cohort concurrently to the poll, should leave the new cohort as it is in the cohort map" $ do
|
|
|
|
syncMVar <- MVar.newEmptyMVar -- MVar to synchronize code
|
|
|
|
STM.atomically $ do
|
|
|
|
TMap.reset cohortMap
|
|
|
|
TMap.insert cohort1 cohortKey1 cohortMap
|
|
|
|
let syncAction = do
|
|
|
|
-- This action will block until the `syncMVar` is not empty. This action
|
|
|
|
-- is run after the cohorts are snapshotted in `pollStreamingQuery`
|
|
|
|
MVar.readMVar syncMVar
|
|
|
|
STM.atomically $ TMap.insert cohort2 cohortKey3 cohortMap
|
|
|
|
Async.withAsync
|
2023-01-25 10:12:53 +03:00
|
|
|
(pollingAction cohortMap (Just syncAction) Nothing)
|
2022-04-07 17:41:43 +03:00
|
|
|
( \pollAsync -> do
|
|
|
|
MVar.putMVar syncMVar ()
|
|
|
|
Async.wait pollAsync
|
|
|
|
)
|
|
|
|
currentCohortMap <- STM.atomically $ TMap.getMap cohortMap
|
|
|
|
let currentCohort2 = Map.lookup cohortKey3 currentCohortMap
|
|
|
|
|
|
|
|
(originalCohort2StaticSnapshot, currentCohort2StaticSnapshot) <-
|
|
|
|
STM.atomically $
|
|
|
|
(,) <$> getStaticCohortSnapshot cohort2 <*> traverse getStaticCohortSnapshot currentCohort2
|
|
|
|
Just originalCohort2StaticSnapshot `shouldBe` currentCohort2StaticSnapshot
|
|
|
|
|
|
|
|
it "deleting a cohort concurrently should not retain the deleted cohort in the cohort map" $ do
|
|
|
|
STM.atomically $ do
|
|
|
|
-- reset the cohort map and add a single cohort
|
|
|
|
TMap.reset cohortMap
|
|
|
|
TMap.insert cohort2 cohortKey1 cohortMap
|
|
|
|
syncMVar <- MVar.newEmptyMVar
|
|
|
|
let syncAction = do
|
|
|
|
MVar.readMVar syncMVar
|
|
|
|
STM.atomically $ TMap.delete cohortKey1 cohortMap
|
|
|
|
Async.withAsync
|
2023-01-25 10:12:53 +03:00
|
|
|
(pollingAction cohortMap (Just syncAction) Nothing)
|
2022-04-07 17:41:43 +03:00
|
|
|
( \pollAsync -> do
|
|
|
|
MVar.putMVar syncMVar ()
|
|
|
|
Async.wait pollAsync
|
|
|
|
)
|
|
|
|
currentCohortMap <- STM.atomically $ TMap.getMap cohortMap
|
|
|
|
Map.size currentCohortMap `shouldBe` 0 -- since there was only one cohort initially, now the cohort map should not have any cohorts
|
|
|
|
describe "manipulating adding and deleting of subscribers concurrently" $ do
|
|
|
|
it "adding a new subscriber concurrently should place the subscriber in the appropriate cohort" $ do
|
|
|
|
temporarySubscriberId <- newSubscriberId
|
|
|
|
let newTemporarySubscriber = mkSubscriber temporarySubscriberId
|
|
|
|
STM.atomically $ do
|
|
|
|
TMap.reset cohortMap
|
|
|
|
TMap.insert cohort1 cohortKey1 cohortMap
|
|
|
|
syncMVar <- MVar.newEmptyMVar
|
|
|
|
let syncAction = do
|
|
|
|
MVar.readMVar syncMVar
|
|
|
|
STM.atomically $ addSubscriberToCohort newTemporarySubscriber cohort1
|
|
|
|
Async.withAsync
|
2023-01-25 10:12:53 +03:00
|
|
|
(pollingAction cohortMap (Just syncAction) Nothing)
|
2022-04-07 17:41:43 +03:00
|
|
|
( \pollAsync -> do
|
|
|
|
-- concurrently inserting a new cohort to a key (cohortKey2) to which
|
|
|
|
-- cohort1 is expected to be associated after the current poll
|
|
|
|
MVar.putMVar syncMVar ()
|
|
|
|
Async.wait pollAsync
|
|
|
|
)
|
|
|
|
currentCohortMap <- STM.atomically $ TMap.getMap cohortMap
|
|
|
|
let cohortKey2Cohort = Map.lookup cohortKey2 currentCohortMap
|
|
|
|
cohortKey1Cohort = Map.lookup cohortKey1 currentCohortMap
|
|
|
|
cohortKey1CohortSnapshot <- STM.atomically $ traverse getStaticCohortSnapshot cohortKey1Cohort
|
|
|
|
_cssNewSubscribers <$> cohortKey1CohortSnapshot `shouldBe` Just [temporarySubscriberId]
|
|
|
|
|
|
|
|
-- Checking below that the newly added subscriber is not added in the updated cohort
|
|
|
|
cohortKey2CohortSnapshot <- STM.atomically $ traverse getStaticCohortSnapshot cohortKey2Cohort
|
2022-10-04 00:49:32 +03:00
|
|
|
_cssNewSubscribers <$> cohortKey2CohortSnapshot `shouldSatisfy` all (notElem temporarySubscriberId)
|
|
|
|
_cssExistingSubscribers <$> cohortKey2CohortSnapshot `shouldSatisfy` all (notElem temporarySubscriberId)
|
2022-04-07 17:41:43 +03:00
|
|
|
STM.atomically $
|
|
|
|
TMap.delete temporarySubscriberId (_cNewSubscribers cohort1)
|
|
|
|
|
|
|
|
it "deleting a subscriber from a cohort should not retain the subscriber in any of the cohorts" $ do
|
|
|
|
temporarySubscriberId <- newSubscriberId
|
|
|
|
let newTemporarySubscriber = mkSubscriber temporarySubscriberId
|
|
|
|
STM.atomically $ do
|
|
|
|
TMap.reset cohortMap
|
|
|
|
TMap.insert cohort1 cohortKey1 cohortMap
|
|
|
|
addSubscriberToCohort newTemporarySubscriber cohort1
|
|
|
|
syncMVar <- MVar.newEmptyMVar
|
|
|
|
let syncAction = do
|
|
|
|
MVar.readMVar syncMVar
|
|
|
|
STM.atomically $ TMap.delete temporarySubscriberId (_cNewSubscribers cohort1)
|
|
|
|
Async.withAsync
|
2023-01-25 10:12:53 +03:00
|
|
|
(pollingAction cohortMap (Just syncAction) Nothing)
|
2022-04-07 17:41:43 +03:00
|
|
|
( \pollAsync -> do
|
|
|
|
MVar.putMVar syncMVar ()
|
|
|
|
Async.wait pollAsync
|
|
|
|
)
|
|
|
|
currentCohortMap <- STM.atomically $ TMap.getMap cohortMap
|
|
|
|
let cohortKey2Cohort = Map.lookup cohortKey2 currentCohortMap
|
|
|
|
cohortKey1Cohort = Map.lookup cohortKey1 currentCohortMap
|
|
|
|
cohortKey1CohortSnapshot <- STM.atomically $ traverse getStaticCohortSnapshot cohortKey1Cohort
|
|
|
|
cohortKey2CohortSnapshot <- STM.atomically $ traverse getStaticCohortSnapshot cohortKey2Cohort
|
|
|
|
|
|
|
|
-- check the deleted subscriber in the older cohort
|
|
|
|
_cssExistingSubscribers <$> cohortKey1CohortSnapshot
|
|
|
|
`shouldSatisfy` (\existingSubs -> temporarySubscriberId `notElem` concat (maybeToList existingSubs))
|
|
|
|
_cssNewSubscribers <$> cohortKey1CohortSnapshot
|
|
|
|
`shouldSatisfy` (\newSubs -> temporarySubscriberId `notElem` concat (maybeToList newSubs))
|
|
|
|
_cssExistingSubscribers <$> cohortKey2CohortSnapshot
|
|
|
|
`shouldSatisfy` (\existingSubs -> temporarySubscriberId `notElem` concat (maybeToList existingSubs))
|
|
|
|
_cssNewSubscribers <$> cohortKey2CohortSnapshot
|
|
|
|
`shouldSatisfy` (\newSubs -> temporarySubscriberId `notElem` concat (maybeToList newSubs))
|
|
|
|
STM.atomically $
|
|
|
|
TMap.delete temporarySubscriberId (_cNewSubscribers cohort1)
|
|
|
|
|
|
|
|
describe "Adding two subscribers concurrently" $ do
|
|
|
|
dummyServerStore <- runIO newStore
|
|
|
|
dummyServerMetrics <- runIO $ createServerMetrics dummyServerStore
|
2022-07-24 00:18:01 +03:00
|
|
|
dummyPromMetrics <- runIO makeDummyPrometheusMetrics
|
2022-04-07 17:41:43 +03:00
|
|
|
|
|
|
|
subscriptionState <- do
|
2023-02-24 21:09:36 +03:00
|
|
|
runIO $ initSubscriptionsState (const (pure ()))
|
2022-04-07 17:41:43 +03:00
|
|
|
|
|
|
|
let requestId1 = RequestId "request-id1"
|
|
|
|
requestId2 = RequestId "request-id2"
|
|
|
|
|
|
|
|
dummyWSId <- runIO $ WS.mkUnsafeWSId <$> UUID.nextRandom
|
2022-12-22 20:08:04 +03:00
|
|
|
cohortId <- runIO newCohortId
|
2022-04-07 17:41:43 +03:00
|
|
|
|
|
|
|
let parameterizedSubscriptionQueryPlan = ParameterizedSubscriptionQueryPlan (mkRoleNameE "user") $ MultiplexedQuery multiplexedQuery
|
|
|
|
opID1 = unsafeMkOperationId "1"
|
|
|
|
opID2 = unsafeMkOperationId "2"
|
|
|
|
subscriber1Metadata = mkSubscriberMetadata dummyWSId opID1 Nothing requestId1
|
|
|
|
subscriber2Metadata = mkSubscriberMetadata dummyWSId opID2 Nothing requestId2
|
|
|
|
dummyParamQueryHash = mkUnsafeParameterizedQueryHash "unsafeParamHash"
|
|
|
|
|
|
|
|
subscriptionQueryPlan =
|
|
|
|
SubscriptionQueryPlan
|
|
|
|
parameterizedSubscriptionQueryPlan
|
|
|
|
srcConfig
|
2022-12-22 20:08:04 +03:00
|
|
|
cohortId
|
2023-01-25 10:12:53 +03:00
|
|
|
Nothing
|
2022-04-07 17:41:43 +03:00
|
|
|
cohortKey1
|
|
|
|
Nothing
|
|
|
|
|
|
|
|
let logger :: Logger Hasura = Logger $ \l -> do
|
|
|
|
let (logLevel, logType :: EngineLogType Hasura, logDetail) = toEngineLog l
|
|
|
|
t <- liftIO $ getFormattedTime Nothing
|
|
|
|
liftIO $ putStrLn $ LBS.toString $ A.encode $ EngineLog t logLevel logType logDetail
|
|
|
|
|
2023-02-24 21:09:36 +03:00
|
|
|
subOptions = mkSubscriptionsOptions Nothing Nothing
|
2022-04-07 17:41:43 +03:00
|
|
|
addStreamSubQuery subscriberMetadata reqId =
|
|
|
|
addStreamSubscriptionQuery
|
|
|
|
@('Postgres 'Vanilla)
|
|
|
|
logger
|
|
|
|
dummyServerMetrics
|
2022-07-24 00:18:01 +03:00
|
|
|
dummyPromMetrics
|
2022-04-07 17:41:43 +03:00
|
|
|
subscriberMetadata
|
|
|
|
subscriptionState
|
2023-02-24 21:09:36 +03:00
|
|
|
subOptions
|
2022-04-07 17:41:43 +03:00
|
|
|
SNDefault
|
|
|
|
dummyParamQueryHash
|
|
|
|
Nothing
|
|
|
|
reqId
|
2022-09-05 12:12:51 +03:00
|
|
|
[G.name|numbers_stream|]
|
2022-04-07 17:41:43 +03:00
|
|
|
subscriptionQueryPlan
|
|
|
|
(const (pure ()))
|
|
|
|
|
|
|
|
it "concurrently adding two subscribers should retain both of them in the poller map" $ do
|
|
|
|
-- Adding two subscribers that query identical queries should be adding them into the same
|
|
|
|
-- cohort of the same poller
|
|
|
|
liftIO $
|
|
|
|
Async.concurrently_
|
|
|
|
(addStreamSubQuery subscriber1Metadata requestId1)
|
|
|
|
(addStreamSubQuery subscriber2Metadata requestId2)
|
|
|
|
|
|
|
|
let streamQueryMap = _ssStreamQueryMap subscriptionState
|
|
|
|
|
|
|
|
streamQueryMapEntries <- STM.atomically $ ListT.toList $ STMMap.listT streamQueryMap
|
|
|
|
length streamQueryMapEntries `shouldBe` 1
|
|
|
|
let (pollerKey, (Poller currentCohortMap ioState)) = head streamQueryMapEntries
|
|
|
|
cohorts <- STM.atomically $ TMap.toList currentCohortMap
|
|
|
|
length cohorts `shouldBe` 1
|
|
|
|
let (_cohortKey, Cohort _ _ curSubsTV newSubsTV _) = head cohorts
|
|
|
|
allSubscribers <- STM.atomically $ do
|
|
|
|
curSubs <- TMap.toList curSubsTV
|
|
|
|
newSubs <- TMap.toList newSubsTV
|
|
|
|
pure $ curSubs <> newSubs
|
|
|
|
length allSubscribers `shouldBe` 2
|
|
|
|
|
|
|
|
threadRef <- STM.atomically $ do
|
|
|
|
STMMap.delete pollerKey streamQueryMap
|
|
|
|
_pThread <$> STM.readTMVar ioState
|
|
|
|
Immortal.stop threadRef
|
|
|
|
|
|
|
|
runIO teardown
|
|
|
|
|
2022-07-01 14:47:20 +03:00
|
|
|
-- | Print the given message to stdout, then terminate the process with a
-- failing exit code. The fully polymorphic result type reflects that this
-- action never returns normally.
printErrExit :: Text -> IO a
printErrExit message = do
  T.putStrLn message
  exitFailure
|