Merge branch 'master' into docs-getting-started-update

commit ba6329c90e
Rikin Kachhia 2020-09-10 15:18:41 +05:30, committed by GitHub
34 changed files with 168 additions and 57 deletions

View File

@@ -178,7 +178,7 @@ cd $PYTEST_ROOT
RUN_WEBHOOK_TESTS=true
for port in 8080 8081 9876 5592
for port in 8080 8081 9876 5592 5000 5594
do
fail_if_port_busy $port
done
@@ -205,6 +205,7 @@ fi
export WEBHOOK_FROM_ENV="http://127.0.0.1:5592"
export SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN="http://127.0.0.1:5594"
export HASURA_GRAPHQL_STRINGIFY_NUMERIC_TYPES=true
export REMOTE_SCHEMAS_WEBHOOK_DOMAIN="http://127.0.0.1:5000"
HGE_PIDS=""
WH_PID=""

View File

@@ -54,11 +54,22 @@ This release contains the [PDV refactor (#4111)](https://github.com/hasura/graph
- server: miscellaneous description changes (#4111)
- server: treat the absence of `backend_only` configuration and `backend_only: false` equally (closing #5059) (#4111)
- server: allow remote relationships joining `type` column with `[type]` input argument, as the spec allows this coercion (fixes #5133)
- server: add action-like URL templating for event triggers and remote schemas (fixes #2483)
- console: allow user to cascade Postgres dependencies when dropping Postgres objects (close #5109) (#5248)
- console: mark inconsistent remote schemas in the UI (close #5093) (#5181)
- cli: add missing global flags for seeds command (#5565)
- docs: add docs page on networking with docker (close #4346) (#4811)
## `v1.3.2`
### Bug fixes and improvements
(Add entries here in the order of: server, console, cli, docs, others)
- server: fix a regression in column masking in select permissions for computed fields (fix #5696)
## `v1.3.1`, `v1.3.1-beta.1`
### Breaking change

View File

@@ -55,12 +55,13 @@ const setReadOnlyMode = data => ({
data,
});
export const fetchPostgresVersion = dispatch => {
export const fetchPostgresVersion = (dispatch, getState) => {
const req = getRunSqlQuery('SELECT version()');
const options = {
method: 'POST',
credentials: globalCookiePolicy,
body: JSON.stringify(req),
headers: getState().tables.dataHeaders,
};
return dispatch(requestAction(Endpoints.query, options)).then(
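
The fix threads getState into the thunk so the run_sql request is sent with the console's stored data headers (for example the admin secret) rather than with no auth headers at all. A rough Python sketch of the equivalent request against the v1 query endpoint (header contents are illustrative):

    import requests

    def fetch_postgres_version(endpoint: str, data_headers: dict) -> str:
        # Same payload getRunSqlQuery builds: a run_sql query over /v1/query.
        payload = {"type": "run_sql", "args": {"sql": "SELECT version()"}}
        resp = requests.post(f"{endpoint}/v1/query", json=payload, headers=data_headers)
        resp.raise_for_status()
        # run_sql returns rows as a list of lists, with the header row first.
        return resp.json()["result"][1][0]

    # e.g. fetch_postgres_version("http://localhost:8080",
    #                             {"x-hasura-admin-secret": "..."})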

View File

@@ -272,6 +272,8 @@ const analyzeFetcher = (headers, mode) => {
const changeRequestHeader = (index, key, newValue, isDisabled) => {
return (dispatch, getState) => {
websocketSubscriptionClient = null;
const currentState = getState().apiexplorer;
const updatedHeader = {

View File

@@ -98,7 +98,7 @@
"firewallRuleName": "allow-all-azure-firewall-rule",
"containerGroupName": "[concat(parameters('name'), '-container-group')]",
"containerName": "hasura-graphql-engine",
"containerImage": "hasura/graphql-engine:v1.3.1"
"containerImage": "hasura/graphql-engine:v1.3.2"
},
"resources": [
{

View File

@@ -55,7 +55,7 @@
"dbName": "[parameters('postgresDatabaseName')]",
"containerGroupName": "[concat(parameters('name'), '-container-group')]",
"containerName": "hasura-graphql-engine",
"containerImage": "hasura/graphql-engine:v1.3.1"
"containerImage": "hasura/graphql-engine:v1.3.2"
},
"resources": [
{

View File

@@ -8,7 +8,7 @@ services:
environment:
POSTGRES_PASSWORD: postgrespassword
graphql-engine:
image: hasura/graphql-engine:v1.3.1
image: hasura/graphql-engine:v1.3.2
depends_on:
- "postgres"
restart: always

View File

@@ -19,7 +19,7 @@ services:
PGADMIN_DEFAULT_EMAIL: pgadmin@example.com
PGADMIN_DEFAULT_PASSWORD: admin
graphql-engine:
image: hasura/graphql-engine:v1.3.1
image: hasura/graphql-engine:v1.3.2
ports:
- "8080:8080"
depends_on:

View File

@@ -8,7 +8,7 @@ services:
environment:
POSTGRES_PASSWORD: postgrespassword
graphql-engine:
image: hasura/graphql-engine:v1.3.1
image: hasura/graphql-engine:v1.3.2
ports:
- "8080:8080"
depends_on:

View File

@@ -8,7 +8,7 @@ services:
environment:
POSTGRES_PASSWORD: postgrespassword
graphql-engine:
image: hasura/graphql-engine:v1.3.1
image: hasura/graphql-engine:v1.3.2
ports:
- "8080:8080"
depends_on:

View File

@@ -3,4 +3,4 @@ docker run -d -p 8080:8080 \
-e HASURA_GRAPHQL_DATABASE_URL=postgres://username:password@hostname:port/dbname \
-e HASURA_GRAPHQL_ENABLE_CONSOLE=true \
-e HASURA_GRAPHQL_DEV_MODE=true \
hasura/graphql-engine:v1.3.1
hasura/graphql-engine:v1.3.2

View File

@@ -16,7 +16,7 @@ spec:
spec:
containers:
- name: graphql-engine
image: hasura/graphql-engine:v1.3.1
image: hasura/graphql-engine:v1.3.2
ports:
- containerPort: 8080
readinessProbe:

View File

@@ -18,7 +18,7 @@ spec:
app: hasura
spec:
containers:
- image: hasura/graphql-engine:v1.3.1
- image: hasura/graphql-engine:v1.3.2
imagePullPolicy: IfNotPresent
name: hasura
env:

View File

@@ -1,4 +1,4 @@
FROM hasura/graphql-engine:v1.3.1
FROM hasura/graphql-engine:v1.3.2
# set an env var to let the cli know that
# it is running in server environment

View File

@@ -2,7 +2,7 @@ FROM hasura/haskell-docker-packager:20190731 as packager
WORKDIR /tmp
RUN apt-get update && apt-get download libstdc++6
FROM hasura/graphql-engine:v1.3.1
FROM hasura/graphql-engine:v1.3.2
# install libstdc++6 from .deb file
COPY --from=packager /tmp/libstdc++6* .

View File

@@ -215,7 +215,7 @@ if [ "$MODE" = "graphql-engine" ]; then
echo_pretty " $ $0 postgres"
echo_pretty ""
RUN_INVOCATION=(cabal new-run --project-file=cabal.project.dev-sh --RTS --
exe:graphql-engine +RTS -N -T -s -RTS serve
--enable-console --console-assets-dir "$PROJECT_ROOT/console/static/dist"
)
@@ -371,9 +371,11 @@ elif [ "$MODE" = "test" ]; then
# We'll get an hpc error if these exist; they will be deleted below too:
rm -f graphql-engine-tests.tix graphql-engine.tix graphql-engine-combined.tix
# Various tests take some configuration from the environment; set these up here:
export EVENT_WEBHOOK_HEADER="MyEnvValue"
export WEBHOOK_FROM_ENV="http://127.0.0.1:5592"
export SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN="http://127.0.0.1:5594"
export REMOTE_SCHEMAS_WEBHOOK_DOMAIN="http://127.0.0.1:5000"
# It's better UX to build first (possibly failing) before trying to launch
# PG, but make sure that new-run uses the exact same build plan, else we risk

View File

@@ -483,6 +483,7 @@ executable graphql-engine
, pg-client
, text
, text-conversions
, time
, unix
, ekg-core

View File

@@ -3,7 +3,9 @@
module Main where
import Control.Exception
import Data.Int (Int64)
import Data.Text.Conversions (convertText)
import Data.Time.Clock.POSIX (getPOSIXTime)
import Hasura.App
import Hasura.Logging (Hasura)
@@ -42,7 +44,17 @@ runApp env (HGEOptionsG rci hgeCmd) =
withVersion $$(getVersionFromEnvironment) $ case hgeCmd of
HCServe serveOptions -> do
(initCtx, initTime) <- initialiseCtx env hgeCmd rci
ekgStore <- liftIO EKG.newStore
ekgStore <- liftIO do
s <- EKG.newStore
EKG.registerGcMetrics s
let getTimeMs :: IO Int64
getTimeMs = (round . (* 1000)) `fmap` getPOSIXTime
EKG.registerCounter "ekg.server_timestamp_ms" getTimeMs s
pure s
let shutdownApp = return ()
-- Catches the SIGTERM signal and initiates a graceful shutdown.
-- Graceful shutdown for regular HTTP requests is already implemented in
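
These registrations move to startup, out of the developer-API-gated block in mkWaiApp that a later hunk removes, so GC metrics and the timestamp counter are registered regardless of whether the developer API is enabled (note dev.sh runs the server with +RTS -T, which GHC needs to produce GC stats, and the new time build dependency). The ekg.server_timestamp_ms value is just the POSIX clock scaled to milliseconds; the same computation as a small Python sketch:

    import time

    def get_time_ms() -> int:
        # Mirrors getTimeMs above: round(posix_seconds * 1000).
        return round(time.time() * 1000)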

View File

@@ -379,7 +379,6 @@ runHGEServer env ServeOptions{..} InitCtx{..} pgExecCtx initTime shutdownApp pos
lockedEventsCtx <- liftIO $ atomically initLockedEventsCtx
-- prepare event triggers data
prepareEvents _icPgPool logger
eventEngineCtx <- liftIO $ atomically $ initEventEngineCtx maxEvThrds fetchI
unLogger logger $ mkGenericStrLog LevelInfo "event_triggers" "starting workers"
@@ -441,8 +440,10 @@ runHGEServer env ServeOptions{..} InitCtx{..} pgExecCtx initTime shutdownApp pos
liftIO $ Warp.runSettings warpSettings app
where
-- | prepareEvents is a function to unlock all the events that are
-- locked and unprocessed, which is called while hasura is started.
-- | prepareScheduledEvents is a function to unlock all the scheduled trigger
-- events that are locked and unprocessed, which is called while hasura is
-- started.
--
-- Locked and unprocessed events can occur in 2 ways
-- 1.
-- Hasura's shutdown was not graceful in which all the fetched
@@ -452,12 +453,6 @@ runHGEServer env ServeOptions{..} InitCtx{..} pgExecCtx initTime shutdownApp pos
-- There is another hasura instance which is processing events and
-- it will lock events to process them.
-- So, unlocking all the locked events might re-deliver an event (due to #2).
prepareEvents pool (Logger logger) = do
liftIO $ logger $ mkGenericStrLog LevelInfo "event_triggers" "preparing data"
res <- liftIO $ runTx pool (Q.ReadCommitted, Nothing) unlockAllEvents
either (printErrJExit EventSubSystemError) return res
-- | prepareScheduledEvents is like prepareEvents, but for scheduled triggers
prepareScheduledEvents pool (Logger logger) = do
liftIO $ logger $ mkGenericStrLog LevelInfo "scheduled_triggers" "preparing data"
res <- liftIO $ runTx pool (Q.ReadCommitted, Nothing) unlockAllLockedScheduledEvents

View File

@@ -417,10 +417,11 @@ fetchEvents :: Int -> Q.TxE QErr [Event]
fetchEvents limitI =
map uncurryEvent <$> Q.listQE defaultTxErrorHandler [Q.sql|
UPDATE hdb_catalog.event_log
SET locked = 't'
SET locked = NOW()
WHERE id IN ( SELECT l.id
FROM hdb_catalog.event_log l
WHERE l.delivered = 'f' and l.error = 'f' and l.locked = 'f'
WHERE l.delivered = 'f' and l.error = 'f'
and (l.locked IS NULL or l.locked < (NOW() - interval '30 minute'))
and (l.next_retry_at is NULL or l.next_retry_at <= now())
and l.archived = 'f'
ORDER BY created_at
@@ -457,14 +458,14 @@ insertInvocation invo = do
setSuccess :: Event -> Q.TxE QErr ()
setSuccess e = Q.unitQE defaultTxErrorHandler [Q.sql|
UPDATE hdb_catalog.event_log
SET delivered = 't', next_retry_at = NULL, locked = 'f'
SET delivered = 't', next_retry_at = NULL, locked = NULL
WHERE id = $1
|] (Identity $ eId e) True
setError :: Event -> Q.TxE QErr ()
setError e = Q.unitQE defaultTxErrorHandler [Q.sql|
UPDATE hdb_catalog.event_log
SET error = 't', next_retry_at = NULL, locked = 'f'
SET error = 't', next_retry_at = NULL, locked = NULL
WHERE id = $1
|] (Identity $ eId e) True
@@ -472,7 +473,7 @@ setRetry :: Event -> UTCTime -> Q.TxE QErr ()
setRetry e time =
Q.unitQE defaultTxErrorHandler [Q.sql|
UPDATE hdb_catalog.event_log
SET next_retry_at = $1, locked = 'f'
SET next_retry_at = $1, locked = NULL
WHERE id = $2
|] (time, eId e) True
@@ -480,8 +481,8 @@ unlockAllEvents :: Q.TxE QErr ()
unlockAllEvents =
Q.unitQE defaultTxErrorHandler [Q.sql|
UPDATE hdb_catalog.event_log
SET locked = 'f'
WHERE locked = 't'
SET locked = NULL
WHERE locked IS NOT NULL
|] () True
toInt64 :: (Integral a) => a -> Int64
@@ -504,12 +505,12 @@ unlockEvents eventIds =
[Q.sql|
WITH "cte" AS
(UPDATE hdb_catalog.event_log
SET locked = 'f'
SET locked = NULL
WHERE id = ANY($1::text[])
-- only unlock those events that have been locked, it's possible
-- that an event has been processed but not yet been removed from
-- the saved locked events, which will lead to a double send
AND locked = 't'
AND locked IS NOT NULL
RETURNING *)
SELECT count(*) FROM "cte"
|] (Identity $ EventIdArray eventIds) True
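
Taken together, these queries change locked from a boolean to a nullable timestamp (the catalog migration appears further down): NULL means unlocked, and a non-NULL lock older than 30 minutes is treated as stale, e.g. left behind by a worker that died without unlocking, so the event becomes fetchable again. A sketch of the dequeue predicate from fetchEvents, assuming UTC timestamps (illustrative; the SQL above is authoritative):

    from datetime import datetime, timedelta, timezone

    LOCK_TIMEOUT = timedelta(minutes=30)

    def is_fetchable(delivered, error, archived, locked_at, next_retry_at, now=None):
        # Mirrors the WHERE clause: not done, not freshly locked,
        # and past any retry backoff.
        now = now or datetime.now(timezone.utc)
        unlocked = locked_at is None or locked_at < now - LOCK_TIMEOUT
        retry_due = next_retry_at is None or next_retry_at <= now
        return not (delivered or error or archived) and unlocked and retry_due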

View File

@@ -158,7 +158,7 @@ fetchEvent :: EventId -> Q.TxE QErr (EventId, Bool)
fetchEvent eid = do
events <- Q.listQE defaultTxErrorHandler
[Q.sql|
SELECT l.id, l.locked
SELECT l.id, l.locked IS NOT NULL AND l.locked >= (NOW() - interval '30 minute')
FROM hdb_catalog.event_log l
JOIN hdb_catalog.event_triggers e
ON l.trigger_name = e.name
@@ -332,7 +332,9 @@ getWebhookInfoFromConf
-> WebhookConf
-> m WebhookConfInfo
getWebhookInfoFromConf env wc = case wc of
WCValue w -> return $ WebhookConfInfo wc w
WCValue w -> do
resolvedWebhook <- resolveWebhook env w
return $ WebhookConfInfo wc $ unResolvedWebhook resolvedWebhook
WCEnv we -> do
envVal <- getEnv env we
return $ WebhookConfInfo wc envVal
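
With this change an inline webhook value (WCValue) goes through the same template resolution as env-based ones, so a definition like webhook: "{{WEBHOOK_FROM_ENV}}/trigger" (see the test fixture below) resolves against the server's environment. A rough Python sketch of both branches, assuming a simple {{NAME}} placeholder syntax (helper names are hypothetical):

    import os
    import re

    def render_url_template(template: str, env=os.environ) -> str:
        # Replace each {{VAR}} placeholder with its value from the environment.
        def substitute(match):
            name = match.group(1)
            if name not in env:
                raise ValueError(f"environment variable {name} is not set")
            return env[name]
        return re.sub(r"\{\{\s*([A-Za-z_][A-Za-z0-9_]*)\s*\}\}", substitute, template)

    def webhook_info_from_conf(conf: dict, env=os.environ) -> str:
        if "from_env" in conf:                           # WCEnv: env var holds the URL
            return env[conf["from_env"]]
        return render_url_template(conf["value"], env)   # WCValue: render the inline template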

View File

@@ -56,6 +56,7 @@ import Hasura.RQL.DDL.Headers ()
import Control.Lens (makeLenses)
import Data.Aeson
import Data.Aeson.Casing
import Data.Bifunctor (bimap)
import Data.Aeson.TH
import Data.URL.Template
import Instances.TH.Lift ()
@@ -305,6 +306,11 @@ instance FromJSON InputWebhook where
Left e -> fail $ "Parsing URL template failed: " ++ e
Right v -> pure $ InputWebhook v
instance Q.FromCol InputWebhook where
fromCol bs = do
urlTemplate <- parseURLTemplate <$> Q.fromCol bs
bimap (\e -> "Parsing URL template failed: " <> T.pack e) InputWebhook urlTemplate
resolveWebhook :: QErrM m => Env.Environment -> InputWebhook -> m ResolvedWebhook
resolveWebhook env (InputWebhook urlTemplate) = do
let eitherRenderedTemplate = renderURLTemplate env urlTemplate

View File

@@ -30,7 +30,7 @@ import Data.Aeson.TH
import Hasura.Incremental (Cacheable)
import Hasura.Prelude
import Hasura.RQL.DDL.Headers
import Hasura.RQL.Types.Common (NonEmptyText (..))
import Hasura.RQL.Types.Common (NonEmptyText (..), InputWebhook)
import Hasura.SQL.Types
import Language.Haskell.TH.Syntax (Lift)
@@ -104,18 +104,21 @@ data EventHeaderInfo
instance NFData EventHeaderInfo
$(deriveToJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''EventHeaderInfo)
data WebhookConf = WCValue T.Text | WCEnv T.Text
data WebhookConf = WCValue InputWebhook | WCEnv T.Text
deriving (Show, Eq, Generic, Lift)
instance NFData WebhookConf
instance Cacheable WebhookConf
instance ToJSON WebhookConf where
toJSON (WCValue w) = String w
toJSON (WCValue w) = toJSON w
toJSON (WCEnv wEnv) = object ["from_env" .= wEnv ]
instance FromJSON WebhookConf where
parseJSON (Object o) = WCEnv <$> o .: "from_env"
parseJSON (String t) = pure $ WCValue t
parseJSON t@(String _) =
case (fromJSON t) of
Error s -> fail s
Success a -> pure $ WCValue a
parseJSON _ = fail "one of string or object must be provided for webhook"
data WebhookConfInfo
@@ -135,7 +138,7 @@ data CreateEventTriggerQuery
, cetqDelete :: !(Maybe SubscribeOpSpec)
, cetqEnableManual :: !(Maybe Bool)
, cetqRetryConf :: !(Maybe RetryConf)
, cetqWebhook :: !(Maybe T.Text)
, cetqWebhook :: !(Maybe InputWebhook)
, cetqWebhookFromEnv :: !(Maybe T.Text)
, cetqHeaders :: !(Maybe [HeaderConf])
, cetqReplace :: !Bool
@@ -203,7 +206,7 @@ data EventTriggerConf
= EventTriggerConf
{ etcName :: !TriggerName
, etcDefinition :: !TriggerOpsDef
, etcWebhook :: !(Maybe T.Text)
, etcWebhook :: !(Maybe InputWebhook)
, etcWebhookFromEnv :: !(Maybe T.Text)
, etcRetryConf :: !RetryConf
, etcHeaders :: !(Maybe [HeaderConf])

View File

@@ -13,7 +13,7 @@ import qualified Data.Environment as Env
import Hasura.Incremental (Cacheable)
import Hasura.RQL.DDL.Headers (HeaderConf (..))
import Hasura.RQL.Types.Common (NonEmptyText (..))
import Hasura.RQL.Types.Common
import Hasura.RQL.Types.Error
import Hasura.SQL.Types
@@ -42,7 +42,7 @@ $(J.deriveJSON (J.aesonDrop 2 J.snakeCase) ''RemoteSchemaInfo)
data RemoteSchemaDef
= RemoteSchemaDef
{ _rsdUrl :: !(Maybe N.URI)
{ _rsdUrl :: !(Maybe InputWebhook)
, _rsdUrlFromEnv :: !(Maybe UrlFromEnv)
, _rsdHeaders :: !(Maybe [HeaderConf])
, _rsdForwardClientHeaders :: !Bool
@@ -94,8 +94,11 @@ validateRemoteSchemaDef
-> m RemoteSchemaInfo
validateRemoteSchemaDef env (RemoteSchemaDef mUrl mUrlEnv hdrC fwdHdrs mTimeout) =
case (mUrl, mUrlEnv) of
(Just url, Nothing) ->
return $ RemoteSchemaInfo url hdrs fwdHdrs timeout
(Just url, Nothing) -> do
resolvedWebhookTxt <- unResolvedWebhook <$> resolveWebhook env url
case N.parseURI $ T.unpack resolvedWebhookTxt of
Nothing -> throw400 InvalidParams $ "not a valid URI: " <> resolvedWebhookTxt
Just uri -> return $ RemoteSchemaInfo uri hdrs fwdHdrs timeout
(Nothing, Just urlEnv) -> do
url <- getUrlFromEnv env urlEnv
return $ RemoteSchemaInfo url hdrs fwdHdrs timeout
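
Remote schema URLs get the same treatment: the templated _rsdUrl is rendered first and only then parsed as a URI, with a 400 InvalidParams error for anything that does not parse. A sketch of the added validation step, reusing the render_url_template helper from the earlier sketch:

    from urllib.parse import urlparse

    def validate_remote_schema_url(template: str, env) -> str:
        resolved = render_url_template(template, env)
        parsed = urlparse(resolved)
        if not parsed.scheme or not parsed.netloc:
            raise ValueError(f"not a valid URI: {resolved}")
        return resolved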

View File

@@ -11,10 +11,8 @@ import Hasura.Prelude hiding (get, put)
import Control.Monad.Stateless
import Data.Aeson hiding (json)
import Data.Int (Int64)
import Data.IORef
import Data.Time.Clock (UTCTime)
import Data.Time.Clock.POSIX (getPOSIXTime)
import Network.Mime (defaultMimeLookup)
import System.FilePath (joinPath, takeFileName)
import Web.Spock.Core ((<//>))
@@ -635,10 +633,6 @@ mkWaiApp env isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpMana
, scResponseInternalErrorsConfig = responseErrorsConfig
}
when (isDeveloperAPIEnabled serverCtx) $ do
liftIO $ EKG.registerGcMetrics ekgStore
liftIO $ EKG.registerCounter "ekg.server_timestamp_ms" getTimeMs ekgStore
spockApp <- liftWithStateless $ \lowerIO ->
Spock.spockAsApp $ Spock.spockT lowerIO $
httpApp corsCfg serverCtx enableConsole consoleAssetsDir enableTelemetry
@@ -651,9 +645,6 @@ mkWaiApp env isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpMana
return $ HasuraApp waiApp schemaCacheRef cacheBuiltTime stopWSServer
where
getTimeMs :: IO Int64
getTimeMs = (round . (* 1000)) `fmap` getPOSIXTime
-- initialiseCache :: m (E.PlanCache, SchemaCacheRef)
initialiseCache :: m SchemaCacheRef
initialiseCache = do

View File

@@ -1 +1 @@
37
38

View File

@@ -30,3 +30,4 @@ v1.3.0-beta.4 36
v1.3.0 36
v1.3.1-beta.1 37
v1.3.1 37
v1.3.2 37

View File

@@ -302,7 +302,8 @@ CREATE TABLE hdb_catalog.event_log
error BOOLEAN NOT NULL DEFAULT FALSE,
tries INTEGER NOT NULL DEFAULT 0,
created_at TIMESTAMP DEFAULT NOW(),
locked BOOLEAN NOT NULL DEFAULT FALSE,
/* when locked IS NULL the event is unlocked and can be processed */
locked TIMESTAMPTZ,
next_retry_at TIMESTAMP,
archived BOOLEAN NOT NULL DEFAULT FALSE
);

View File

@@ -0,0 +1,3 @@
ALTER TABLE hdb_catalog.event_log ALTER COLUMN locked DROP DEFAULT;
ALTER TABLE hdb_catalog.event_log ALTER COLUMN locked DROP NOT NULL;
ALTER TABLE hdb_catalog.event_log ALTER COLUMN locked TYPE TIMESTAMPTZ USING CASE WHEN locked THEN NOW() ELSE NULL END;

View File

@@ -0,0 +1,3 @@
ALTER TABLE hdb_catalog.event_log ALTER COLUMN locked TYPE BOOLEAN USING locked IS NOT NULL;
ALTER TABLE hdb_catalog.event_log ALTER COLUMN locked SET NOT NULL;
ALTER TABLE hdb_catalog.event_log ALTER COLUMN locked SET DEFAULT false;

View File

@@ -0,0 +1,26 @@
type: bulk
args:
- type: run_sql
args:
sql: |
create table hge_tests.test_t1(
c1 int,
c2 text
);
- type: track_table
args:
schema: hge_tests
name: test_t1
- type: create_event_trigger
args:
name: t1_all
table:
schema: hge_tests
name: test_t1
insert:
columns: "*"
update:
columns: "*"
delete:
columns: "*"
webhook: "{{WEBHOOK_FROM_ENV}}/trigger"

View File

@@ -0,0 +1,9 @@
type: bulk
args:
- type: delete_event_trigger
args:
name: t1_all
- type: run_sql
args:
sql: |
drop table hge_tests.test_t1

View File

@@ -19,5 +19,5 @@ args:
name: simple2-graphql
comment: testing
definition:
url: http://localhost:5000/user-graphql
url: "{{REMOTE_SCHEMAS_WEBHOOK_DOMAIN}}/user-graphql"
forward_client_headers: false

View File

@@ -535,6 +535,43 @@ class TestWebhookEnv(object):
assert st_code == 200, resp
check_event(hge_ctx, evts_webhook, "t1_all", table, "DELETE", exp_ev_data)
@usefixtures('per_method_tests_db_state')
class TestWebhookTemplateURL(object):
@classmethod
def dir(cls):
return 'queries/event_triggers/webhook_template_url'
def test_basic(self, hge_ctx, evts_webhook):
table = {"schema": "hge_tests", "name": "test_t1"}
init_row = {"c1": 1, "c2": "hello"}
exp_ev_data = {
"old": None,
"new": init_row
}
st_code, resp = insert(hge_ctx, table, init_row)
assert st_code == 200, resp
check_event(hge_ctx, evts_webhook, "t1_all", table, "INSERT", exp_ev_data, webhook_path = '/trigger')
where_exp = {"c1": 1}
set_exp = {"c2": "world"}
exp_ev_data = {
"old": init_row,
"new": {"c1": 1, "c2": "world"}
}
st_code, resp = update(hge_ctx, table, where_exp, set_exp)
assert st_code == 200, resp
check_event(hge_ctx, evts_webhook, "t1_all", table, "UPDATE", exp_ev_data, webhook_path = '/trigger')
exp_ev_data = {
"old": {"c1": 1, "c2": "world"},
"new": None
}
st_code, resp = delete(hge_ctx, table, where_exp)
assert st_code == 200, resp
check_event(hge_ctx, evts_webhook, "t1_all", table, "DELETE", exp_ev_data, webhook_path = '/trigger')
@usefixtures('per_method_tests_db_state')
class TestSessionVariables(object):