Mirror of https://github.com/hasura/graphql-engine.git (synced 2024-12-14 17:02:49 +03:00)

Commit 6eb7a7dd8a: Merge branch 'master' into scheduled-triggers-created-at-bug-5272
@@ -178,7 +178,7 @@ cd $PYTEST_ROOT
 RUN_WEBHOOK_TESTS=true

-for port in 8080 8081 9876 5592
+for port in 8080 8081 9876 5592 5000 5594
 do
 	fail_if_port_busy $port
 done
@@ -205,6 +205,7 @@ fi
 export WEBHOOK_FROM_ENV="http://127.0.0.1:5592"
 export SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN="http://127.0.0.1:5594"
 export HASURA_GRAPHQL_STRINGIFY_NUMERIC_TYPES=true
+export REMOTE_SCHEMAS_WEBHOOK_DOMAIN="http://127.0.0.1:5000"

 HGE_PIDS=""
 WH_PID=""
@@ -54,6 +54,7 @@ This release contains the [PDV refactor (#4111)](https://github.com/hasura/graph
 - server: miscellaneous description changes (#4111)
 - server: treat the absence of `backend_only` configuration and `backend_only: false` equally (closing #5059) (#4111)
 - server: allow remote relationships joining `type` column with `[type]` input argument as spec allows this coercion (fixes #5133)
+- server: add action-like URL templating for event triggers and remote schemas (fixes #2483)
 - server: change `created_at` column type from `timestamp` to `timestamptz` for scheduled triggers tables (fix #5722)
 - console: allow user to cascade Postgres dependencies when dropping Postgres objects (close #5109) (#5248)
 - console: mark inconsistent remote schemas in the UI (close #5093) (#5181)
@@ -55,12 +55,13 @@ const setReadOnlyMode = data => ({
   data,
 });

-export const fetchPostgresVersion = dispatch => {
+export const fetchPostgresVersion = (dispatch, getState) => {
   const req = getRunSqlQuery('SELECT version()');
   const options = {
     method: 'POST',
     credentials: globalCookiePolicy,
     body: JSON.stringify(req),
+    headers: getState().tables.dataHeaders,
   };

   return dispatch(requestAction(Endpoints.query, options)).then(
@@ -272,6 +272,8 @@ const analyzeFetcher = (headers, mode) => {

 const changeRequestHeader = (index, key, newValue, isDisabled) => {
   return (dispatch, getState) => {
+    websocketSubscriptionClient = null;
+
     const currentState = getState().apiexplorer;

     const updatedHeader = {
@@ -24,16 +24,17 @@ Step 2: Connect new/existing database
-------------------------------------

 - To use an existing database, choose ``I have an existing Postgres database``.
-- To create a new database, choose ``Try a free database with Heroku``.
+- To try out with a new database, choose ``Try a free database with Heroku``.

 .. thumbnail:: /img/graphql/cloud/getting-started/connect-db.png
    :alt: Connect new or existing database
    :width: 591px

-Step 2a: Enter database URL (for existing database)
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+Step 2.1: Enter database URL (for existing database)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-If you chose ``I have an existing Postgres database`` in :ref:`Step 2 <cloud_connect_db>`, enter a database URL.
+If you chose ``I have an existing Postgres database`` in :ref:`Step 2 <cloud_connect_db>`, enter
+your database connection URL.

 .. thumbnail:: /img/graphql/cloud/getting-started/connect-existing-db.png
    :alt: Enter URL for existing database
@@ -56,38 +57,33 @@ Click ``Create Project``.
    :group: create
    :class: inline-block

-Next steps
-----------
+Step 4: Try Hasura out
+----------------------

-Once you've created your project, you can get started with building with Hasura or manage your project.
+Click ``Launch Console`` to open the Hasura console in your browser and
+:ref:`make your first GraphQL query <first_graphql_query>` or :ref:`set up your first event trigger <first_event_trigger>`.

-.. contents::
-  :backlinks: none
-  :depth: 1
-  :local:
+.. thumbnail:: /img/graphql/cloud/getting-started/project-functionalities.png
+   :alt: Project actions
+   :width: 860px

-Explore the Hasura console
-^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Click ``Launch Console`` to open the Hasura console in your browser and :ref:`make your first GraphQL query <first_graphql_query>` or :ref:`set up your first event trigger <first_event_trigger>`.
-
-You can navigate to the ``Pro`` tab to check out the Pro features that Hasura Cloud has set up for you.
+You can navigate to the ``Pro`` tab in the console to check out the Pro features that Hasura Cloud has set up for you.

 .. thumbnail:: /img/graphql/cloud/metrics/pro-tab-overview.png
    :alt: Hasura Console: Pro tab
-   :width: 1118px
+   :width: 1000px

-Manage your project
-^^^^^^^^^^^^^^^^^^^
+Next steps
+----------

-Click the gear icon to :ref:`manage your project <manage_project>` (e.g. add collaborators, env vars or custom domains).
+You can check out our `30-Minute Hasura Basics Course <https://hasura.io/learn/graphql/hasura/introduction/>`__
+and other `GraphQL & Hasura Courses <https://hasura.io/learn/>`__ for a more detailed introduction to Hasura.

-Add an admin secret
-^^^^^^^^^^^^^^^^^^^
-
-.. thumbnail:: /img/graphql/cloud/getting-started/project-functionalities.png
-   :alt: Project actions
-   :width: 860px
-
-:ref:`Add an admin secret <secure_project>` to make sure that your GraphQL endpoint and the Hasura console are not publicly accessible.
+You can also click the gear icon to :ref:`manage your Hasura Cloud project <manage_project>` (e.g. add
+collaborators, env vars or custom domains) and :ref:`add an admin secret <secure_project>` to make sure that your
+GraphQL endpoint and the Hasura console are not publicly accessible.
@@ -63,14 +63,13 @@ Step 3: Open the Hasura console

 Head to ``http://localhost:8080/console`` to open the Hasura console.

-Hello World (GraphQL or event triggers)
----------------------------------------
+Step 4: Try Hasura out
+----------------------

-Make your :ref:`first graphql query <first_graphql_query>`
-
-OR
-
-Set up your :ref:`first event trigger <first_event_trigger>`
+Make your :ref:`first graphql query <first_graphql_query>` or set up your :ref:`first event trigger <first_event_trigger>`
+
+You can also check out our `30-Minute Hasura Basics Course <https://hasura.io/learn/graphql/hasura/introduction/>`__
+and other `GraphQL & Hasura Courses <https://hasura.io/learn/>`__ for a more detailed introduction to Hasura.

 Advanced
 --------
@@ -26,8 +26,6 @@ Guides / Tutorials / Resources
    Code Editor Integrations <code-editor-integrations/index>
    MySQL preview <mysql-preview>

-
-
 .. note::

   If you are new to GraphQL you can check out some front-end and back-end tutorials for building applications using
@@ -48,8 +48,8 @@ reach particular checkpoints. They can be used to roll-back the DB schema as wel

 .. note::

-  You can manage database migrations using external tools like knex, TypeORM, Django/Rails migrations, etc.
-  as well.
+  You can choose to manage database migrations using external tools like knex, TypeORM,
+  Django/Rails migrations, etc. as well.

 Hasura metadata files
 ^^^^^^^^^^^^^^^^^^^^^
@@ -60,9 +60,6 @@ in the snapshot.

 Hasura metadata can be exported and imported as a whole.

-.. note::
-
-
 Setting up migrations
 ---------------------
@@ -371,9 +371,11 @@ elif [ "$MODE" = "test" ]; then
   # We'll get an hpc error if these exist; they will be deleted below too:
   rm -f graphql-engine-tests.tix graphql-engine.tix graphql-engine-combined.tix

+  # Various tests take some configuration from the environment; set these up here:
   export EVENT_WEBHOOK_HEADER="MyEnvValue"
   export WEBHOOK_FROM_ENV="http://127.0.0.1:5592"
   export SCHEDULED_TRIGGERS_WEBHOOK_DOMAIN="http://127.0.0.1:5594"
+  export REMOTE_SCHEMAS_WEBHOOK_DOMAIN="http://127.0.0.1:5000"

   # It's better UX to build first (possibly failing) before trying to launch
   # PG, but make sure that new-run uses the exact same build plan, else we risk
@@ -483,6 +483,7 @@ executable graphql-engine
     , pg-client
     , text
     , text-conversions
     , time
     , unix
+    , ekg-core
@@ -3,7 +3,9 @@
 module Main where

 import Control.Exception
+import Data.Int (Int64)
 import Data.Text.Conversions (convertText)
+import Data.Time.Clock.POSIX (getPOSIXTime)

 import Hasura.App
 import Hasura.Logging (Hasura)
@@ -42,7 +44,17 @@ runApp env (HGEOptionsG rci hgeCmd) =
   withVersion $$(getVersionFromEnvironment) $ case hgeCmd of
     HCServe serveOptions -> do
       (initCtx, initTime) <- initialiseCtx env hgeCmd rci
-      ekgStore <- liftIO EKG.newStore
+      ekgStore <- liftIO do
+        s <- EKG.newStore
+        EKG.registerGcMetrics s
+
+        let getTimeMs :: IO Int64
+            getTimeMs = (round . (* 1000)) `fmap` getPOSIXTime
+
+        EKG.registerCounter "ekg.server_timestamp_ms" getTimeMs s
+        pure s
+
       let shutdownApp = return ()
       -- Catches the SIGTERM signal and initiates a graceful shutdown.
       -- Graceful shutdown for regular HTTP requests is already implemented in
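Aside: the new ekgStore block above registers GHC's garbage-collector metrics and a wall-clock counter in a single ekg-core store before the server starts. A minimal standalone sketch of the same pattern (only the ekg-core package is assumed; main and the printout are illustrative):

    {-# LANGUAGE OverloadedStrings #-}
    import           Data.Int              (Int64)
    import           Data.Time.Clock.POSIX (getPOSIXTime)
    import qualified System.Metrics        as EKG

    main :: IO ()
    main = do
      -- One metrics store for the whole process; GC metrics are only
      -- populated when the program runs with the RTS -T flag.
      store <- EKG.newStore
      EKG.registerGcMetrics store
      -- Expose the current wall-clock time in milliseconds, matching the
      -- "ekg.server_timestamp_ms" counter registered in the diff above.
      let getTimeMs :: IO Int64
          getTimeMs = round . (* 1000) <$> getPOSIXTime
      EKG.registerCounter "ekg.server_timestamp_ms" getTimeMs store
      -- A real server would now hand `store` to its monitoring endpoint.
      getTimeMs >>= print

Creating the store up front (rather than inside mkWaiApp, as the later hunks remove) lets the same store be shared by every subsystem that wants to publish metrics.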
@@ -379,7 +379,6 @@ runHGEServer env ServeOptions{..} InitCtx{..} pgExecCtx initTime shutdownApp pos
   lockedEventsCtx <- liftIO $ atomically initLockedEventsCtx

   -- prepare event triggers data
-  prepareEvents _icPgPool logger
   eventEngineCtx <- liftIO $ atomically $ initEventEngineCtx maxEvThrds fetchI
   unLogger logger $ mkGenericStrLog LevelInfo "event_triggers" "starting workers"
@@ -441,8 +440,10 @@ runHGEServer env ServeOptions{..} InitCtx{..} pgExecCtx initTime shutdownApp pos
     liftIO $ Warp.runSettings warpSettings app

   where
-    -- | prepareEvents is a function to unlock all the events that are
-    -- locked and unprocessed, which is called while hasura is started.
+    -- | prepareScheduledEvents is a function to unlock all the scheduled trigger
+    -- events that are locked and unprocessed, which is called while hasura is
+    -- started.
+    --
     -- Locked and unprocessed events can occur in 2 ways
     -- 1.
     -- Hasura's shutdown was not graceful in which all the fetched
@@ -452,12 +453,6 @@ runHGEServer env ServeOptions{..} InitCtx{..} pgExecCtx initTime shutdownApp pos
     -- There is another hasura instance which is processing events and
     -- it will lock events to process them.
     -- So, unlocking all the locked events might re-deliver an event(due to #2).
-    prepareEvents pool (Logger logger) = do
-      liftIO $ logger $ mkGenericStrLog LevelInfo "event_triggers" "preparing data"
-      res <- liftIO $ runTx pool (Q.ReadCommitted, Nothing) unlockAllEvents
-      either (printErrJExit EventSubSystemError) return res
-
-    -- | prepareScheduledEvents is like prepareEvents, but for scheduled triggers
     prepareScheduledEvents pool (Logger logger) = do
       liftIO $ logger $ mkGenericStrLog LevelInfo "scheduled_triggers" "preparing data"
      res <- liftIO $ runTx pool (Q.ReadCommitted, Nothing) unlockAllLockedScheduledEvents
@@ -204,13 +204,7 @@ processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx
       eventsNext <- LA.withAsync popEventsBatch $ \eventsNextA -> do
         -- process approximately in order, minding HASURA_GRAPHQL_EVENTS_HTTP_POOL_SIZE:
         forM_ events $ \event -> do
-          tracingCtx <- liftIO (Tracing.extractEventContext (eEvent event))
-          let runTraceT = maybe
-                Tracing.runTraceT
-                Tracing.runTraceTInContext
-                tracingCtx
           t <- processEvent event
-            & runTraceT "process event"
             & withEventEngineCtx eeCtx
             & flip runReaderT (logger, httpMgr)
             & LA.async
@@ -247,13 +241,20 @@ processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx
          , MonadReader r io
          , Has HTTP.Manager r
          , Has (L.Logger L.Hasura) r
-         , Tracing.MonadTrace io
+         , Tracing.HasReporter io
          )
       => Event -> io ()
     processEvent e = do
       cache <- liftIO getSchemaCache
-      let meti = getEventTriggerInfoFromEvent cache e
-      case meti of
+
+      tracingCtx <- liftIO (Tracing.extractEventContext (eEvent e))
+      let spanName eti = "Event trigger: " <> unNonEmptyText (unTriggerName (etiName eti))
+          runTraceT = maybe
+            Tracing.runTraceT
+            Tracing.runTraceTInContext
+            tracingCtx
+
+      case getEventTriggerInfoFromEvent cache e of
         Left err -> do
           -- This rare error can happen in the following known cases:
           -- i) schema cache is not up-to-date (due to some bug, say during schema syncing across multiple instances)
@@ -264,7 +265,7 @@ processEventQueue logger logenv httpMgr pool getSchemaCache eeCtx@EventEngineCtx
           -- For such an event, we unlock the event and retry after a minute
           setRetry e (addUTCTime 60 currentTime)
             >>= flip onLeft logQErr
-        Right eti -> do
+        Right eti -> runTraceT (spanName eti) do
           let webhook = T.unpack $ wciCachedValue $ etiWebhookInfo eti
               retryConf = etiRetryConf eti
               timeoutSeconds = fromMaybe defaultTimeoutSeconds (rcTimeoutSec retryConf)
@@ -417,10 +418,11 @@ fetchEvents :: Int -> Q.TxE QErr [Event]
 fetchEvents limitI =
   map uncurryEvent <$> Q.listQE defaultTxErrorHandler [Q.sql|
       UPDATE hdb_catalog.event_log
-      SET locked = 't'
+      SET locked = NOW()
      WHERE id IN ( SELECT l.id
                    FROM hdb_catalog.event_log l
-                   WHERE l.delivered = 'f' and l.error = 'f' and l.locked = 'f'
+                   WHERE l.delivered = 'f' and l.error = 'f'
+                         and (l.locked IS NULL or l.locked < (NOW() - interval '30 minute'))
                          and (l.next_retry_at is NULL or l.next_retry_at <= now())
                          and l.archived = 'f'
                    ORDER BY created_at
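Aside: with locked now a nullable timestamp instead of a boolean, an event is claimable when it was never locked or when its lock is older than 30 minutes, so events locked by a crashed worker become eligible again without the old unlock-everything-at-startup step. A small self-contained sketch of that staleness rule (the 30-minute window mirrors the SQL interval above; the function names are illustrative, not Hasura's):

    import Data.Time (NominalDiffTime, UTCTime, addUTCTime, diffUTCTime)

    -- `locked` semantics after the change:
    --   Nothing         -> unlocked, claimable
    --   Just t, recent  -> locked by a live worker
    --   Just t, too old -> stale lock (e.g. worker crashed), claimable again
    staleAfter :: NominalDiffTime
    staleAfter = 30 * 60  -- seconds; mirrors the SQL "interval '30 minute'"

    claimable :: UTCTime -> Maybe UTCTime -> Bool
    claimable now = maybe True (\t -> diffUTCTime now t > staleAfter)

    main :: IO ()
    main = do
      let now     = read "2020-09-01 12:00:00 UTC" :: UTCTime
          longAgo = addUTCTime (negate (45 * 60)) now
      print (claimable now Nothing)        -- True: never locked
      print (claimable now (Just longAgo)) -- True: lock is stale
      print (claimable now (Just now))     -- False: freshly locked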
@@ -457,14 +459,14 @@ insertInvocation invo = do
 setSuccess :: Event -> Q.TxE QErr ()
 setSuccess e = Q.unitQE defaultTxErrorHandler [Q.sql|
               UPDATE hdb_catalog.event_log
-              SET delivered = 't', next_retry_at = NULL, locked = 'f'
+              SET delivered = 't', next_retry_at = NULL, locked = NULL
               WHERE id = $1
               |] (Identity $ eId e) True

 setError :: Event -> Q.TxE QErr ()
 setError e = Q.unitQE defaultTxErrorHandler [Q.sql|
               UPDATE hdb_catalog.event_log
-              SET error = 't', next_retry_at = NULL, locked = 'f'
+              SET error = 't', next_retry_at = NULL, locked = NULL
               WHERE id = $1
               |] (Identity $ eId e) True
@@ -472,7 +474,7 @@ setRetry :: Event -> UTCTime -> Q.TxE QErr ()
 setRetry e time =
   Q.unitQE defaultTxErrorHandler [Q.sql|
           UPDATE hdb_catalog.event_log
-          SET next_retry_at = $1, locked = 'f'
+          SET next_retry_at = $1, locked = NULL
           WHERE id = $2
           |] (time, eId e) True
@@ -480,8 +482,8 @@ unlockAllEvents :: Q.TxE QErr ()
 unlockAllEvents =
   Q.unitQE defaultTxErrorHandler [Q.sql|
           UPDATE hdb_catalog.event_log
-          SET locked = 'f'
-          WHERE locked = 't'
+          SET locked = NULL
+          WHERE locked IS NOT NULL
           |] () True

 toInt64 :: (Integral a) => a -> Int64
@@ -504,12 +506,12 @@ unlockEvents eventIds =
    [Q.sql|
      WITH "cte" AS
      (UPDATE hdb_catalog.event_log
-     SET locked = 'f'
+     SET locked = NULL
      WHERE id = ANY($1::text[])
      -- only unlock those events that have been locked, it's possible
      -- that an event has been processed but not yet been removed from
      -- the saved locked events, which will lead to a double send
-     AND locked = 't'
+     AND locked IS NOT NULL
      RETURNING *)
      SELECT count(*) FROM "cte"
    |] (Identity $ EventIdArray eventIds) True
@@ -386,7 +386,7 @@ processCronEvents logger logEnv httpMgr pgpool getSC lockedCronEvents = do
               ctiHeaders
               ctiComment
               createdAt
-      finally <- Tracing.runTraceT "scheduled event" . runExceptT $
+      finally <- runExceptT $
         runReaderT (processScheduledEvent logEnv pgpool scheduledEvent Cron) (logger, httpMgr)
       removeEventFromLockedEvents id' lockedCronEvents
       either logInternalError pure finally
@@ -443,7 +443,7 @@ processOneOffScheduledEvents env logger logEnv httpMgr pgpool lockedOneOffSchedu
                 headerInfo'
                 comment
                 createdAt
-      finally <- Tracing.runTraceT "scheduled event" . runExceptT $
+      finally <- runExceptT $
         runReaderT (processScheduledEvent logEnv pgpool scheduledEvent OneOff) $
           (logger, httpMgr)
       removeEventFromLockedEvents id' lockedOneOffScheduledEvents
@@ -480,15 +480,14 @@ processScheduledEvent ::
     , HasVersion
     , MonadIO m
     , MonadError QErr m
-    , Tracing.MonadTrace m
+    , Tracing.HasReporter m
     )
  => LogEnvHeaders
  -> Q.PGPool
  -> ScheduledEventFull
  -> ScheduledEventType
  -> m ()
-processScheduledEvent
-  logEnv pgpool se@ScheduledEventFull {..} type' = do
+processScheduledEvent logEnv pgpool se@ScheduledEventFull {..} type' = Tracing.runTraceT traceNote do
   currentTime <- liftIO getCurrentTime
   if convertDuration (diffUTCTime currentTime sefScheduledTime)
     > unNonNegativeDiffTime (strcToleranceSeconds sefRetryConf)
@@ -514,6 +513,8 @@ processScheduledEvent
         (processError pgpool se decodedHeaders type' webhookReqBodyJson)
         (processSuccess pgpool se decodedHeaders type' webhookReqBodyJson)
         res
+  where
+    traceNote = "Scheduled trigger" <> foldMap ((": " <>) . unNonEmptyText . unTriggerName) sefName

 processError
   :: (MonadIO m, MonadError QErr m)
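Aside: traceNote above uses foldMap over the optional trigger name, which yields mempty when the name is Nothing and the mapped suffix when it is present, so the span name degrades gracefully for unnamed one-off events. A tiny standalone illustration of the idiom (simplified to String):

    -- foldMap over Maybe: mempty for Nothing, the mapped value for Just.
    traceNote :: Maybe String -> String
    traceNote name = "Scheduled trigger" <> foldMap (": " <>) name

    main :: IO ()
    main = do
      putStrLn (traceNote Nothing)          -- Scheduled trigger
      putStrLn (traceNote (Just "cleanup")) -- Scheduled trigger: cleanup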
@@ -158,7 +158,7 @@ fetchEvent :: EventId -> Q.TxE QErr (EventId, Bool)
 fetchEvent eid = do
   events <- Q.listQE defaultTxErrorHandler
             [Q.sql|
-              SELECT l.id, l.locked
+              SELECT l.id, l.locked IS NOT NULL AND l.locked >= (NOW() - interval '30 minute')
               FROM hdb_catalog.event_log l
               JOIN hdb_catalog.event_triggers e
               ON l.trigger_name = e.name
@@ -332,7 +332,9 @@ getWebhookInfoFromConf
   -> WebhookConf
   -> m WebhookConfInfo
 getWebhookInfoFromConf env wc = case wc of
-  WCValue w -> return $ WebhookConfInfo wc w
+  WCValue w -> do
+    resolvedWebhook <- resolveWebhook env w
+    return $ WebhookConfInfo wc $ unResolvedWebhook resolvedWebhook
   WCEnv we -> do
     envVal <- getEnv env we
     return $ WebhookConfInfo wc envVal
@@ -56,6 +56,7 @@ import Hasura.RQL.DDL.Headers ()
 import Control.Lens (makeLenses)
 import Data.Aeson
 import Data.Aeson.Casing
+import Data.Bifunctor (bimap)
 import Data.Aeson.TH
 import Data.URL.Template
 import Instances.TH.Lift ()
@@ -305,6 +306,11 @@ instance FromJSON InputWebhook where
       Left e -> fail $ "Parsing URL template failed: " ++ e
       Right v -> pure $ InputWebhook v

+instance Q.FromCol InputWebhook where
+  fromCol bs = do
+    urlTemplate <- parseURLTemplate <$> Q.fromCol bs
+    bimap (\e -> "Parsing URL template failed: " <> T.pack e) InputWebhook urlTemplate
+
 resolveWebhook :: QErrM m => Env.Environment -> InputWebhook -> m ResolvedWebhook
 resolveWebhook env (InputWebhook urlTemplate) = do
   let eitherRenderedTemplate = renderURLTemplate env urlTemplate
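Aside: resolveWebhook renders an InputWebhook by substituting {{ENV_VAR}} placeholders from the environment. A toy re-implementation of the idea, to show the shape of the computation; this is not the actual Data.URL.Template code, and renderTemplate plus its association-list environment are hypothetical stand-ins:

    {-# LANGUAGE OverloadedStrings #-}
    import           Data.Text (Text)
    import qualified Data.Text as T

    -- A toy renderer for "{{VAR}}" placeholders; the real Data.URL.Template
    -- parses the template up front and can report unknown variables as errors.
    renderTemplate :: [(Text, Text)] -> Text -> Either Text Text
    renderTemplate env t = case T.breakOn "{{" t of
      (prefix, rest)
        | T.null rest -> Right prefix  -- no more placeholders
        | otherwise ->
            let (var, rest') = T.breakOn "}}" (T.drop 2 rest)
            in if T.null rest'
                 then Left ("unterminated placeholder in: " <> t)
                 else case lookup var env of
                   Nothing  -> Left ("env var not set: " <> var)
                   Just val -> (\tl -> prefix <> val <> tl)
                                 <$> renderTemplate env (T.drop 2 rest')

    main :: IO ()
    main = print $
      renderTemplate [("WEBHOOK_FROM_ENV", "http://127.0.0.1:5592")]
                     "{{WEBHOOK_FROM_ENV}}/trigger"
      -- Right "http://127.0.0.1:5592/trigger"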
@@ -30,7 +30,7 @@ import Data.Aeson.TH
 import Hasura.Incremental (Cacheable)
 import Hasura.Prelude
 import Hasura.RQL.DDL.Headers
-import Hasura.RQL.Types.Common (NonEmptyText (..))
+import Hasura.RQL.Types.Common (NonEmptyText (..), InputWebhook)
 import Hasura.SQL.Types
 import Language.Haskell.TH.Syntax (Lift)
@@ -104,18 +104,21 @@ data EventHeaderInfo
 instance NFData EventHeaderInfo
 $(deriveToJSON (aesonDrop 3 snakeCase){omitNothingFields=True} ''EventHeaderInfo)

-data WebhookConf = WCValue T.Text | WCEnv T.Text
+data WebhookConf = WCValue InputWebhook | WCEnv T.Text
   deriving (Show, Eq, Generic, Lift)
 instance NFData WebhookConf
 instance Cacheable WebhookConf

 instance ToJSON WebhookConf where
-  toJSON (WCValue w) = String w
+  toJSON (WCValue w) = toJSON w
   toJSON (WCEnv wEnv) = object ["from_env" .= wEnv ]

 instance FromJSON WebhookConf where
   parseJSON (Object o) = WCEnv <$> o .: "from_env"
-  parseJSON (String t) = pure $ WCValue t
+  parseJSON t@(String _) =
+    case (fromJSON t) of
+      Error s -> fail s
+      Success a -> pure $ WCValue a
   parseJSON _ = fail "one of string or object must be provided for webhook"

 data WebhookConfInfo
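Aside: the FromJSON instance above accepts either a bare string or a {"from_env": "..."} object. A self-contained sketch of the same untagged-union pattern with simplified types (aeson is assumed; WCValue holds plain Text here rather than an InputWebhook, so the string case is parsed directly instead of going through fromJSON):

    {-# LANGUAGE OverloadedStrings #-}
    import Data.Aeson
    import Data.Text (Text)

    -- Simplified stand-in for Hasura's WebhookConf.
    data WebhookConf
      = WCValue Text  -- a literal URL (template)
      | WCEnv Text    -- the name of an env var holding the URL
      deriving Show

    instance FromJSON WebhookConf where
      parseJSON (Object o) = WCEnv <$> o .: "from_env"
      parseJSON (String t) = pure (WCValue t)
      parseJSON _          = fail "one of string or object must be provided for webhook"

    main :: IO ()
    main = do
      print (decode "\"http://example.com/hook\"" :: Maybe WebhookConf)
      print (decode "{\"from_env\": \"WEBHOOK_URL\"}" :: Maybe WebhookConf)

Dispatching on the JSON constructor keeps the wire format backwards compatible: existing metadata with plain string webhooks keeps parsing after the payload type is upgraded.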
@@ -135,7 +138,7 @@ data CreateEventTriggerQuery
   , cetqDelete :: !(Maybe SubscribeOpSpec)
   , cetqEnableManual :: !(Maybe Bool)
   , cetqRetryConf :: !(Maybe RetryConf)
-  , cetqWebhook :: !(Maybe T.Text)
+  , cetqWebhook :: !(Maybe InputWebhook)
   , cetqWebhookFromEnv :: !(Maybe T.Text)
   , cetqHeaders :: !(Maybe [HeaderConf])
   , cetqReplace :: !Bool
@@ -203,7 +206,7 @@ data EventTriggerConf
   = EventTriggerConf
   { etcName :: !TriggerName
   , etcDefinition :: !TriggerOpsDef
-  , etcWebhook :: !(Maybe T.Text)
+  , etcWebhook :: !(Maybe InputWebhook)
   , etcWebhookFromEnv :: !(Maybe T.Text)
   , etcRetryConf :: !RetryConf
   , etcHeaders :: !(Maybe [HeaderConf])
@@ -13,7 +13,7 @@ import qualified Data.Environment as Env

 import Hasura.Incremental (Cacheable)
 import Hasura.RQL.DDL.Headers (HeaderConf (..))
-import Hasura.RQL.Types.Common (NonEmptyText (..))
+import Hasura.RQL.Types.Common
 import Hasura.RQL.Types.Error
 import Hasura.SQL.Types
@@ -42,7 +42,7 @@ $(J.deriveJSON (J.aesonDrop 2 J.snakeCase) ''RemoteSchemaInfo)

 data RemoteSchemaDef
   = RemoteSchemaDef
-  { _rsdUrl :: !(Maybe N.URI)
+  { _rsdUrl :: !(Maybe InputWebhook)
   , _rsdUrlFromEnv :: !(Maybe UrlFromEnv)
   , _rsdHeaders :: !(Maybe [HeaderConf])
   , _rsdForwardClientHeaders :: !Bool
@@ -94,8 +94,11 @@ validateRemoteSchemaDef
   -> m RemoteSchemaInfo
 validateRemoteSchemaDef env (RemoteSchemaDef mUrl mUrlEnv hdrC fwdHdrs mTimeout) =
   case (mUrl, mUrlEnv) of
-    (Just url, Nothing) ->
-      return $ RemoteSchemaInfo url hdrs fwdHdrs timeout
+    (Just url, Nothing) -> do
+      resolvedWebhookTxt <- unResolvedWebhook <$> resolveWebhook env url
+      case N.parseURI $ T.unpack resolvedWebhookTxt of
+        Nothing -> throw400 InvalidParams $ "not a valid URI: " <> resolvedWebhookTxt
+        Just uri -> return $ RemoteSchemaInfo uri hdrs fwdHdrs timeout
     (Nothing, Just urlEnv) -> do
       url <- getUrlFromEnv env urlEnv
       return $ RemoteSchemaInfo url hdrs fwdHdrs timeout
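Aside: because the remote schema URL is now a template, it can only be validated as a URI after env-var substitution, which is why the code above resolves first and then re-parses with N.parseURI. A quick standalone check of that parser's behaviour (network-uri is assumed; the inputs are illustrative):

    import Network.URI (parseURI)

    main :: IO ()
    main = do
      -- parseURI returns Nothing unless the string is a syntactically
      -- valid absolute URI, which is what the validation above relies on.
      print (parseURI "http://127.0.0.1:5000/user-graphql")  -- Just ...
      print (parseURI "not a uri")                           -- Nothing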
@@ -11,10 +11,8 @@ import Hasura.Prelude hiding (get, put)

 import Control.Monad.Stateless
 import Data.Aeson hiding (json)
-import Data.Int (Int64)
 import Data.IORef
 import Data.Time.Clock (UTCTime)
-import Data.Time.Clock.POSIX (getPOSIXTime)
 import Network.Mime (defaultMimeLookup)
 import System.FilePath (joinPath, takeFileName)
 import Web.Spock.Core ((<//>))
@@ -635,10 +633,6 @@ mkWaiApp env isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpMana
           , scResponseInternalErrorsConfig = responseErrorsConfig
           }

-  when (isDeveloperAPIEnabled serverCtx) $ do
-    liftIO $ EKG.registerGcMetrics ekgStore
-    liftIO $ EKG.registerCounter "ekg.server_timestamp_ms" getTimeMs ekgStore
-
   spockApp <- liftWithStateless $ \lowerIO ->
     Spock.spockAsApp $ Spock.spockT lowerIO $
       httpApp corsCfg serverCtx enableConsole consoleAssetsDir enableTelemetry
@@ -651,9 +645,6 @@ mkWaiApp env isoLevel logger sqlGenCtx enableAL pool pgExecCtxCustom ci httpMana

   return $ HasuraApp waiApp schemaCacheRef cacheBuiltTime stopWSServer
   where
-    getTimeMs :: IO Int64
-    getTimeMs = (round . (* 1000)) `fmap` getPOSIXTime
-
     -- initialiseCache :: m (E.PlanCache, SchemaCacheRef)
     initialiseCache :: m SchemaCacheRef
     initialiseCache = do
@@ -1,4 +1,5 @@
+-- | Types and functions related to the server initialisation
 {-# OPTIONS_GHC -O0 #-}
 {-# LANGUAGE CPP #-}
 module Hasura.Server.Init
   ( module Hasura.Server.Init
@@ -1 +1 @@
-38
+39
@@ -302,7 +302,8 @@ CREATE TABLE hdb_catalog.event_log
   error BOOLEAN NOT NULL DEFAULT FALSE,
   tries INTEGER NOT NULL DEFAULT 0,
   created_at TIMESTAMP DEFAULT NOW(),
-  locked BOOLEAN NOT NULL DEFAULT FALSE,
+  /* when locked IS NULL the event is unlocked and can be processed */
+  locked TIMESTAMPTZ,
   next_retry_at TIMESTAMP,
   archived BOOLEAN NOT NULL DEFAULT FALSE
 );
@@ -1,11 +1,3 @@
-ALTER TABLE hdb_catalog.hdb_cron_events
-ALTER COLUMN created_at TYPE TIMESTAMPTZ;
-
-ALTER TABLE hdb_catalog.hdb_cron_event_invocation_logs
-ALTER COLUMN created_at TYPE TIMESTAMPTZ;
-
-ALTER TABLE hdb_catalog.hdb_scheduled_events
-ALTER COLUMN created_at TYPE TIMESTAMPTZ;
-
-ALTER TABLE hdb_catalog.hdb_scheduled_event_invocation_logs
-ALTER COLUMN created_at TYPE TIMESTAMPTZ;
+ALTER TABLE hdb_catalog.event_log ALTER COLUMN locked DROP DEFAULT;
+ALTER TABLE hdb_catalog.event_log ALTER COLUMN locked DROP NOT NULL;
+ALTER TABLE hdb_catalog.event_log ALTER COLUMN locked TYPE TIMESTAMPTZ USING CASE WHEN locked THEN NOW() ELSE NULL END;
@@ -1,11 +1,3 @@
-ALTER TABLE hdb_catalog.hdb_cron_events
-ALTER COLUMN created_at TYPE TIMESTAMP;
-
-ALTER TABLE hdb_catalog.hdb_cron_event_invocation_logs
-ALTER COLUMN created_at TYPE TIMESTAMP;
-
-ALTER TABLE hdb_catalog.hdb_scheduled_events
-ALTER COLUMN created_at TYPE TIMESTAMP;
-
-ALTER TABLE hdb_catalog.hdb_scheduled_event_invocation_logs
-ALTER COLUMN created_at TYPE TIMESTAMP;
+ALTER TABLE hdb_catalog.event_log ALTER COLUMN locked TYPE BOOLEAN USING locked IS NOT NULL;
+ALTER TABLE hdb_catalog.event_log ALTER COLUMN locked SET NOT NULL;
+ALTER TABLE hdb_catalog.event_log ALTER COLUMN locked SET DEFAULT false;
server/src-rsr/migrations/38_to_39.sql (new file, 11 lines)
@@ -0,0 +1,11 @@
+ALTER TABLE hdb_catalog.hdb_cron_events
+ALTER COLUMN created_at TYPE TIMESTAMPTZ;
+
+ALTER TABLE hdb_catalog.hdb_cron_event_invocation_logs
+ALTER COLUMN created_at TYPE TIMESTAMPTZ;
+
+ALTER TABLE hdb_catalog.hdb_scheduled_events
+ALTER COLUMN created_at TYPE TIMESTAMPTZ;
+
+ALTER TABLE hdb_catalog.hdb_scheduled_event_invocation_logs
+ALTER COLUMN created_at TYPE TIMESTAMPTZ;
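Aside: the point of moving created_at to timestamptz is that a timestamptz stores an absolute instant, while a plain timestamp is a zone-less wall-clock reading; Haskell Postgres drivers typically map the former to UTCTime and the latter to LocalTime. A small illustration of the distinction (the time values and zone offset are made up):

    import Data.Time (UTCTime, minutesToTimeZone, utcToLocalTime)

    main :: IO ()
    main = do
      -- A timestamptz value is an absolute instant: UTCTime in Haskell.
      let instant = read "2020-09-01 12:00:00 UTC" :: UTCTime
          ist     = minutesToTimeZone 330  -- UTC+5:30
      print instant                        -- 2020-09-01 12:00:00 UTC
      -- A plain timestamp is only a wall-clock reading; the same instant
      -- renders differently in different zones, which is why comparing
      -- zone-less timestamps across servers is ambiguous.
      print (utcToLocalTime ist instant)   -- 2020-09-01 17:30:00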
server/src-rsr/migrations/39_to_38.sql (new file, 11 lines)
@@ -0,0 +1,11 @@
+ALTER TABLE hdb_catalog.hdb_cron_events
+ALTER COLUMN created_at TYPE TIMESTAMP;
+
+ALTER TABLE hdb_catalog.hdb_cron_event_invocation_logs
+ALTER COLUMN created_at TYPE TIMESTAMP;
+
+ALTER TABLE hdb_catalog.hdb_scheduled_events
+ALTER COLUMN created_at TYPE TIMESTAMP;
+
+ALTER TABLE hdb_catalog.hdb_scheduled_event_invocation_logs
+ALTER COLUMN created_at TYPE TIMESTAMP;
@@ -0,0 +1,26 @@
+type: bulk
+args:
+- type: run_sql
+  args:
+    sql: |
+      create table hge_tests.test_t1(
+          c1 int,
+          c2 text
+      );
+- type: track_table
+  args:
+    schema: hge_tests
+    name: test_t1
+- type: create_event_trigger
+  args:
+    name: t1_all
+    table:
+      schema: hge_tests
+      name: test_t1
+    insert:
+      columns: "*"
+    update:
+      columns: "*"
+    delete:
+      columns: "*"
+    webhook: "{{WEBHOOK_FROM_ENV}}/trigger"
@@ -0,0 +1,9 @@
+type: bulk
+args:
+- type: delete_event_trigger
+  args:
+    name: t1_all
+- type: run_sql
+  args:
+    sql: |
+      drop table hge_tests.test_t1
@@ -19,5 +19,5 @@ args:
     name: simple2-graphql
     comment: testing
     definition:
-      url: http://localhost:5000/user-graphql
+      url: "{{REMOTE_SCHEMAS_WEBHOOK_DOMAIN}}/user-graphql"
       forward_client_headers: false
@@ -535,6 +535,43 @@ class TestWebhookEnv(object):
         assert st_code == 200, resp
         check_event(hge_ctx, evts_webhook, "t1_all", table, "DELETE", exp_ev_data)

+@usefixtures('per_method_tests_db_state')
+class TestWebhookTemplateURL(object):
+
+    @classmethod
+    def dir(cls):
+        return 'queries/event_triggers/webhook_template_url'
+
+    def test_basic(self, hge_ctx, evts_webhook):
+        table = {"schema": "hge_tests", "name": "test_t1"}
+
+        init_row = {"c1": 1, "c2": "hello"}
+        exp_ev_data = {
+            "old": None,
+            "new": init_row
+        }
+        st_code, resp = insert(hge_ctx, table, init_row)
+        assert st_code == 200, resp
+        check_event(hge_ctx, evts_webhook, "t1_all", table, "INSERT", exp_ev_data, webhook_path = '/trigger')
+
+        where_exp = {"c1": 1}
+        set_exp = {"c2": "world"}
+        exp_ev_data = {
+            "old": init_row,
+            "new": {"c1": 1, "c2": "world"}
+        }
+        st_code, resp = update(hge_ctx, table, where_exp, set_exp)
+        assert st_code == 200, resp
+        check_event(hge_ctx, evts_webhook, "t1_all", table, "UPDATE", exp_ev_data, webhook_path = '/trigger')
+
+        exp_ev_data = {
+            "old": {"c1": 1, "c2": "world"},
+            "new": None
+        }
+        st_code, resp = delete(hge_ctx, table, where_exp)
+        assert st_code == 200, resp
+        check_event(hge_ctx, evts_webhook, "t1_all", table, "DELETE", exp_ev_data, webhook_path = '/trigger')
+
 @usefixtures('per_method_tests_db_state')
 class TestSessionVariables(object):