https://github.com/hasura/graphql-engine.git
schema cache sync improvements (#2098)
* build the schema cache without the database setup step

  The setup shouldn't happen for sync: the database has already been set up by the instance that generated the event, so sync is now faster.

* use a SQL loop, with ordering, to drop the hdb_views schema's views and routines

  This avoids deadlocks when the schema is being changed concurrently.

* schema sync now processes only the latest event

  This is useful when many schema change events arrive while an earlier event is still being processed (a sketch of this pattern follows the commit details below).
parent b24456788c
commit c4c36e0ef4
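The third bullet of the commit message comes down to replacing the sync event queue with a single mutable slot that only ever holds the newest payload; the processor atomically takes the slot's contents, or blocks until something arrives. A minimal, self-contained sketch of that pattern — the String payload and names such as latestRef are illustrative stand-ins, not the engine's own code:

import Control.Concurrent.STM
import Control.Monad (forM_)

main :: IO ()
main = do
  -- one slot instead of a queue: a burst of writes keeps only the newest event
  latestRef <- newTVarIO (Nothing :: Maybe String)

  -- listener side: three events arrive while the processor is busy
  forM_ ["event-1", "event-2", "event-3"] $ \e ->
    atomically $ writeTVar latestRef (Just e)

  -- processor side: block until an event is present, take it, clear the slot
  e <- atomically $ do
    m <- readTVar latestRef
    case m of
      Nothing -> retry
      Just ev -> writeTVar latestRef Nothing >> return ev

  putStrLn ("only the latest event is processed: " ++ e)  -- prints event-3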
@@ -86,7 +86,6 @@ buildViewName (QualifiedObject sn tn) (RoleName rTxt) pt =
   QualifiedObject hdbViewsSchema $ TableName
   (rTxt <> "__" <> T.pack (show pt) <> "__" <> snTxt <> "__" <> tnTxt)
   where
-    hdbViewsSchema = SchemaName "hdb_views"
     snTxt = getSchemaTxt sn
     tnTxt = getTableTxt tn
 
@@ -333,9 +333,19 @@ buildSchemaCacheStrict = do
 buildSchemaCache
   :: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
   => m ()
-buildSchemaCache = do
+buildSchemaCache = buildSchemaCacheG True
+
+buildSCWithoutSetup
+  :: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
+  => m ()
+buildSCWithoutSetup = buildSchemaCacheG False
+
+buildSchemaCacheG
+  :: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
+  => Bool -> m ()
+buildSchemaCacheG withSetup = do
   -- clean hdb_views
-  liftTx $ Q.catchE defaultTxErrorHandler clearHdbViews
+  when withSetup $ liftTx $ Q.catchE defaultTxErrorHandler clearHdbViews
   -- reset the current schemacache
   writeSchemaCache emptySchemaCache
   hMgr <- askHttpManager
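To summarise the hunk above: the old buildSchemaCache body becomes buildSchemaCacheG, parameterised by a Bool, and the two exported entry points merely fix that flag. A compressed sketch of the same shape, with hypothetical names (rebuildCache*, not the engine's functions) and IO in place of the real monad stack:

import Control.Monad (when)

rebuildCacheG :: Bool -> IO ()
rebuildCacheG withSetup = do
  -- only the instance that originated the metadata change touches the database
  when withSetup $ putStrLn "recreating hdb_views views, functions and triggers"
  putStrLn "rebuilding the in-memory schema cache from the catalog"

rebuildCache, rebuildCacheNoSetup :: IO ()
rebuildCache        = rebuildCacheG True   -- normal DDL / startup path
rebuildCacheNoSetup = rebuildCacheG False  -- schema-sync path (cf. refreshSchemaCache below)

main :: IO ()
main = rebuildCacheNoSetup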
@@ -382,10 +392,10 @@ buildSchemaCache = do
     modifyErr (\e -> "table " <> tn <<> "; role " <> rn <<> "; " <> e) $
       handleInconsistentObj mkInconsObj $
       case pt of
-        PTInsert -> permHelper sqlGenCtx sn tn rn pDef PAInsert
-        PTSelect -> permHelper sqlGenCtx sn tn rn pDef PASelect
-        PTUpdate -> permHelper sqlGenCtx sn tn rn pDef PAUpdate
-        PTDelete -> permHelper sqlGenCtx sn tn rn pDef PADelete
+        PTInsert -> permHelper withSetup sqlGenCtx sn tn rn pDef PAInsert
+        PTSelect -> permHelper withSetup sqlGenCtx sn tn rn pDef PASelect
+        PTUpdate -> permHelper withSetup sqlGenCtx sn tn rn pDef PAUpdate
+        PTDelete -> permHelper withSetup sqlGenCtx sn tn rn pDef PADelete
 
   -- Fetch all the query templates
   qtemplates <- liftTx $ Q.catchE defaultTxErrorHandler fetchQTemplates
@@ -410,7 +420,8 @@ buildSchemaCache = do
       etc <- decodeValue configuration
       subTableP2Setup qt etc
       allCols <- getCols . tiFieldInfoMap <$> askTabInfo qt
-      liftTx $ mkTriggerQ trn qt allCols (stringifyNum sqlGenCtx) (etcDefinition etc)
+      when withSetup $ liftTx $
+        mkTriggerQ trn qt allCols (stringifyNum sqlGenCtx) (etcDefinition etc)
 
   functions <- liftTx $ Q.catchE defaultTxErrorHandler fetchFunctions
   forM_ functions $ \(sn, fn) -> do
@@ -431,14 +442,14 @@ buildSchemaCache = do
   forM_ remoteSchemas $ resolveSingleRemoteSchema hMgr
 
   where
-    permHelper sqlGenCtx sn tn rn pDef pa = do
+    permHelper setup sqlGenCtx sn tn rn pDef pa = do
       qCtx <- mkAdminQCtx sqlGenCtx <$> askSchemaCache
       perm <- decodeValue pDef
       let qt = QualifiedObject sn tn
           permDef = PermDef rn perm Nothing
           createPerm = WithTable qt permDef
       (permInfo, deps) <- liftP1WithQCtx qCtx $ createPermP1 createPerm
-      addPermP2Setup qt permDef permInfo
+      when setup $ addPermP2Setup qt permDef permInfo
       addPermToCache qt rn pa permInfo deps
       -- p2F qt rn p1Res
 
@@ -2,35 +2,30 @@ module Hasura.RQL.DDL.Utils
   ( clearHdbViews
   ) where
 
-import qualified Data.Text as T
 import qualified Database.PG.Query as Q
-import Hasura.Prelude ((<>))
+
+import Hasura.Prelude
 
 clearHdbViews :: Q.Tx ()
 clearHdbViews = Q.multiQ (Q.fromText (clearHdbOnlyViews <> clearHdbViewsFunc))
 
-clearHdbOnlyViews :: T.Text
+clearHdbOnlyViews :: Text
 clearHdbOnlyViews =
   "DO $$ DECLARE \
   \ r RECORD; \
   \ BEGIN \
-  \ FOR r IN (SELECT viewname FROM pg_views WHERE schemaname = 'hdb_views') LOOP \
+  \ FOR r IN (SELECT viewname FROM pg_views WHERE schemaname = 'hdb_views' ORDER BY viewname) LOOP \
   \ EXECUTE 'DROP VIEW IF EXISTS hdb_views.' || quote_ident(r.viewname) || ' CASCADE'; \
   \ END LOOP; \
   \ END $$; "
 
 
-clearHdbViewsFunc :: T.Text
+clearHdbViewsFunc :: Text
 clearHdbViewsFunc =
   "DO $$ DECLARE \
-  \ _sql text; \
+  \ r RECORD; \
   \ BEGIN \
-  \ SELECT INTO _sql \
-  \ string_agg('DROP FUNCTION hdb_views.' || quote_ident(r.routine_name) || '() CASCADE;' \
-  \ , E'\n') \
-  \ FROM information_schema.routines r \
-  \ WHERE r.specific_schema = 'hdb_views'; \
-  \ IF _sql IS NOT NULL THEN \
-  \ EXECUTE _sql; \
-  \ END IF; \
-  \ END $$; "
+  \ FOR r IN (SELECT routine_name FROM information_schema.routines WHERE specific_schema = 'hdb_views' ORDER BY routine_name) LOOP \
+  \ EXECUTE 'DROP FUNCTION hdb_views.' || quote_ident(r.routine_name) || '() CASCADE'; \
+  \ END LOOP; \
+  \ END $$; "
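Because the backslash-continued string literals above are hard to read, here is the new routine-dropping DO block laid out with T.unlines as a readability aid; clearHdbViewsFuncSql is an assumed name, not part of the patch, and the SQL matches the new clearHdbViewsFunc modulo whitespace. Dropping one object per iteration in a fixed ORDER BY means every instance acquires locks in the same order, which is what avoids the deadlocks mentioned in the commit message.

{-# LANGUAGE OverloadedStrings #-}

import           Data.Text (Text)
import qualified Data.Text as T

-- illustrative helper, equivalent to the string assembled by clearHdbViewsFunc
clearHdbViewsFuncSql :: Text
clearHdbViewsFuncSql = T.unlines
  [ "DO $$ DECLARE"
  , "  r RECORD;"
  , "BEGIN"
  , "  FOR r IN (SELECT routine_name FROM information_schema.routines"
  , "            WHERE specific_schema = 'hdb_views' ORDER BY routine_name) LOOP"
  , "    EXECUTE 'DROP FUNCTION hdb_views.' || quote_ident(r.routine_name) || '() CASCADE';"
  , "  END LOOP;"
  , "END $$;"
  ]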
@@ -164,6 +164,9 @@ newtype SchemaName
 publicSchema :: SchemaName
 publicSchema = SchemaName "public"
 
+hdbViewsSchema :: SchemaName
+hdbViewsSchema = SchemaName "hdb_views"
+
 instance IsIden SchemaName where
   toIden (SchemaName t) = Iden t
 
@@ -5,7 +5,7 @@ where
 import Hasura.Prelude
 
 import Hasura.Logging
-import Hasura.RQL.DDL.Schema.Table (buildSchemaCache)
+import Hasura.RQL.DDL.Schema.Table (buildSCWithoutSetup)
 import Hasura.RQL.Types
 import Hasura.Server.App (SchemaCacheRef (..), withSCUpdate)
 import Hasura.Server.Init (InstanceId (..))
@@ -80,17 +80,18 @@ startSchemaSync
   -> InstanceId
   -> Maybe UTC.UTCTime -> IO ()
 startSchemaSync sqlGenCtx pool logger httpMgr cacheRef instanceId cacheInitTime = do
-  -- Init events queue
-  eventsQueue <- STM.newTQueueIO
+  -- only the latest event is recorded here
+  -- we don't want to store and process all the events, only the latest event
+  updateEventRef <- STM.newTVarIO Nothing
 
   -- Start listener thread
   lTId <- C.forkIO $ listener sqlGenCtx pool
-    logger httpMgr eventsQueue cacheRef instanceId cacheInitTime
+    logger httpMgr updateEventRef cacheRef instanceId cacheInitTime
   logThreadStarted TTListener lTId
 
   -- Start processor thread
   pTId <- C.forkIO $ processor sqlGenCtx pool
-    logger httpMgr eventsQueue cacheRef instanceId
+    logger httpMgr updateEventRef cacheRef instanceId
   logThreadStarted TTProcessor pTId
 
   where
@@ -109,11 +110,11 @@ listener
   -> PG.PGPool
   -> Logger
   -> HTTP.Manager
-  -> STM.TQueue EventPayload
+  -> STM.TVar (Maybe EventPayload)
   -> SchemaCacheRef
   -> InstanceId
   -> Maybe UTC.UTCTime -> IO ()
-listener sqlGenCtx pool logger httpMgr eventsQueue
+listener sqlGenCtx pool logger httpMgr updateEventRef
   cacheRef instanceId cacheInitTime =
   -- Never exits
   forever $ do
@@ -150,7 +151,7 @@ listener sqlGenCtx pool logger httpMgr eventsQueue
       Right payload -> do
         logInfo logger threadType $ object ["received_event" .= payload]
         -- Push a notify event to Queue
-        STM.atomically $ STM.writeTQueue eventsQueue payload
+        STM.atomically $ STM.writeTVar updateEventRef $ Just payload
 
     onError = logError logger threadType . TEQueryError
     logWarn = unLogger logger $
@@ -164,19 +165,28 @@ processor
   -> PG.PGPool
   -> Logger
   -> HTTP.Manager
-  -> STM.TQueue EventPayload
+  -> STM.TVar (Maybe EventPayload)
   -> SchemaCacheRef
   -> InstanceId -> IO ()
-processor sqlGenCtx pool logger httpMgr eventsQueue
+processor sqlGenCtx pool logger httpMgr updateEventRef
   cacheRef instanceId =
   -- Never exits
   forever $ do
-    event <- STM.atomically $ STM.readTQueue eventsQueue
+    event <- STM.atomically getLatestEvent
     logInfo logger threadType $ object ["processed_event" .= event]
     when (shouldReload event) $
       refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef
        threadType "schema cache reloaded"
   where
+    -- checks if there is an event
+    -- and replaces it with Nothing
+    getLatestEvent = do
+      eventM <- STM.readTVar updateEventRef
+      case eventM of
+        Just event -> do
+          STM.writeTVar updateEventRef Nothing
+          return event
+        Nothing -> STM.retry
     threadType = TTProcessor
 
   -- If event is from another server
@@ -194,7 +204,7 @@ refreshSchemaCache sqlGenCtx pool logger httpManager cacheRef threadType msg = d
   -- Reload schema cache from catalog
   resE <- liftIO $ runExceptT $ withSCUpdate cacheRef logger $
     peelRun emptySchemaCache adminUserInfo
-    httpManager sqlGenCtx (PGExecCtx pool PG.Serializable) buildSchemaCache
+    httpManager sqlGenCtx (PGExecCtx pool PG.Serializable) buildSCWithoutSetup
   case resE of
     Left e -> logError logger threadType $ TEQueryError e
     Right _ ->