mirror of https://github.com/hasura/graphql-engine.git
schema cache sync improvements (#2098)
* Build the schema cache without database setup. The setup shouldn't happen during sync: the database has already been set up by the instance that generated the event, which makes the sync faster.
* Use a SQL loop, with ordering, to drop the hdb_views schema's views and routines. This avoids deadlocks when the schema is changed concurrently.
* Schema sync now processes only the latest event. This is useful when many schema change events arrive while an earlier event is still being processed.
commit c4c36e0ef4
parent b24456788c
@@ -86,7 +86,6 @@ buildViewName (QualifiedObject sn tn) (RoleName rTxt) pt =
   QualifiedObject hdbViewsSchema $ TableName
     (rTxt <> "__" <> T.pack (show pt) <> "__" <> snTxt <> "__" <> tnTxt)
   where
-    hdbViewsSchema = SchemaName "hdb_views"
     snTxt = getSchemaTxt sn
     tnTxt = getTableTxt tn
 
@@ -333,9 +333,19 @@ buildSchemaCacheStrict = do
 buildSchemaCache
   :: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
   => m ()
-buildSchemaCache = do
+buildSchemaCache = buildSchemaCacheG True
+
+buildSCWithoutSetup
+  :: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
+  => m ()
+buildSCWithoutSetup = buildSchemaCacheG False
+
+buildSchemaCacheG
+  :: (MonadTx m, CacheRWM m, MonadIO m, HasHttpManager m, HasSQLGenCtx m)
+  => Bool -> m ()
+buildSchemaCacheG withSetup = do
   -- clean hdb_views
-  liftTx $ Q.catchE defaultTxErrorHandler clearHdbViews
+  when withSetup $ liftTx $ Q.catchE defaultTxErrorHandler clearHdbViews
   -- reset the current schemacache
   writeSchemaCache emptySchemaCache
   hMgr <- askHttpManager
@@ -382,10 +392,10 @@ buildSchemaCache = do
     modifyErr (\e -> "table " <> tn <<> "; role " <> rn <<> "; " <> e) $
       handleInconsistentObj mkInconsObj $
       case pt of
-        PTInsert -> permHelper sqlGenCtx sn tn rn pDef PAInsert
-        PTSelect -> permHelper sqlGenCtx sn tn rn pDef PASelect
-        PTUpdate -> permHelper sqlGenCtx sn tn rn pDef PAUpdate
-        PTDelete -> permHelper sqlGenCtx sn tn rn pDef PADelete
+        PTInsert -> permHelper withSetup sqlGenCtx sn tn rn pDef PAInsert
+        PTSelect -> permHelper withSetup sqlGenCtx sn tn rn pDef PASelect
+        PTUpdate -> permHelper withSetup sqlGenCtx sn tn rn pDef PAUpdate
+        PTDelete -> permHelper withSetup sqlGenCtx sn tn rn pDef PADelete
 
   -- Fetch all the query templates
   qtemplates <- liftTx $ Q.catchE defaultTxErrorHandler fetchQTemplates
@@ -410,7 +420,8 @@ buildSchemaCache = do
     etc <- decodeValue configuration
     subTableP2Setup qt etc
     allCols <- getCols . tiFieldInfoMap <$> askTabInfo qt
-    liftTx $ mkTriggerQ trn qt allCols (stringifyNum sqlGenCtx) (etcDefinition etc)
+    when withSetup $ liftTx $
+      mkTriggerQ trn qt allCols (stringifyNum sqlGenCtx) (etcDefinition etc)
 
   functions <- liftTx $ Q.catchE defaultTxErrorHandler fetchFunctions
   forM_ functions $ \(sn, fn) -> do
@@ -431,14 +442,14 @@ buildSchemaCache = do
   forM_ remoteSchemas $ resolveSingleRemoteSchema hMgr
 
   where
-    permHelper sqlGenCtx sn tn rn pDef pa = do
+    permHelper setup sqlGenCtx sn tn rn pDef pa = do
       qCtx <- mkAdminQCtx sqlGenCtx <$> askSchemaCache
       perm <- decodeValue pDef
       let qt = QualifiedObject sn tn
           permDef = PermDef rn perm Nothing
           createPerm = WithTable qt permDef
      (permInfo, deps) <- liftP1WithQCtx qCtx $ createPermP1 createPerm
-      addPermP2Setup qt permDef permInfo
+      when setup $ addPermP2Setup qt permDef permInfo
       addPermToCache qt rn pa permInfo deps
       -- p2F qt rn p1Res
 
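Aside: the withSetup flag threaded through buildSchemaCacheG above guards every DDL side effect (hdb_views cleanup, permission setup, trigger creation), so a syncing instance only repopulates its in-memory cache. Below is a minimal standalone sketch of that shape, not part of the diff; all names here (rebuildCacheG, runSetupDDL, loadCacheFromCatalog) are hypothetical and only illustrate the pattern of one generalised builder taking a Bool.

import Control.Monad (when)
import Data.IORef (IORef, newIORef, readIORef, writeIORef)

newtype Cache = Cache { cacheVersion :: Int } deriving Show

-- hypothetical stand-ins for the real DDL statements and catalog reads
runSetupDDL :: IO ()
runSetupDDL = putStrLn "running setup DDL (views, triggers, ...)"

loadCacheFromCatalog :: IO Cache
loadCacheFromCatalog = pure (Cache 1)

-- one generalised builder; the Bool decides whether DDL side effects run
rebuildCacheG :: Bool -> IORef Cache -> IO ()
rebuildCacheG withSetup ref = do
  when withSetup runSetupDDL              -- originating instance only
  writeIORef ref =<< loadCacheFromCatalog -- every instance reloads its cache

rebuildCache, rebuildCacheWithoutSetup :: IORef Cache -> IO ()
rebuildCache             = rebuildCacheG True   -- normal metadata API path
rebuildCacheWithoutSetup = rebuildCacheG False  -- schema sync path

main :: IO ()
main = do
  ref <- newIORef (Cache 0)
  rebuildCacheWithoutSetup ref
  print =<< readIORef ref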
@@ -2,35 +2,30 @@ module Hasura.RQL.DDL.Utils
   ( clearHdbViews
   ) where
 
-import qualified Data.Text         as T
 import qualified Database.PG.Query as Q
-import           Hasura.Prelude    ((<>))
+
+import           Hasura.Prelude
 
 clearHdbViews :: Q.Tx ()
 clearHdbViews = Q.multiQ (Q.fromText (clearHdbOnlyViews <> clearHdbViewsFunc))
 
-clearHdbOnlyViews :: T.Text
+clearHdbOnlyViews :: Text
 clearHdbOnlyViews =
   "DO $$ DECLARE \
   \ r RECORD; \
   \ BEGIN \
-  \ FOR r IN (SELECT viewname FROM pg_views WHERE schemaname = 'hdb_views') LOOP \
+  \ FOR r IN (SELECT viewname FROM pg_views WHERE schemaname = 'hdb_views' ORDER BY viewname) LOOP \
   \ EXECUTE 'DROP VIEW IF EXISTS hdb_views.' || quote_ident(r.viewname) || ' CASCADE'; \
   \ END LOOP; \
   \ END $$; "
 
 
-clearHdbViewsFunc :: T.Text
+clearHdbViewsFunc :: Text
 clearHdbViewsFunc =
   "DO $$ DECLARE \
-  \ _sql text; \
-  \ BEGIN \
-  \ SELECT INTO _sql \
-  \   string_agg('DROP FUNCTION hdb_views.' || quote_ident(r.routine_name) || '() CASCADE;' \
-  \             , E'\n') \
-  \ FROM information_schema.routines r \
-  \ WHERE r.specific_schema = 'hdb_views'; \
-  \ IF _sql IS NOT NULL THEN \
-  \ EXECUTE _sql; \
-  \ END IF; \
-  \ END $$; "
+  \ r RECORD; \
+  \ BEGIN \
+  \ FOR r IN (SELECT routine_name FROM information_schema.routines WHERE specific_schema = 'hdb_views' ORDER BY routine_name) LOOP \
+  \ EXECUTE 'DROP FUNCTION hdb_views.' || quote_ident(r.routine_name) || '() CASCADE'; \
+  \ END LOOP; \
+  \ END $$; "
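The ORDER BY added to both loops above makes every server drop the hdb_views objects in the same deterministic order. As a rough standalone analogy (not the Postgres locking machinery itself, and not part of the diff): when every worker acquires its locks in one agreed order, the circular wait needed for a deadlock cannot form. A small sketch using MVars as stand-in locks:

import Control.Concurrent (forkIO, threadDelay)
import Control.Concurrent.MVar (newMVar, withMVar)
import Control.Monad (forM_)

-- Two "objects" that every worker must lock together, like a transaction
-- dropping several hdb_views entries. Both workers take lockA before lockB,
-- so neither can end up holding one lock while waiting on the other.
main :: IO ()
main = do
  lockA <- newMVar ()
  lockB <- newMVar ()
  forM_ [1 :: Int, 2] $ \w ->
    forkIO $
      withMVar lockA $ \_ ->
        withMVar lockB $ \_ ->
          putStrLn ("worker " ++ show w ++ " dropped its objects")
  threadDelay 100000  -- crude wait so the forked workers can finish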
@@ -164,6 +164,9 @@ newtype SchemaName
 publicSchema :: SchemaName
 publicSchema = SchemaName "public"
 
+hdbViewsSchema :: SchemaName
+hdbViewsSchema = SchemaName "hdb_views"
+
 instance IsIden SchemaName where
   toIden (SchemaName t) = Iden t
 
@@ -5,7 +5,7 @@ where
 import           Hasura.Prelude
 
 import           Hasura.Logging
-import           Hasura.RQL.DDL.Schema.Table (buildSchemaCache)
+import           Hasura.RQL.DDL.Schema.Table (buildSCWithoutSetup)
 import           Hasura.RQL.Types
 import           Hasura.Server.App           (SchemaCacheRef (..), withSCUpdate)
 import           Hasura.Server.Init          (InstanceId (..))
@@ -80,17 +80,18 @@ startSchemaSync
   -> InstanceId
   -> Maybe UTC.UTCTime -> IO ()
 startSchemaSync sqlGenCtx pool logger httpMgr cacheRef instanceId cacheInitTime = do
-  -- Init events queue
-  eventsQueue <- STM.newTQueueIO
+  -- only the latest event is recorded here
+  -- we don't want to store and process all the events, only the latest event
+  updateEventRef <- STM.newTVarIO Nothing
 
   -- Start listener thread
   lTId <- C.forkIO $ listener sqlGenCtx pool
-    logger httpMgr eventsQueue cacheRef instanceId cacheInitTime
+    logger httpMgr updateEventRef cacheRef instanceId cacheInitTime
   logThreadStarted TTListener lTId
 
   -- Start processor thread
   pTId <- C.forkIO $ processor sqlGenCtx pool
-    logger httpMgr eventsQueue cacheRef instanceId
+    logger httpMgr updateEventRef cacheRef instanceId
   logThreadStarted TTProcessor pTId
 
   where
@@ -109,11 +110,11 @@ listener
   -> PG.PGPool
   -> Logger
   -> HTTP.Manager
-  -> STM.TQueue EventPayload
+  -> STM.TVar (Maybe EventPayload)
   -> SchemaCacheRef
   -> InstanceId
   -> Maybe UTC.UTCTime -> IO ()
-listener sqlGenCtx pool logger httpMgr eventsQueue
+listener sqlGenCtx pool logger httpMgr updateEventRef
   cacheRef instanceId cacheInitTime =
   -- Never exits
   forever $ do
@@ -150,7 +151,7 @@ listener sqlGenCtx pool logger httpMgr eventsQueue
       Right payload -> do
         logInfo logger threadType $ object ["received_event" .= payload]
         -- Push a notify event to Queue
-        STM.atomically $ STM.writeTQueue eventsQueue payload
+        STM.atomically $ STM.writeTVar updateEventRef $ Just payload
 
     onError = logError logger threadType . TEQueryError
     logWarn = unLogger logger $
@@ -164,19 +165,28 @@ processor
   -> PG.PGPool
   -> Logger
   -> HTTP.Manager
-  -> STM.TQueue EventPayload
+  -> STM.TVar (Maybe EventPayload)
   -> SchemaCacheRef
   -> InstanceId -> IO ()
-processor sqlGenCtx pool logger httpMgr eventsQueue
+processor sqlGenCtx pool logger httpMgr updateEventRef
   cacheRef instanceId =
   -- Never exits
   forever $ do
-    event <- STM.atomically $ STM.readTQueue eventsQueue
+    event <- STM.atomically getLatestEvent
     logInfo logger threadType $ object ["processed_event" .= event]
    when (shouldReload event) $
      refreshSchemaCache sqlGenCtx pool logger httpMgr cacheRef
        threadType "schema cache reloaded"
   where
+    -- checks if there is an event
+    -- and replaces it with Nothing
+    getLatestEvent = do
+      eventM <- STM.readTVar updateEventRef
+      case eventM of
+        Just event -> do
+          STM.writeTVar updateEventRef Nothing
+          return event
+        Nothing -> STM.retry
     threadType = TTProcessor
 
     -- If event is from another server
@@ -194,7 +204,7 @@ refreshSchemaCache sqlGenCtx pool logger httpManager cacheRef threadType msg = d
   -- Reload schema cache from catalog
   resE <- liftIO $ runExceptT $ withSCUpdate cacheRef logger $
     peelRun emptySchemaCache adminUserInfo
-      httpManager sqlGenCtx (PGExecCtx pool PG.Serializable) buildSchemaCache
+      httpManager sqlGenCtx (PGExecCtx pool PG.Serializable) buildSCWithoutSetup
   case resE of
     Left e  -> logError logger threadType $ TEQueryError e
     Right _ ->
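The diff above replaces the TQueue with a TVar (Maybe EventPayload) acting as a one-slot mailbox: the listener overwrites whatever is there, and the processor blocks until a value appears, takes it, and resets the slot, so a burst of schema change events collapses into a single reload. Below is a standalone, runnable sketch of that pattern, not the server code itself; the event type and delays are made up for illustration.

import           Control.Concurrent     (forkIO, threadDelay)
import qualified Control.Concurrent.STM as STM
import           Control.Monad          (forM_, forever)

main :: IO ()
main = do
  -- one-slot mailbox: only the latest event is kept
  updateEventRef <- STM.newTVarIO (Nothing :: Maybe Int)

  -- processor: take the latest event, or block (STM.retry) until one arrives
  _ <- forkIO $ forever $ do
    event <- STM.atomically $ do
      eventM <- STM.readTVar updateEventRef
      case eventM of
        Just e  -> STM.writeTVar updateEventRef Nothing >> return e
        Nothing -> STM.retry
    putStrLn ("processing event " ++ show event)
    threadDelay 50000  -- simulate a slow schema cache rebuild

  -- listener: publish a burst of events; older ones are simply overwritten
  forM_ [1 .. 10 :: Int] $ \n ->
    STM.atomically $ STM.writeTVar updateEventRef (Just n)
  threadDelay 200000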