Fix the way replace_metadata drops event triggers (fix #5461) (#6137)

https://github.com/hasura/graphql-engine/pull/6137
Alexis King 2020-11-10 06:02:38 -06:00 committed by GitHub
parent d769a7536e
commit 6787a1b1b0
9 changed files with 159 additions and 47 deletions

View File

@@ -128,6 +128,7 @@ This release contains the [PDV refactor (#4111)](https://github.com/hasura/graph
 - server: fix issue with tracking custom functions that return `SETOF` materialized view (close #5294) (#5945)
 - server: introduce optional custom table name in table configuration to track the table according to the custom name. The `set_table_custom_fields` API has been deprecated; a new API `set_table_customization` has been added to set the configuration. (#3811)
 - server: allow remote relationships with union, interface and enum type fields as well (fixes #5875) (#6080)
+- server: fix event trigger cleanup on deletion via replace_metadata (fix #5461) (#6137)
 - console: allow user to cascade Postgres dependencies when dropping Postgres objects (close #5109) (#5248)
 - console: mark inconsistent remote schemas in the UI (close #5093) (#5181)
 - console: remove ONLY as default for ALTER TABLE in column alter operations (close #5512) (#5706)

View File

@@ -9,19 +9,20 @@ module Hasura.RQL.DDL.EventTrigger
   -- TODO(from master): review
   , delEventTriggerFromCatalog
-  , subTableP2
-  , subTableP2Setup
+  , mkEventTriggerInfo
   , mkAllTriggersQ
   , delTriggerQ
   , getEventTriggerDef
   , getWebhookInfoFromConf
   , getHeaderInfosFromConf
   , updateEventTriggerInCatalog
+  , replaceEventTriggersInCatalog
   ) where

 import           Hasura.Prelude

 import qualified Data.Environment             as Env
+import qualified Data.HashMap.Strict.Extended as Map
 import qualified Data.Text                    as T
 import qualified Data.Text.Lazy               as TL
 import qualified Database.PG.Query            as Q
@@ -213,13 +214,13 @@ subTableP1 (CreateEventTriggerQuery name qt insert update delete enableManual re
     SubCStar         -> return ()
     SubCArray pgcols -> forM_ pgcols (assertPGCol (_tciFieldInfoMap ti) "")

-subTableP2Setup
+mkEventTriggerInfo
   :: QErrM m
   => Env.Environment
   -> QualifiedTable
   -> EventTriggerConf
   -> m (EventTriggerInfo, [SchemaDependency])
-subTableP2Setup env qt (EventTriggerConf name def webhook webhookFromEnv rconf mheaders) = do
+mkEventTriggerInfo env qt (EventTriggerConf name def webhook webhookFromEnv rconf mheaders) = do
   webhookConf <- case (webhook, webhookFromEnv) of
     (Just w, Nothing)    -> return $ WCValue w
     (Nothing, Just wEnv) -> return $ WCEnv wEnv
@@ -251,19 +252,14 @@ getTrigDefDeps qt (TriggerOpsDef mIns mUpd mDel _) =
       SubCStar         -> []
       SubCArray pgcols -> pgcols

-subTableP2
-  :: (MonadTx m)
-  => QualifiedTable -> Bool -> EventTriggerConf -> m ()
-subTableP2 qt replace etc = liftTx if replace
-  then updateEventTriggerInCatalog etc
-  else addEventTriggerToCatalog qt etc
-
 runCreateEventTriggerQuery
   :: (QErrM m, UserInfoM m, CacheRWM m, MonadTx m)
   => CreateEventTriggerQuery -> m EncJSON
 runCreateEventTriggerQuery q = do
   (qt, replace, etc) <- subTableP1 q
-  subTableP2 qt replace etc
+  liftTx if replace
+    then updateEventTriggerInCatalog etc
+    else addEventTriggerToCatalog qt etc
   buildSchemaCacheFor $ MOTableObj qt (MTOTrigger $ etcName etc)
   return successMsg
@@ -365,3 +361,35 @@ updateEventTriggerInCatalog trigConf =
           configuration = $1
       WHERE name = $2
                 |] (Q.AltJ $ toJSON trigConf, etcName trigConf) True
+
+-- | Replaces /all/ event triggers in the catalog with new ones, taking care to
+-- drop SQL trigger functions and archive events for any deleted event triggers.
+--
+-- See Note [Diff-and-patch event triggers on replace] for more details.
+replaceEventTriggersInCatalog
+  :: MonadTx m
+  => HashMap TriggerName (QualifiedTable, EventTriggerConf)
+  -> m ()
+replaceEventTriggersInCatalog triggerConfs = do
+  existingTriggers <- Map.fromListOn id <$> fetchExistingTriggers
+  liftTx $ for_ (align existingTriggers triggerConfs) \case
+    This triggerName -> delEventTriggerFromCatalog triggerName
+    That (tableName, triggerConf) -> addEventTriggerToCatalog tableName triggerConf
+    These _ (_, triggerConf) -> updateEventTriggerInCatalog triggerConf
+  where
+    fetchExistingTriggers = liftTx $ map runIdentity <$>
+      Q.listQE defaultTxErrorHandler
+        [Q.sql|SELECT name FROM hdb_catalog.event_triggers|] () True
+
+{- Note [Diff-and-patch event triggers on replace]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+When executing a replace_metadata API call, we usually just drop everything in
+the catalog and recreate it from scratch, then rebuild the schema cache. This
+works fine for most things, but it's a bad idea for event triggers, because
+delEventTriggerFromCatalog does extra work: it deletes the SQL trigger functions
+and archives all associated events.
+
+Therefore, we have to be more careful about which event triggers we drop. We
+diff the new metadata against the old metadata, and we only drop triggers that
+are actually absent in the new metadata. The replaceEventTriggersInCatalog
+function implements this diff-and-patch operation. -}
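
To make the This/That/These classification concrete, here is a minimal standalone sketch of the same diff-and-patch idea. It assumes the semialign and these packages; planChanges and the trigger names are hypothetical illustrations, not part of this commit.

import           Data.Align          (align)
import           Data.HashMap.Strict (HashMap)
import qualified Data.HashMap.Strict as Map
import           Data.These          (These (..))

-- | Classify each trigger name: present only in the catalog ('This'),
-- only in the incoming metadata ('That'), or in both ('These').
planChanges
  :: HashMap String conf             -- triggers currently in the catalog
  -> HashMap String conf             -- triggers in the new metadata
  -> ([String], [String], [String])  -- (drop, create, update)
planChanges old new = Map.foldrWithKey classify ([], [], []) (align old new)
  where
    classify name change (drops, creates, updates) = case change of
      This  _   -> (name : drops, creates, updates)  -- absent from new metadata: drop and archive
      That  _   -> (drops, name : creates, updates)  -- brand new: create from scratch
      These _ _ -> (drops, creates, name : updates)  -- kept: only update its configuration

main :: IO ()
main = print $ planChanges
  (Map.fromList [("t1", "old"), ("t2", "old")])
  (Map.fromList [("t2", "new"), ("t3", "new")])
  -- prints (["t1"],["t3"],["t2"])

Only the This case pays the expensive cleanup cost; the These case leaves the SQL trigger functions and pending events untouched, which is exactly what the Note above calls for.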

View File

@@ -15,6 +15,7 @@ module Hasura.RQL.DDL.Metadata
 import           Hasura.Prelude

 import qualified Data.Aeson.Ordered           as AO
+import qualified Data.HashMap.Strict          as HM
 import qualified Data.HashMap.Strict.InsOrd   as HMIns
 import qualified Data.HashSet                 as HS
 import qualified Data.HashSet.InsOrd          as HSIns
@@ -36,7 +37,8 @@ import qualified Hasura.RQL.DDL.Schema as Schema
 import           Hasura.Backends.Postgres.SQL.Types
 import           Hasura.EncJSON
 import           Hasura.RQL.DDL.ComputedField       (dropComputedFieldFromCatalog)
-import           Hasura.RQL.DDL.EventTrigger        (delEventTriggerFromCatalog, subTableP2)
+import           Hasura.RQL.DDL.EventTrigger        (delEventTriggerFromCatalog,
+                                                     replaceEventTriggersInCatalog)
 import           Hasura.RQL.DDL.Metadata.Types
 import           Hasura.RQL.DDL.Permission.Internal (dropPermFromCatalog)
 import           Hasura.RQL.DDL.RemoteSchema        (addRemoteSchemaToCatalog,
@@ -49,10 +51,11 @@ import Hasura.RQL.Types
 -- | Purge all user-defined metadata; metadata with is_system_defined = false
 clearUserMetadata :: MonadTx m => m ()
 clearUserMetadata = liftTx $ Q.catchE defaultTxErrorHandler $ do
+  -- Note: we don't drop event triggers here because we update them a different
+  -- way; see Note [Diff-and-patch event triggers on replace] in Hasura.RQL.DDL.EventTrigger.
   Q.unitQ "DELETE FROM hdb_catalog.hdb_function WHERE is_system_defined <> 'true'" () False
   Q.unitQ "DELETE FROM hdb_catalog.hdb_permission WHERE is_system_defined <> 'true'" () False
   Q.unitQ "DELETE FROM hdb_catalog.hdb_relationship WHERE is_system_defined <> 'true'" () False
-  Q.unitQ "DELETE FROM hdb_catalog.event_triggers" () False
   Q.unitQ "DELETE FROM hdb_catalog.hdb_computed_field" () False
   Q.unitQ "DELETE FROM hdb_catalog.hdb_remote_relationship" () False
   Q.unitQ "DELETE FROM hdb_catalog.hdb_table WHERE is_system_defined <> 'true'" () False
@@ -69,6 +72,7 @@ runClearMetadata
   => ClearMetadata -> m EncJSON
 runClearMetadata _ = do
   clearUserMetadata
+  replaceEventTriggersInCatalog mempty
   buildSchemaCacheStrict
   return successMsg
@@ -110,9 +114,10 @@ saveMetadata (Metadata tables functions
       withPathK "update_permissions" $ processPerms _tmTable _tmUpdatePermissions
       withPathK "delete_permissions" $ processPerms _tmTable _tmDeletePermissions

-      -- Event triggers
-      withPathK "event_triggers" $
-        indexedForM_ _tmEventTriggers $ \etc -> subTableP2 _tmTable False etc
+  -- Event triggers
+  let allEventTriggers = HMIns.elems tables & map \table ->
+        (_tmTable table,) <$> HMIns.toHashMap (_tmEventTriggers table)
+  replaceEventTriggersInCatalog $ HM.unions allEventTriggers

   -- sql functions
   withPathK "functions" $ indexedForM_ functions $

View File

@ -332,7 +332,7 @@ buildSchemaCacheRule env = proc (catalogMetadata, invalidationKeys) -> do
(| withRecordInconsistency (
(| modifyErrA (do
etc <- bindErrorA -< decodeValue configuration
(info, dependencies) <- bindErrorA -< subTableP2Setup env qt etc
(info, dependencies) <- bindErrorA -< mkEventTriggerInfo env qt etc
let tableColumns = M.mapMaybe (^? _FIColumn) (_tciFieldInfoMap tableInfo)
recreateViewIfNeeded -< (qt, tableColumns, trn, etcDefinition etc)
recordDependencies -< (metadataObject, schemaObjectId, dependencies)
@@ -487,9 +487,9 @@ withMetadataCheck cascade action = do
   sc <- askSchemaCache
   for_ alteredTables $ \(oldQtn, tableDiff) -> do
-    ti <- case M.lookup oldQtn $ scTables sc of
-      Just ti -> return ti
-      Nothing -> throw500 $ "old table metadata not found in cache : " <>> oldQtn
+    ti <- onNothing
+      (M.lookup oldQtn $ scTables sc)
+      (throw500 $ "old table metadata not found in cache : " <>> oldQtn)
     processTableChanges (_tiCoreInfo ti) tableDiff
   where
     SchemaDiff droppedTables alteredTables = schemaDiff
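
The refactor above leans on onNothing from Hasura.Prelude, which behaves roughly like this sketch (not necessarily the exact definition):

-- Return the wrapped value if present; otherwise run the fallback action
-- (here, the throw500).
onNothing :: Applicative m => Maybe a -> m a -> m a
onNothing m act = maybe act pure m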

View File

@@ -1 +1 @@
-41
+42

View File

@@ -297,11 +297,30 @@ CREATE TABLE hdb_catalog.event_triggers
   schema_name TEXT NOT NULL,
   table_name TEXT NOT NULL,
   configuration JSON,
-  comment TEXT,
-  FOREIGN KEY (schema_name, table_name)
-    REFERENCES hdb_catalog.hdb_table(table_schema, table_name) ON UPDATE CASCADE
+  comment TEXT
 );

+-- since we do not have a foreign key constraint (with 'ON UPDATE CASCADE') with hdb_catalog.hdb_table
+-- (see Note [Diff-and-patch event triggers on replace] in Hasura.RQL.DDL.EventTrigger), we perform the update using a trigger
+CREATE OR REPLACE FUNCTION hdb_catalog.event_trigger_table_name_update()
+RETURNS TRIGGER
+LANGUAGE PLPGSQL
+AS
+$$
+BEGIN
+  IF (NEW.table_schema, NEW.table_name) <> (OLD.table_schema, OLD.table_name) THEN
+    UPDATE hdb_catalog.event_triggers
+    SET schema_name = NEW.table_schema, table_name = NEW.table_name
+    WHERE (schema_name, table_name) = (OLD.table_schema, OLD.table_name);
+  END IF;
+  RETURN NEW;
+END;
+$$;
+
+CREATE TRIGGER event_trigger_table_name_update_trigger
+AFTER UPDATE ON hdb_catalog.hdb_table
+FOR EACH ROW EXECUTE PROCEDURE hdb_catalog.event_trigger_table_name_update();
+
 CREATE TABLE hdb_catalog.event_log
 (
   id TEXT DEFAULT hdb_catalog.gen_hasura_uuid() PRIMARY KEY,

View File

@ -0,0 +1,26 @@
ALTER TABLE hdb_catalog.event_triggers
DROP CONSTRAINT IF EXISTS event_triggers_schema_name_fkey;
ALTER TABLE hdb_catalog.event_triggers
DROP CONSTRAINT IF EXISTS event_triggers_schema_name_table_name_fkey;
-- since we removed the foreign key constraint with hdb_catalog.hdb_table which had 'ON UPDATE CASCADE'
-- (see Note [Diff-and-patch event triggers on replace] in Hasura.RQL.DDL.EventTrigger), we perform the update using trigger
CREATE OR REPLACE FUNCTION hdb_catalog.event_trigger_table_name_update()
RETURNS TRIGGER
LANGUAGE PLPGSQL
AS
$$
BEGIN
IF (NEW.table_schema, NEW.table_name) <> (OLD.table_schema, OLD.table_name) THEN
UPDATE hdb_catalog.event_triggers
SET schema_name = NEW.table_schema, table_name = NEW.table_name
WHERE (schema_name, table_name) = (OLD.table_schema, OLD.table_name);
END IF;
RETURN NEW;
END;
$$;
CREATE TRIGGER event_trigger_table_name_update_trigger
AFTER UPDATE ON hdb_catalog.hdb_table
FOR EACH ROW EXECUTE PROCEDURE hdb_catalog.event_trigger_table_name_update();
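
A hypothetical sanity check for this migration: updating a tracked table's name in hdb_catalog.hdb_table should now rewrite the matching event_triggers rows via the trigger, which the dropped foreign key's ON UPDATE CASCADE used to do. The schema and table names below are illustrative, borrowed from the test fixtures.

BEGIN;
-- Simulate Hasura updating its catalog entry when a tracked table is renamed.
UPDATE hdb_catalog.hdb_table
   SET table_name = 'test_t1_renamed'
 WHERE table_schema = 'hge_tests' AND table_name = 'test_t1';

-- The row-level trigger has fired, so event triggers point at the new name.
SELECT name, schema_name, table_name
  FROM hdb_catalog.event_triggers
 WHERE schema_name = 'hge_tests';
ROLLBACK;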

View File

@ -0,0 +1,9 @@
DROP TRIGGER event_trigger_table_name_update_trigger ON hdb_catalog.hdb_table;
DROP FUNCTION hdb_catalog.event_trigger_table_name_update();
ALTER TABLE hdb_catalog.event_triggers
ADD CONSTRAINT event_triggers_schema_name_table_name_fkey
FOREIGN KEY (schema_name, table_name)
REFERENCES hdb_catalog.hdb_table(table_schema, table_name)
ON UPDATE CASCADE;

View File

@@ -1,26 +1,50 @@
-description: create an event trigger and then reset metadata
-url: /v1/query
-status: 200
-query:
-  type: bulk
-  args:
-  - type: track_table
-    args:
-      schema: hge_tests
-      name: test_t1
-  - type: create_event_trigger
-    args: &def_args
-      name: t1_1
-      table:
-        schema: hge_tests
-        name: test_t1
-      insert:
-        columns: "*"
-      update:
-        columns: "*"
-      delete:
-        columns: "*"
-      webhook: http://127.0.0.1:5592
-  - type: clear_metadata
-    args: {}
+- description: create an event trigger and then reset metadata
+  url: /v1/query
+  status: 200
+  query:
+    type: bulk
+    args:
+    - type: track_table
+      args:
+        schema: hge_tests
+        name: test_t1
+    - type: create_event_trigger
+      args: &def_args
+        name: t1_1
+        table:
+          schema: hge_tests
+          name: test_t1
+        insert:
+          columns: "*"
+        update:
+          columns: "*"
+        delete:
+          columns: "*"
+        webhook: http://127.0.0.1:5592
+    - type: insert
+      args:
+        table:
+          schema: hge_tests
+          name: test_t1
+        objects:
+        - c1: 1
+          c2: world
+        returning: []
+    - type: clear_metadata
+      args: {}
+
+- description: ensure the event was archived
+  url: /v1/query
+  status: 200
+  response:
+  - trigger_name: t1_1
+    archived: true
+  query:
+    type: select
+    args:
+      table:
+        schema: hdb_catalog
+        name: event_log
+      columns: [trigger_name, archived]
+      order_by: ['-created_at']
+      limit: 1