server: template the schema and table names in the event trigger PG functions

Earlier, while creating the event trigger's internal Postgres trigger, we got the table name from the `TG_TABLE_NAME` special trigger variable. This works fine for ordinary tables, but it breaks when the table is partitioned: the trigger fires on the individual partition, so `TG_TABLE_NAME` resolves to the partition's name, while the event trigger (ET) configuration in the schema is associated only with the original parent table (as it should be).

In this PR, we supply the table name and schema name through template variables instead of reading `TG_TABLE_NAME` and `TG_TABLE_SCHEMA` at run time, so that event triggers work on partitioned tables as well.
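
To see why, here is a minimal standalone sketch (not the generated Hasura trigger; the table, function, and trigger names are made up, and it assumes PostgreSQL 11+, where row-level triggers on partitioned tables are supported). A trigger attached to the parent fires on the partition that actually receives the row, so `TG_TABLE_NAME` reports the partition's name:

```sql
-- Illustration only: a trigger defined on the parent reports the partition's name.
CREATE TABLE measurement (
  city_id int NOT NULL,
  logdate date NOT NULL
) PARTITION BY RANGE (logdate);

CREATE TABLE measurement_y2006m02 PARTITION OF measurement
  FOR VALUES FROM ('2006-02-01') TO ('2006-03-01');

CREATE FUNCTION report_trigger_table() RETURNS trigger
  LANGUAGE plpgsql AS $$
BEGIN
  -- Prints measurement_y2006m02, not measurement.
  RAISE NOTICE 'TG_TABLE_SCHEMA = %, TG_TABLE_NAME = %', TG_TABLE_SCHEMA, TG_TABLE_NAME;
  RETURN NULL;
END;
$$;

CREATE TRIGGER report_table AFTER INSERT ON measurement
  FOR EACH ROW EXECUTE PROCEDURE report_trigger_table();

INSERT INTO measurement VALUES (1, '2006-02-02');
```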

TODO:

- [x] Changelog
- [x] unit test (ET on a partitioned table)

GitOrigin-RevId: 556376881a85525300dcf64da0611ee9ad387eb0
Karthikeyan Chinnakonda 2021-01-07 01:51:39 +05:30 committed by hasura-bot
parent 5bc0355bdd
commit 44347d2d74
9 changed files with 80 additions and 6 deletions

View File

@@ -87,6 +87,7 @@ and be accessible according to the permissions that were configured for the role
- server: update `forkImmortal` function to log more information, i.e. log the starting of threads and log asynchronous and synchronous exceptions.
- server: various changes to ensure timely cleanup of background threads and other resources in the event of a SIGTERM signal.
- server: fix issue when the `relationships` field in `objects` field is passed `[]` in the `set_custom_types` API (fix #6357)
- server: fix issue with event triggers defined on a table which is partitioned (fixes #6261)
- server: fix issue with non-optional fields of the remote schema being added as optional in the graphql-engine (fix #6401)
- console: allow user to cascade Postgres dependencies when dropping Postgres objects (close #5109) (#5248)
- console: mark inconsistent remote schemas in the UI (close #5093) (#5181)

View File

@@ -73,7 +73,7 @@ mkTriggerQ
-> Ops
-> SubscribeOpSpec
-> m ()
mkTriggerQ trn qt allCols op (SubscribeOpSpec columns payload) = do
mkTriggerQ trn qt@(QualifiedObject schema table) allCols op (SubscribeOpSpec columns payload) = do
strfyNum <- stringifyNum <$> askSQLGenCtx
liftTx $ Q.multiQE defaultTxErrorHandler $ Q.fromText . TL.toStrict $
let payloadColumns = fromMaybe SubCStar payload
@@ -105,6 +105,8 @@ mkTriggerQ trn qt allCols op (SubscribeOpSpec columns payload) = do
name = triggerNameToTxt trn
qualifiedTriggerName = pgIdenTrigger op trn
qualifiedTable = toSQLTxt qt
schemaName = pgFmtLit $ getSchemaTxt schema
tableName = pgFmtLit $ getTableTxt table
operation = tshow op
oldRow = toSQLTxt $ renderRow OLD
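
For illustration (the schema, table, and trigger names below are hypothetical, borrowed from the test added later in this commit): `pgFmtLit` renders the new `schemaName` and `tableName` bindings as single-quoted SQL string literals, so the generated function body carries the parent table's identity at definition time instead of consulting `TG_TABLE_SCHEMA`/`TG_TABLE_NAME` at run time. The templated `insert_event_log` call would come out roughly as:

```sql
-- Rough rendering for schema hge_tests, table measurement, trigger measurement_all.
PERFORM hdb_catalog.insert_event_log(
  CAST('hge_tests' AS text),
  CAST('measurement' AS text),
  CAST('measurement_all' AS text),
  TG_OP,
  _data
);
```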

View File

@@ -19,12 +19,16 @@ CREATE OR REPLACE function hdb_catalog.#{qualifiedTriggerName}() RETURNS trigger
'new', #{newPayloadExpression}
);
BEGIN
/* NOTE: formerly we used TG_TABLE_NAME in place of tableName here. However, when the
table is partitioned the trigger fires on the individual partition, so TG_TABLE_NAME gives
the partition's name, and since we use the table name to look up the event trigger
configuration from the schema, the lookup fails because the event trigger is associated
only with the original (parent) table. */
IF (TG_OP <> 'UPDATE') OR (_old <> _new) THEN
PERFORM hdb_catalog.insert_event_log(CAST(TG_TABLE_SCHEMA AS text), CAST(TG_TABLE_NAME AS text), CAST('#{name}' AS text), TG_OP, _data);
PERFORM hdb_catalog.insert_event_log(CAST(#{schemaName} AS text), CAST(#{tableName} AS text), CAST('#{name}' AS text), TG_OP, _data);
END IF;
EXCEPTION WHEN undefined_function THEN
IF (TG_OP <> 'UPDATE') OR (_old *<> _new) THEN
PERFORM hdb_catalog.insert_event_log(CAST(TG_TABLE_SCHEMA AS text), CAST(TG_TABLE_NAME AS text), CAST('#{name}' AS text), TG_OP, _data);
PERFORM hdb_catalog.insert_event_log(CAST(#{schemaName} AS text), CAST(#{tableName} AS text), CAST('#{name}' AS text), TG_OP, _data);
END IF;
END;
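
As an aside, the surrounding `EXCEPTION WHEN undefined_function` branch (pre-existing, unchanged by this PR) appears to be there because `<>` is undefined for rows containing columns such as `json`, while the byte-wise record-image operator `*<>` still works. A standalone sketch of that behaviour, not Hasura code:

```sql
-- Comparing records with a json column: <> raises undefined_function (42883),
-- while the byte-wise record-image operator *<> succeeds.
DO $$
DECLARE
  _old record;
  _new record;
BEGIN
  SELECT 1 AS id, '{"a": 1}'::json AS doc INTO _old;
  SELECT 1 AS id, '{"a": 2}'::json AS doc INTO _new;
  BEGIN
    RAISE NOTICE 'row changed: %', _old <> _new;   -- raises undefined_function
  EXCEPTION WHEN undefined_function THEN
    RAISE NOTICE 'row changed: %', _old *<> _new;  -- prints: row changed: t
  END;
END $$;
```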

View File

@@ -0,0 +1,37 @@
type: bulk
args:
- type: run_sql
args:
sql: |
CREATE TABLE hge_tests.measurement (
city_id int not null,
logdate date not null,
peaktemp int,
unitsales int
) PARTITION BY RANGE (logdate);
CREATE TABLE hge_tests.measurement_y2006m02 PARTITION OF hge_tests.measurement
FOR VALUES FROM ('2006-02-01') TO ('2006-03-01');
CREATE TABLE hge_tests.measurement_y2006m03 PARTITION OF hge_tests.measurement
FOR VALUES FROM ('2006-03-01') TO ('2006-04-01');
- type: track_table
args:
schema: hge_tests
name: measurement
- type: create_event_trigger
args:
name: measurement_all
table:
schema: hge_tests
name: measurement
insert:
columns: "*"
update:
columns: "*"
delete:
columns: "*"
webhook: http://127.0.0.1:5592

View File

@@ -0,0 +1,9 @@
type: bulk
args:
- type: delete_event_trigger
args:
name: measurement_all
- type: run_sql
args:
sql: |
drop table hge_tests.measurement;

View File

@@ -13,6 +13,7 @@ args:
args:
schema: hge_tests
name: test_t1
- type: create_event_trigger
args:
name: t1_all

View File

@@ -6,4 +6,4 @@ args:
- type: run_sql
args:
sql: |
drop table hge_tests.test_t1
drop table hge_tests.test_t1;

View File

@@ -3,4 +3,4 @@ args:
- type: run_sql
args:
sql: |
drop table hge_tests.test_t1
drop table hge_tests.test_t1;

View File

@@ -112,7 +112,7 @@ class TestEventFlood(object):
        assert ns == list(payload)
@usefixtures("per_method_tests_db_state")
@usefixtures("per_class_tests_db_state")
class TestCreateEvtQuery(object):
    @classmethod
@@ -149,6 +149,26 @@ class TestCreateEvtQuery(object):
        assert st_code == 200, resp
        check_event(hge_ctx, evts_webhook, "t1_all", table, "DELETE", exp_ev_data)
    def test_partitioned_table_basic_insert(self, hge_ctx, evts_webhook):
        if hge_ctx.pg_version < 110000:
            pytest.skip('Event triggers on partitioned tables are not supported in Postgres versions < 11')
            return
        st_code, resp = hge_ctx.v1q_f(self.dir() + '/partition_table_setup.yaml')
        assert st_code == 200, resp
        table = { "schema": "hge_tests", "name": "measurement"}
        init_row = { "city_id": 1, "logdate": "2006-02-02", "peaktemp": 1, "unitsales": 1}
        exp_ev_data = {
            "old": None,
            "new": init_row
        }
        st_code, resp = insert(hge_ctx, table, init_row)
        assert st_code == 200, resp
        check_event(hge_ctx, evts_webhook, "measurement_all", table, "INSERT", exp_ev_data)
        st_code, resp = hge_ctx.v1q_f(self.dir() + '/partition_table_teardown.yaml')
        assert st_code == 200, resp
@usefixtures('per_method_tests_db_state')
class TestRetryConf(object):