module Hasura.RQL.DDL.Metadata
  ( runReplaceMetadata,
    runReplaceMetadataV2,
    runExportMetadata,
    runExportMetadataV2,
    runClearMetadata,
    runReloadMetadata,
    runDumpInternalState,
    runGetInconsistentMetadata,
    runDropInconsistentMetadata,
    runGetCatalogState,
    runSetCatalogState,
    runTestWebhookTransform,
    runSetMetricsConfig,
    runRemoveMetricsConfig,
    module Hasura.RQL.DDL.Metadata.Types,
  )
where

import Control.Lens (to, (.~), (^.), (^?))
import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson qualified as J
import Data.Aeson.Ordered qualified as AO
import Data.Attoparsec.Text qualified as AT
import Data.Bifunctor (first)
import Data.Bitraversable
import Data.ByteString.Lazy qualified as BL
import Data.CaseInsensitive qualified as CI
import Data.Environment qualified as Env
import Data.Has (Has, getter)
import Data.HashMap.Strict qualified as Map
import Data.HashMap.Strict.InsOrd.Extended qualified as OMap
import Data.HashSet qualified as HS
import Data.HashSet qualified as Set
import Data.List qualified as L
import Data.List.Extended qualified as L
import Data.SerializableBlob qualified as SB
import Data.Text qualified as T
import Data.Text.Encoding qualified as TE
import Data.Text.Extended (dquoteList, (<<>))
import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.Logging qualified as HL
import Hasura.Metadata.Class
import Hasura.NativeQuery.API
import Hasura.Prelude hiding (first)
import Hasura.RQL.DDL.Action
import Hasura.RQL.DDL.ComputedField
import Hasura.RQL.DDL.CustomTypes
import Hasura.RQL.DDL.Endpoint
import Hasura.RQL.DDL.EventTrigger
import Hasura.RQL.DDL.InheritedRoles
import Hasura.RQL.DDL.Metadata.Types
import Hasura.RQL.DDL.Permission
import Hasura.RQL.DDL.Relationship
import Hasura.RQL.DDL.RemoteRelationship
import Hasura.RQL.DDL.ScheduledTrigger
import Hasura.RQL.DDL.Schema
import Hasura.RQL.DDL.Schema.Source
import Hasura.RQL.DDL.Webhook.Transform
import Hasura.RQL.Types.Allowlist
import Hasura.RQL.Types.ApiLimit
import Hasura.RQL.Types.Backend
import Hasura.RQL.Types.Common
import Hasura.RQL.Types.Endpoint
import Hasura.RQL.Types.EventTrigger
import Hasura.RQL.Types.EventTrigger qualified as ET
import Hasura.RQL.Types.Eventing.Backend (BackendEventTrigger (..))
import Hasura.RQL.Types.Metadata
import Hasura.RQL.Types.Metadata.Backend
import Hasura.RQL.Types.Metadata.Object
import Hasura.RQL.Types.Network
import Hasura.RQL.Types.OpenTelemetry
import Hasura.RQL.Types.QueryCollection
import Hasura.RQL.Types.ScheduledTrigger
import Hasura.RQL.Types.SchemaCache
import Hasura.RQL.Types.SchemaCache.Build
import Hasura.RQL.Types.Source (unsafeSourceInfo)
import Hasura.RQL.Types.SourceCustomization
import Hasura.SQL.AnyBackend qualified as AB
import Hasura.SQL.Backend (BackendType (..))
import Hasura.SQL.BackendMap qualified as BackendMap
import Hasura.Server.Logging (MetadataLog (..))
import Network.HTTP.Client.Transformable qualified as HTTP

-- | Helper function to run the post drop source hook
postDropSourceHookHelper ::
  ( MonadError QErr m,
    MonadIO m,
    MonadBaseControl IO m,
    MonadReader r m,
    Has (HL.Logger HL.Hasura) r
  ) =>
  SchemaCache ->
  SourceName ->
  AB.AnyBackend SourceMetadata ->
  m ()
postDropSourceHookHelper oldSchemaCache sourceName sourceMetadataBackend = do
  logger :: (HL.Logger HL.Hasura) <- asks getter
  AB.dispatchAnyBackend @BackendMetadata sourceMetadataBackend \(_ :: SourceMetadata b) -> do
    let sourceInfoMaybe = unsafeSourceInfo @b =<< Map.lookup sourceName (scSources oldSchemaCache)
    case sourceInfoMaybe of
      Nothing ->
        HL.unLogger logger $
          MetadataLog
            HL.LevelWarn
            ( "Could not clean up the source '"
                <> sourceName
                  <<> "' while dropping it from the graphql-engine as it is inconsistent."
                <> " Please consider cleaning the resources created by the graphql-engine;"
                <> " refer to https://hasura.io/docs/latest/graphql/core/event-triggers/remove-event-triggers/#clean-footprints-manually"
            )
            J.Null
      Just sourceInfo -> runPostDropSourceHook defaultSource sourceInfo
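
-- | Clear all user-defined metadata. A sketch of the metadata API request this
-- command serves (assuming the standard request envelope):
--
-- > {"type": "clear_metadata", "args": {}}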
runClearMetadata ::
  forall m r.
  ( MonadIO m,
    CacheRWM m,
    MetadataM m,
    MonadMetadataStorageQueryAPI m,
    MonadBaseControl IO m,
    MonadReader r m,
    MonadError QErr m,
    Has (HL.Logger HL.Hasura) r,
    MonadEventLogCleanup m
  ) =>
  ClearMetadata ->
  m EncJSON
runClearMetadata _ = do
  metadata <- getMetadata
  oldSchemaCache <- askSchemaCache

  -- We can infer whether the server is started with `--database-url` option
  -- (or corresponding env variable) by checking the existence of @'defaultSource'
  -- in current metadata.
  let maybeDefaultSourceMetadata = metadata ^? metaSources . ix defaultSource . to unBackendSourceMetadata
      emptyMetadata' = case maybeDefaultSourceMetadata of
        Nothing -> emptyMetadata
        Just exists ->
          -- If default postgres source is defined, we need to set metadata
          -- which contains only the default source, without any tables and functions.
          let emptyDefaultSource =
                AB.dispatchAnyBackend @Backend exists \(s :: SourceMetadata b) ->
                  BackendSourceMetadata $
                    AB.mkAnyBackend @b $
                      SourceMetadata
                        @b
                        defaultSource
                        (_smKind @b s)
                        mempty
                        mempty
                        mempty
                        (_smConfiguration @b s)
                        Nothing
                        emptySourceCustomization
                        Nothing
           in emptyMetadata
                & metaSources %~ OMap.insert defaultSource emptyDefaultSource
  resp <- runReplaceMetadataV1 $ RMWithSources emptyMetadata'
  -- Clean up the default source explicitly, because in the `runReplaceMetadataV1`
  -- call it won't be considered a dropped source: we artificially add an empty
  -- source metadata entry for the default source. So, for `runReplaceMetadataV1`,
  -- the default source is not a dropped source and no post-drop-source action
  -- runs on it. Hence, we explicitly run the post-drop-source action for the
  -- default source here, if it existed in the metadata.
  for_ maybeDefaultSourceMetadata $ postDropSourceHookHelper oldSchemaCache defaultSource
  pure resp

{- Note [Cleanup for dropped triggers]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There was an issue (https://github.com/hasura/graphql-engine/issues/5461)
fixed (via https://github.com/hasura/graphql-engine/pull/6137) related to
event triggers while replacing metadata in the catalog prior to metadata
separation. The metadata separation solves the issue naturally, since the
'hdb_catalog.event_triggers' table is no longer in use and new/updated event
triggers are processed when building the schema cache. But we still need to
drop the database trigger and archive events for dropped event triggers.
This is handled explicitly in the @'runReplaceMetadata' function.
-}

-- | Replace the 'current metadata' with the 'new metadata'.
-- The 'new metadata' might come in via 'Import Metadata' in the console.
runReplaceMetadata ::
  ( CacheRWM m,
    MetadataM m,
    MonadIO m,
    MonadBaseControl IO m,
    MonadMetadataStorageQueryAPI m,
    MonadReader r m,
    MonadError QErr m,
    Has (HL.Logger HL.Hasura) r,
    MonadEventLogCleanup m
  ) =>
  ReplaceMetadata ->
  m EncJSON
runReplaceMetadata = \case
  RMReplaceMetadataV1 v1args -> runReplaceMetadataV1 v1args
  RMReplaceMetadataV2 v2args -> runReplaceMetadataV2 v2args

runReplaceMetadataV1 ::
  ( CacheRWM m,
    MetadataM m,
    MonadIO m,
    MonadBaseControl IO m,
    MonadMetadataStorageQueryAPI m,
    MonadReader r m,
    MonadError QErr m,
    Has (HL.Logger HL.Hasura) r,
    MonadEventLogCleanup m
  ) =>
  ReplaceMetadataV1 ->
  m EncJSON
runReplaceMetadataV1 =
  (successMsg <$) . runReplaceMetadataV2 . ReplaceMetadataV2 NoAllowInconsistentMetadata

runReplaceMetadataV2 ::
  forall m r.
  ( CacheRWM m,
    MetadataM m,
    MonadIO m,
    MonadBaseControl IO m,
    MonadMetadataStorageQueryAPI m,
    MonadReader r m,
    MonadError QErr m,
    Has (HL.Logger HL.Hasura) r,
    MonadEventLogCleanup m
  ) =>
  ReplaceMetadataV2 ->
  m EncJSON
runReplaceMetadataV2 ReplaceMetadataV2 {..} = do
  logger :: (HL.Logger HL.Hasura) <- asks getter
  -- we drop all the future cron trigger events before inserting the new metadata
  -- and re-populating future cron events below
  let introspectionDisabledRoles =
        case _rmv2Metadata of
          RMWithSources m -> _metaSetGraphqlIntrospectionOptions m
          RMWithoutSources _ -> mempty
  oldMetadata <- getMetadata
  oldSchemaCache <- askSchemaCache
  (cronTriggersMetadata, cronTriggersToBeAdded) <- processCronTriggers oldMetadata
  metadata <- case _rmv2Metadata of
    RMWithSources m -> pure $ m {_metaCronTriggers = cronTriggersMetadata}
    RMWithoutSources MetadataNoSources {..} -> do
      let maybeDefaultSourceMetadata = oldMetadata ^? metaSources . ix defaultSource . toSourceMetadata
      defaultSourceMetadata <-
        onNothing maybeDefaultSourceMetadata $
          throw400 NotSupported "cannot import metadata without sources since no default source is defined"
      let newDefaultSourceMetadata =
            BackendSourceMetadata $
              AB.mkAnyBackend
                defaultSourceMetadata
                  { _smTables = _mnsTables,
                    _smFunctions = _mnsFunctions
                  }
      pure $
        Metadata
          (OMap.singleton defaultSource newDefaultSourceMetadata)
          _mnsRemoteSchemas
          _mnsQueryCollections
          _mnsAllowlist
          _mnsCustomTypes
          _mnsActions
          cronTriggersMetadata
          (_metaRestEndpoints oldMetadata)
          emptyApiLimit
          emptyMetricsConfig
          mempty
          introspectionDisabledRoles
          emptyNetwork
          mempty
          emptyOpenTelemetryConfig

  let (oldSources, newSources) = (_metaSources oldMetadata, _metaSources metadata)

  -- Check for duplicate trigger names in the new source metadata
  for_ (OMap.toList newSources) $ \(source, newBackendSourceMetadata) -> do
    for_ (OMap.lookup source oldSources) $ \_oldBackendSourceMetadata ->
      dispatch newBackendSourceMetadata \(newSourceMetadata :: SourceMetadata b) -> do
        let newTriggerNames = concatMap (OMap.keys . _tmEventTriggers) (OMap.elems $ _smTables newSourceMetadata)
            duplicateTriggerNamesInNewMetadata = newTriggerNames \\ (L.uniques newTriggerNames)
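        -- For illustration: with newTriggerNames = ["t1", "t2", "t1"]
        -- (hypothetical names), `L.uniques` yields ["t1", "t2"], and the list
        -- difference (\\) removes one occurrence of each, leaving ["t1"] as
        -- the duplicate to report.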
        unless (null duplicateTriggerNamesInNewMetadata) $ do
          throw400 NotSupported ("Event triggers with duplicate names are not allowed: " <> dquoteList (map triggerNameToTxt duplicateTriggerNamesInNewMetadata))

  let cacheInvalidations =
        CacheInvalidations
          { ciMetadata = False,
            ciRemoteSchemas = mempty,
            ciSources = Set.fromList $ OMap.keys newSources,
            ciDataConnectors = mempty
          }

  -- put the new metadata in the state managed by the `MetadataT`
  putMetadata metadata
  -- build the schema cache with the new metadata
  buildSchemaCacheWithInvalidations cacheInvalidations mempty

  case _rmv2AllowInconsistentMetadata of
    AllowInconsistentMetadata -> pure ()
    NoAllowInconsistentMetadata -> throwOnInconsistencies

  -- populate future cron events for all the new cron triggers that are imported
  for_ cronTriggersToBeAdded $ \CronTriggerMetadata {..} ->
    populateInitialCronTriggerEvents ctSchedule ctName

  -- See Note [Cleanup for dropped triggers]
  dropSourceSQLTriggers logger oldSchemaCache (_metaSources oldMetadata) (_metaSources metadata)

  newSchemaCache <- askSchemaCache
  updateTriggerCleanupSchedules logger (_metaSources oldMetadata) (_metaSources metadata) newSchemaCache
    >>= (`onLeft` throwError)

  let droppedSources = OMap.difference oldSources newSources

  -- Clean up the sources that are not present in the new metadata
  for_ (OMap.toList droppedSources) $ \(oldSource, oldSourceBackendMetadata) ->
    postDropSourceHookHelper oldSchemaCache oldSource (unBackendSourceMetadata oldSourceBackendMetadata)

  pure . encJFromJValue . formatInconsistentObjs . scInconsistentObjs $ newSchemaCache
  where
    {- Note [Cron triggers behaviour with replace metadata]
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    When the metadata is replaced, we delete only the cron triggers that were
    actually removed, instead of deleting all the old cron triggers (which
    existed in the metadata before it was replaced) and inserting all the
    new cron triggers. We do it this way because, when a cron trigger is
    dropped, the cron events associated with it are also dropped from the DB,
    and when a new cron trigger is added, new cron events are generated by the
    graphql-engine. So this way we only delete and insert the data that has
    actually changed.

    The cron triggers to be deleted are calculated by taking a diff of the old
    cron triggers and the new cron triggers. Note that we don't just compare
    trigger names to calculate the diff: the whole cron trigger definition is
    considered in the calculation.

    Note: Only cron triggers with `include_in_metadata` set to `true` can be
    updated/deleted via the replace metadata API. Cron triggers with
    `include_in_metadata` set to `false` can only be modified via the
    `create_cron_trigger` and `delete_cron_trigger` APIs.
    -}
    processCronTriggers oldMetadata = do
      let (oldCronTriggersIncludedInMetadata, oldCronTriggersNotIncludedInMetadata) =
            OMap.partition ctIncludeInMetadata (_metaCronTriggers oldMetadata)
          allNewCronTriggers =
            case _rmv2Metadata of
              RMWithoutSources m -> _mnsCronTriggers m
              RMWithSources m -> _metaCronTriggers m
          -- this function is intended for use with `Map.differenceWith`: it is
          -- called when the same key occurs in both maps, and the values are
          -- compared to calculate the diff.
          -- see https://hackage.haskell.org/package/unordered-containers-0.2.14.0/docs/Data-HashMap-Internal.html#v:differenceWith
          leftIfDifferent l r
            | l == r = Nothing
            | otherwise = Just l
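          -- For illustration (hypothetical values):
          --
          --   Map.differenceWith leftIfDifferent
          --     (Map.fromList [("a", 1), ("b", 2)])
          --     (Map.fromList [("b", 2), ("c", 3)])
          --   == Map.fromList [("a", 1)]
          --
          -- "a" is kept because it is absent from the second map, "b" is
          -- dropped because its value is unchanged, and a key whose value
          -- changed would be kept with its value from the first map.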
          cronTriggersToBeAdded =
            Map.differenceWith
              leftIfDifferent
              (OMap.toHashMap allNewCronTriggers)
              (OMap.toHashMap oldCronTriggersIncludedInMetadata)
          cronTriggersToBeDropped =
            Map.differenceWith
              leftIfDifferent
              (OMap.toHashMap oldCronTriggersIncludedInMetadata)
              (OMap.toHashMap allNewCronTriggers)
      liftEitherM $ dropFutureCronEvents $ MetadataCronTriggers $ Map.keys cronTriggersToBeDropped
      cronTriggers <- do
        -- traverse over the new cron triggers and check if any of them
        -- already exists as a cron trigger with "included_in_metadata: false"
        for_ allNewCronTriggers $ \ct ->
          when (ctName ct `OMap.member` oldCronTriggersNotIncludedInMetadata) $
            throw400 AlreadyExists $
              "cron trigger with name "
                <> ctName ct
                  <<> " already exists as a cron trigger with \"included_in_metadata\" as false"
        -- we add the old cron triggers with included_in_metadata set to false
        -- to the newly added cron triggers
        pure $ allNewCronTriggers <> oldCronTriggersNotIncludedInMetadata
      pure (cronTriggers, cronTriggersToBeAdded)

    dropSourceSQLTriggers ::
      HL.Logger HL.Hasura ->
      SchemaCache ->
      InsOrdHashMap SourceName BackendSourceMetadata ->
      InsOrdHashMap SourceName BackendSourceMetadata ->
      m ()
    dropSourceSQLTriggers (HL.Logger logger) oldSchemaCache oldSources newSources = do
      -- NOTE: the current implementation of this function has an edge case.
      -- When a `SourceA` which contained some event triggers is modified to
      -- point to a new database, this function will try to drop the SQL
      -- triggers of the dropped event triggers on the new database, where they
      -- do not exist. In the current implementation this doesn't throw an
      -- error, because the trigger is dropped using `DROP IF EXISTS..`,
      -- meaning it silently does nothing instead of failing.
      for_ (OMap.toList newSources) $ \(source, newBackendSourceMetadata) -> do
        for_ (OMap.lookup source oldSources) $ \oldBackendSourceMetadata ->
          compose source (unBackendSourceMetadata newBackendSourceMetadata) (unBackendSourceMetadata oldBackendSourceMetadata) \(newSourceMetadata :: SourceMetadata b) -> do
            dispatch oldBackendSourceMetadata \oldSourceMetadata -> do
              let oldTriggersMap = getTriggersMap oldSourceMetadata
                  newTriggersMap = getTriggersMap newSourceMetadata
                  droppedEventTriggers = OMap.keys $ oldTriggersMap `OMap.difference` newTriggersMap
                  retainedNewTriggers = newTriggersMap `OMap.intersection` oldTriggersMap
                  catcher e@QErr {qeCode}
                    | qeCode == Unexpected = pure () -- NOTE: This information should be returned by the inconsistent_metadata response, so doesn't need additional logging.
                    | otherwise = throwError e -- rethrow other errors
              -- This will swallow Unexpected exceptions for sources if allow_inconsistent_metadata is enabled.
              -- This should be ok, since if the sources are already missing from the cache then they should
              -- not need to be removed.
              --
              -- TODO: Determine if any errors should be thrown from askSourceConfig at all if the errors are just being discarded
              return $
                flip catchError catcher do
                  sourceConfigMaybe <- askSourceConfigMaybe @b source
                  case sourceConfigMaybe of
                    Nothing ->
                      -- TODO: Add user facing docs on how to drop triggers manually. Issue #7104
                      logger $
                        MetadataLog
                          HL.LevelWarn
                          ( "Could not drop SQL triggers present in the source '"
                              <> source
                                <<> "' as it is inconsistent."
                              <> " While creating an event trigger, Hasura creates SQL triggers on the table."
                              <> " Please refer https://hasura.io/docs/latest/graphql/core/event-triggers/remove-event-triggers/#clean-up-event-trigger-footprints-manually"
                              <> " to delete the SQL triggers from the database manually."
                              <> " For more details, please refer https://hasura.io/docs/latest/graphql/core/event-triggers/index.html"
                          )
                          J.Null
                    Just sourceConfig -> do
                      for_ droppedEventTriggers $
                        \triggerName -> do
                          -- TODO: The `tableName` parameter could be computed while building
                          -- the triggers map and avoid the cache lookup.
                          tableNameMaybe <- getTableNameFromTrigger @b oldSchemaCache source triggerName
                          case tableNameMaybe of
                            Nothing ->
                              logger $
                                MetadataLog
                                  HL.LevelWarn
                                  (sqlTriggerError triggerName)
                                  J.Null
                            Just tableName ->
                              dropTriggerAndArchiveEvents @b sourceConfig triggerName tableName
                      for_ (OMap.toList retainedNewTriggers) $ \(retainedNewTriggerName, retainedNewTriggerConf) ->
                        case OMap.lookup retainedNewTriggerName oldTriggersMap of
                          Nothing ->
                            logger $
                              MetadataLog
                                HL.LevelWarn
                                (sqlTriggerError retainedNewTriggerName)
                                J.Null
                          Just oldTriggerConf -> do
                            let newTriggerOps = etcDefinition retainedNewTriggerConf
                                oldTriggerOps = etcDefinition oldTriggerConf
                                isDroppedOp old new = isJust old && isNothing new
                                droppedOps =
                                  [ (bool Nothing (Just INSERT) (isDroppedOp (tdInsert oldTriggerOps) (tdInsert newTriggerOps))),
                                    (bool Nothing (Just UPDATE) (isDroppedOp (tdUpdate oldTriggerOps) (tdUpdate newTriggerOps))),
                                    (bool Nothing (Just ET.DELETE) (isDroppedOp (tdDelete oldTriggerOps) (tdDelete newTriggerOps)))
                                  ]
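                            -- For illustration: if the old trigger definition
                            -- covered INSERT and UPDATE but the new one keeps
                            -- only INSERT, droppedOps is
                            -- [Nothing, Just UPDATE, Nothing]; catMaybes below
                            -- then drops only the dangling UPDATE trigger.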
                            tableNameMaybe <- getTableNameFromTrigger @b oldSchemaCache source retainedNewTriggerName
                            case tableNameMaybe of
                              Nothing ->
                                logger $
                                  MetadataLog
                                    HL.LevelWarn
                                    (sqlTriggerError retainedNewTriggerName)
                                    J.Null
                              Just tableName ->
                                dropDanglingSQLTrigger @b sourceConfig retainedNewTriggerName tableName (Set.fromList $ catMaybes droppedOps)
      where
        compose ::
          SourceName ->
          AB.AnyBackend i ->
          AB.AnyBackend i ->
          (forall b. BackendEventTrigger b => i b -> i b -> m ()) ->
          m ()
        compose sourceName x y f = AB.composeAnyBackend @BackendEventTrigger f x y (logger $ HL.UnstructuredLog HL.LevelInfo $ SB.fromText $ "Event trigger clean up couldn't be done on the source " <> sourceName <<> " because it has changed its type")

        sqlTriggerError :: TriggerName -> Text
        sqlTriggerError triggerName =
          ( "Could not drop SQL triggers associated with event trigger '"
              <> triggerName
                <<> "'. While creating an event trigger, Hasura creates SQL triggers on the table."
              <> " Please refer https://hasura.io/docs/latest/graphql/core/event-triggers/remove-event-triggers/#clean-up-event-trigger-footprints-manually"
              <> " to delete the SQL triggers from the database manually."
              <> " For more details, please refer https://hasura.io/docs/latest/graphql/core/event-triggers/index.html"
          )

    dispatch (BackendSourceMetadata bs) = AB.dispatchAnyBackend @BackendEventTrigger bs

-- | Only includes the cron triggers with `included_in_metadata` set to `True`
processCronTriggersMetadata :: Metadata -> Metadata
processCronTriggersMetadata metadata =
  let cronTriggersIncludedInMetadata = OMap.filter ctIncludeInMetadata $ _metaCronTriggers metadata
   in metadata {_metaCronTriggers = cronTriggersIncludedInMetadata}
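
-- | Export the current metadata as ordered JSON. A sketch of the metadata API
-- request this command serves (assuming the standard request envelope):
--
-- > {"type": "export_metadata", "args": {}}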
runExportMetadata ::
  forall m.
  (QErrM m, MetadataM m) =>
  ExportMetadata ->
  m EncJSON
runExportMetadata ExportMetadata {} =
  encJFromOrderedValue . metadataToOrdJSON <$> (processCronTriggersMetadata <$> getMetadata)
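
-- | Like 'runExportMetadata', but the response also carries the current
-- resource version, roughly of the shape (illustrative):
--
-- > {"resource_version": 42, "metadata": {...}}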
runExportMetadataV2 ::
  forall m.
  (QErrM m, MetadataM m) =>
  MetadataResourceVersion ->
  ExportMetadata ->
  m EncJSON
runExportMetadataV2 currentResourceVersion ExportMetadata {} = do
  exportMetadata <- processCronTriggersMetadata <$> getMetadata
  pure $
    encJFromOrderedValue $
      AO.object
        [ ("resource_version", AO.toOrdered currentResourceVersion),
          ("metadata", metadataToOrdJSON exportMetadata)
        ]
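
-- | Rebuild the schema cache, optionally re-fetching the listed remote
-- schemas, sources, and data connectors. The response is of the shape
-- (illustrative):
--
-- > {"message": "success", "is_consistent": true}
--
-- with an "inconsistent_objects" key added when inconsistencies remain.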
runReloadMetadata :: (QErrM m, CacheRWM m, MetadataM m) => ReloadMetadata -> m EncJSON
runReloadMetadata (ReloadMetadata reloadRemoteSchemas reloadSources reloadRecreateEventTriggers reloadDataConnectors) = do
  metadata <- getMetadata
  let allSources = HS.fromList $ OMap.keys $ _metaSources metadata
      allRemoteSchemas = HS.fromList $ OMap.keys $ _metaRemoteSchemas metadata
      allDataConnectors =
        maybe mempty (HS.fromList . OMap.keys . unBackendConfigWrapper) $
          BackendMap.lookup @'DataConnector $
            _metaBackendConfigs metadata
      checkRemoteSchema name =
        unless (HS.member name allRemoteSchemas) $
          throw400 NotExists $
            "Remote schema with name " <> name <<> " not found in metadata"
      checkSource name =
        unless (HS.member name allSources) $
          throw400 NotExists $
            "Source with name " <> name <<> " not found in metadata"
      checkDataConnector name =
        unless (HS.member name allDataConnectors) $
          throw400 NotExists $
            "Data connector with name " <> name <<> " not found in metadata"

  remoteSchemaInvalidations <- case reloadRemoteSchemas of
    RSReloadAll -> pure allRemoteSchemas
    RSReloadList l -> mapM_ checkRemoteSchema l *> pure l
  sourcesInvalidations <- case reloadSources of
    RSReloadAll -> pure allSources
    RSReloadList l -> mapM_ checkSource l *> pure l
  recreateEventTriggersSources <- case reloadRecreateEventTriggers of
    RSReloadAll -> pure allSources
    RSReloadList l -> mapM_ checkSource l *> pure l
  dataConnectorInvalidations <- case reloadDataConnectors of
    RSReloadAll -> pure allDataConnectors
    RSReloadList l -> mapM_ checkDataConnector l *> pure l

  let cacheInvalidations =
        CacheInvalidations
          { ciMetadata = True,
            ciRemoteSchemas = remoteSchemaInvalidations,
            ciSources = sourcesInvalidations,
            ciDataConnectors = dataConnectorInvalidations
          }

  buildSchemaCacheWithOptions (CatalogUpdate $ Just recreateEventTriggersSources) cacheInvalidations metadata
  inconsObjs <- scInconsistentObjs <$> askSchemaCache
  pure . encJFromJValue . J.object $
    [ "message" J..= ("success" :: Text),
      "is_consistent" J..= null inconsObjs
    ]
      <> ["inconsistent_objects" J..= inconsObjs | not (null inconsObjs)]

runDumpInternalState ::
  (QErrM m, CacheRM m) =>
  DumpInternalState ->
  m EncJSON
runDumpInternalState _ =
  encJFromJValue <$> askSchemaCache

runGetInconsistentMetadata ::
  (QErrM m, CacheRM m) =>
  GetInconsistentMetadata ->
  m EncJSON
runGetInconsistentMetadata _ = do
  inconsObjs <- scInconsistentObjs <$> askSchemaCache
  return $ encJFromJValue $ formatInconsistentObjs inconsObjs
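
-- | Render inconsistent metadata objects as a JSON value of the shape
-- (illustrative):
--
-- > {"is_consistent": false, "inconsistent_objects": [...]}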
formatInconsistentObjs :: [InconsistentMetadata] -> J.Value
formatInconsistentObjs inconsObjs =
  J.object
    [ "is_consistent" J..= null inconsObjs,
      "inconsistent_objects" J..= inconsObjs
    ]

runDropInconsistentMetadata ::
  (QErrM m, CacheRWM m, MetadataM m) =>
  DropInconsistentMetadata ->
  m EncJSON
runDropInconsistentMetadata _ = do
  sc <- askSchemaCache
  let inconsSchObjs = L.nub . concatMap imObjectIds $ scInconsistentObjs sc
  -- Note: when building the schema cache, we try to put dependents after their dependencies in the
  -- list of inconsistent objects, so reverse the list to start with dependents first. This is not
  -- perfect (a completely accurate solution would require performing a topological sort), but it
  -- seems to work well enough for now.
  MetadataModifier {..} <- execWriterT $ mapM_ (tell . purgeMetadataObj) (reverse inconsSchObjs)
  metadata <- getMetadata
  putMetadata $ runMetadataModifier metadata
  buildSchemaCache mempty
  -- after building the schema cache, we check that any remaining inconsistent
  -- metadata objects are only those which are not droppable
  newInconsistentObjects <- scInconsistentObjs <$> askSchemaCache
  let droppableInconsistentObjects = filter droppableInconsistentMetadata newInconsistentObjects
  unless (null droppableInconsistentObjects) $
    throwError
      (err400 Unexpected "cannot continue due to new inconsistent metadata")
        { qeInternal = Just $ ExtraInternal $ J.toJSON newInconsistentObjects
        }
  return successMsg

purgeMetadataObj :: MetadataObjId -> MetadataModifier
purgeMetadataObj = \case
  MOSource source -> MetadataModifier $ metaSources %~ OMap.delete source
  MOSourceObjId source exists -> AB.dispatchAnyBackend @BackendMetadata exists $ handleSourceObj source
  MORemoteSchema rsn -> dropRemoteSchemaInMetadata rsn
  MORemoteSchemaPermissions rsName role -> dropRemoteSchemaPermissionInMetadata rsName role
  MORemoteSchemaRemoteRelationship rsName typeName relName ->
    dropRemoteSchemaRemoteRelationshipInMetadata rsName typeName relName
  MOCustomTypes -> clearCustomTypesInMetadata
  MOAction action -> dropActionInMetadata action -- Nothing
  MOActionPermission action role -> dropActionPermissionInMetadata action role
  MOCronTrigger ctName -> dropCronTriggerInMetadata ctName
  MOEndpoint epName -> dropEndpointInMetadata epName
  MOInheritedRole role -> dropInheritedRoleInMetadata role
  MOQueryCollectionsQuery cName lq -> dropListedQueryFromQueryCollections cName lq
  MODataConnectorAgent agentName ->
    MetadataModifier $
      metaBackendConfigs
        %~ BackendMap.modify @'DataConnector (BackendConfigWrapper . OMap.delete agentName . unBackendConfigWrapper)
  MOOpenTelemetry subobject ->
    case subobject of
      OtelSubobjectAll ->
        MetadataModifier $ metaOpenTelemetryConfig .~ emptyOpenTelemetryConfig
      OtelSubobjectExporterOtlp ->
        MetadataModifier $ metaOpenTelemetryConfig . ocExporterOtlp .~ defaultOtelExporterConfig
      OtelSubobjectBatchSpanProcessor ->
        MetadataModifier $ metaOpenTelemetryConfig . ocBatchSpanProcessor .~ defaultOtelBatchSpanProcessorConfig
  where
    handleSourceObj :: forall b. BackendMetadata b => SourceName -> SourceMetadataObjId b -> MetadataModifier
    handleSourceObj source = \case
      SMOTable qt -> dropTableInMetadata @b source qt
      SMOFunction qf -> dropFunctionInMetadata @b source qf
      SMOFunctionPermission qf rn -> dropFunctionPermissionInMetadata @b source qf rn
      SMONativeQuery nq -> dropNativeQueryInMetadata @b source nq
      SMOTableObj qt tableObj ->
        MetadataModifier $
          tableMetadataSetter @b source qt %~ case tableObj of
            MTORel rn _ -> dropRelationshipInMetadata rn
            MTOPerm rn pt -> dropPermissionInMetadata rn pt
            MTOTrigger trn -> dropEventTriggerInMetadata trn
            MTOComputedField ccn -> dropComputedFieldInMetadata ccn
            MTORemoteRelationship rn -> dropRemoteRelationshipInMetadata rn

dropListedQueryFromQueryCollections :: CollectionName -> ListedQuery -> MetadataModifier
dropListedQueryFromQueryCollections cName lq = MetadataModifier $ removeAndCleanupMetadata
  where
    removeAndCleanupMetadata m =
      let newQueryCollection = filteredCollection (_metaQueryCollections m)
          -- QueryCollections = InsOrdHashMap CollectionName CreateCollection
          filteredCollection :: QueryCollections -> QueryCollections
          filteredCollection qc = OMap.filter isNonEmptyCC $ OMap.adjust collectionModifier cName qc
          collectionModifier :: CreateCollection -> CreateCollection
          collectionModifier cc@CreateCollection {..} =
            cc
              { _ccDefinition =
                  let oldQueries = _cdQueries _ccDefinition
                   in _ccDefinition
                        { _cdQueries = filter (/= lq) oldQueries
                        }
              }
          isNonEmptyCC :: CreateCollection -> Bool
          isNonEmptyCC = not . null . _cdQueries . _ccDefinition
          cleanupAllowList :: MetadataAllowlist -> MetadataAllowlist
          cleanupAllowList = OMap.filterWithKey (\_ _ -> OMap.member cName newQueryCollection)
          cleanupRESTEndpoints :: Endpoints -> Endpoints
          cleanupRESTEndpoints endpoints = OMap.filter (not . isFaultyQuery . _edQuery . _ceDefinition) endpoints
          isFaultyQuery :: QueryReference -> Bool
          isFaultyQuery QueryReference {..} = _qrCollectionName == cName && _qrQueryName == _lqName lq
       in m
            { _metaQueryCollections = newQueryCollection,
              _metaAllowlist = cleanupAllowList (_metaAllowlist m),
              _metaRestEndpoints = cleanupRESTEndpoints (_metaRestEndpoints m)
            }

runGetCatalogState ::
  (MonadMetadataStorageQueryAPI m, MonadError QErr m) => GetCatalogState -> m EncJSON
runGetCatalogState _ =
  encJFromJValue <$> liftEitherM fetchCatalogState

runSetCatalogState ::
  (MonadMetadataStorageQueryAPI m, MonadError QErr m) => SetCatalogState -> m EncJSON
runSetCatalogState SetCatalogState {..} = do
  liftEitherM $ updateCatalogState _scsType _scsState
  pure successMsg

runSetMetricsConfig ::
  (MonadIO m, CacheRWM m, MetadataM m, MonadError QErr m) =>
  MetricsConfig ->
  m EncJSON
runSetMetricsConfig mc = do
  withNewInconsistentObjsCheck $
    buildSchemaCache $
      MetadataModifier $
        metaMetricsConfig .~ mc
  pure successMsg

runRemoveMetricsConfig ::
  (MonadIO m, CacheRWM m, MetadataM m, MonadError QErr m) =>
  m EncJSON
runRemoveMetricsConfig = do
  withNewInconsistentObjsCheck $
    buildSchemaCache $
      MetadataModifier $
        metaMetricsConfig .~ emptyMetricsConfig
  pure successMsg

data TestTransformError
  = RequestInitializationError HTTP.HttpException
  | RequestTransformationError HTTP.Request TransformErrorBundle

runTestWebhookTransform ::
  (QErrM m) =>
  TestWebhookTransform ->
  m EncJSON
runTestWebhookTransform (TestWebhookTransform env headers urlE payload rt _ sv) = do
  url <- case urlE of
    URL url' -> interpolateFromEnv env url'
    EnvVar var ->
      let err = throwError $ err400 NotFound "Missing Env Var"
       in maybe err (pure . T.pack) $ Env.lookupEnv env var
  headers' <- traverse (traverse (fmap TE.encodeUtf8 . interpolateFromEnv env . TE.decodeUtf8)) headers
  result <- runExceptT $ do
    initReq <- hoistEither $ first RequestInitializationError $ HTTP.mkRequestEither url

    let req = initReq & HTTP.body .~ pure (J.encode payload) & HTTP.headers .~ headers'
        reqTransform = requestFields rt
        engine = templateEngine rt
        reqTransformCtx = fmap mkRequestContext $ mkReqTransformCtx url sv engine
    hoistEither $ first (RequestTransformationError req) $ applyRequestTransform reqTransformCtx reqTransform req
  case result of
    Right transformed ->
      packTransformResult $ Right transformed
    Left (RequestTransformationError _ err) -> packTransformResult (Left err)
    -- NOTE: In the following case we have failed before producing a valid request.
    Left (RequestInitializationError err) ->
      let errorBundle =
            TransformErrorBundle $
              pure $
                J.object ["error_code" J..= J.String "Request Initialization Error", "message" J..= J.String (tshow err)]
       in throw400WithDetail ValidationFailed "request transform validation failed" $ J.toJSON errorBundle
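
-- | Interpolate @{{ENV_VAR}}@ templates in a URL using the mock environment
-- supplied with the request. For illustration, with a (hypothetical) mock
-- variable @WEBHOOK_HOST=api.example.com@:
--
-- > interpolateFromEnv env "https://{{WEBHOOK_HOST}}/hook"
--
-- yields @"https://api.example.com/hook"@, while a variable missing from the
-- mock environment produces a 400 error asking for a mock value.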
interpolateFromEnv :: MonadError QErr m => Env.Environment -> Text -> m Text
interpolateFromEnv env url =
  case AT.parseOnly parseEnvTemplate url of
    Left _ -> throwError $ err400 ParseFailed "Invalid Url Template"
    Right xs ->
      let lookup' var = maybe (Left var) (Right . T.pack) $ Env.lookupEnv env (T.unpack var)
          result = traverse (fmap indistinct . bitraverse lookup' pure) xs
          err e =
            throwError $
              err400 NotFound $
                "Missing Env Var: "
                  <> e
                  <> ". For security reasons, when testing request options, real environment variable values are not available. Please enter a mock value for "
                  <> e
                  <> " in the Sample Env Variables list. See https://hasura.io/docs/latest/graphql/core/actions/rest-connectors/#action-transforms-sample-context"
       in either err (pure . fold) result

-- | Deserialize a JSON or X-WWW-URL-FORMENCODED body from an
-- 'HTTP.Request' as 'J.Value'.
decodeBody :: Maybe BL.ByteString -> J.Value
decodeBody Nothing = J.Null
decodeBody (Just bs) = fromMaybe J.Null $ jsonToValue bs <|> formUrlEncodedToValue bs
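
-- For illustration: @decodeBody (Just "{\"a\": 1}")@ yields the parsed JSON
-- object, @decodeBody (Just "a=1")@ falls back to the JSON string @"a=1"@,
-- and @decodeBody Nothing@ yields 'J.Null'.
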
-- | Attempt to decode a 'ByteString' as an Aeson 'Value'
jsonToValue :: BL.ByteString -> Maybe J.Value
jsonToValue bs = J.decode bs

-- | Quote a 'ByteString', then attempt to decode it as a JSON String. This is
-- necessary for 'x-www-url-formencoded' bodies: they are a list of key/value
-- pairs encoded as a raw 'ByteString' with no quoting, whereas JSON Strings
-- must be quoted.
formUrlEncodedToValue :: BL.ByteString -> Maybe J.Value
formUrlEncodedToValue bs = J.decode ("\"" <> bs <> "\"")
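
-- For illustration:
--
-- >>> formUrlEncodedToValue "name=foo&age=30"
-- Just (String "name=foo&age=30")
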
parseEnvTemplate :: AT.Parser [Either T.Text T.Text]
parseEnvTemplate = AT.many1 $ pEnv <|> pLit <|> fmap Right "{"
  where
    pEnv = fmap Left $ "{{" *> AT.takeWhile1 (/= '}') <* "}}"
    pLit = fmap Right $ AT.takeWhile1 (/= '{')
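
-- For illustration, 'parseEnvTemplate' splits a template into literal and
-- env-var pieces (assuming OverloadedStrings):
--
-- >>> AT.parseOnly parseEnvTemplate "https://{{HOST}}/v1/graphql"
-- Right [Right "https://",Left "HOST",Right "/v1/graphql"]
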
indistinct :: Either a a -> a
indistinct = either id id

packTransformResult :: (MonadError QErr m) => Either TransformErrorBundle HTTP.Request -> m EncJSON
packTransformResult = \case
  Right req ->
    pure . encJFromJValue $
      J.object
        [ "webhook_url" J..= (req ^. HTTP.url),
          "method" J..= (req ^. HTTP.method),
          "headers" J..= (first CI.foldedCase <$> (req ^. HTTP.headers)),
          "body" J..= decodeBody (req ^. HTTP.body)
        ]
  Left err -> throw400WithDetail ValidationFailed "request transform validation failed" $ J.toJSON err