module Hasura.RQL.DDL.Metadata
  ( runReplaceMetadata,
    runReplaceMetadataV2,
    runExportMetadata,
    runExportMetadataV2,
    runClearMetadata,
    runReloadMetadata,
    runDumpInternalState,
    runGetInconsistentMetadata,
    runDropInconsistentMetadata,
    runGetCatalogState,
    runSetCatalogState,
    runValidateWebhookTransform,
    runSetMetricsConfig,
    runRemoveMetricsConfig,
    module Hasura.RQL.DDL.Metadata.Types,
  )
where
import Control.Lens ( ( .~ ) , ( ^. ) , ( ^? ) )
import Data.Aeson
import Data.Aeson.Ordered qualified as AO
import Data.Bifunctor ( bimap )
import Data.CaseInsensitive qualified as CI
import Data.Has ( Has , getter )
import Data.HashMap.Strict qualified as Map
import Data.HashMap.Strict.InsOrd.Extended qualified as OMap
import Data.HashSet qualified as HS
import Data.List qualified as L
import Data.TByteString qualified as TBS
import Data.Text.Extended ( ( <<> ) )
import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.Logging qualified as HL
import Hasura.Metadata.Class
import Hasura.Prelude
import Hasura.RQL.DDL.Action
import Hasura.RQL.DDL.ComputedField
import Hasura.RQL.DDL.CustomTypes
import Hasura.RQL.DDL.Endpoint
import Hasura.RQL.DDL.EventTrigger
import Hasura.RQL.DDL.InheritedRoles
import Hasura.RQL.DDL.Metadata.Types
import Hasura.RQL.DDL.Network
import Hasura.RQL.DDL.Permission
import Hasura.RQL.DDL.Relationship
import Hasura.RQL.DDL.RemoteRelationship
import Hasura.RQL.DDL.RemoteSchema
import Hasura.RQL.DDL.RequestTransform
import Hasura.RQL.DDL.ScheduledTrigger
import Hasura.RQL.DDL.Schema
import Hasura.RQL.Types
import Hasura.RQL.Types.Eventing.Backend ( BackendEventTrigger ( .. ) )
import Hasura.SQL.AnyBackend qualified as AB
import Hasura.Server.Types ( ExperimentalFeature ( .. ) )
import Network.HTTP.Client.Transformable qualified as HTTP
-- | Clear all user-defined metadata, resetting the server to an empty state.
-- If a default source is configured (i.e. the server was started with
-- @--database-url@), it is kept in the metadata but emptied of tables and
-- functions; otherwise the metadata becomes entirely empty.
runClearMetadata ::
  ( MonadIO m,
    CacheRWM m,
    MetadataM m,
    HasServerConfigCtx m,
    MonadMetadataStorageQueryAPI m,
    MonadReader r m,
    Has (HL.Logger HL.Hasura) r
  ) =>
  ClearMetadata ->
  m EncJSON
runClearMetadata _ = do
  metadata <- getMetadata
  -- We can infer whether the server is started with `--database-url` option
  -- (or corresponding env variable) by checking the existence of @'defaultSource'
  -- in current metadata.
  let maybeDefaultSourceMetadata = metadata ^? metaSources . ix defaultSource
      emptyMetadata' = case maybeDefaultSourceMetadata of
        Nothing -> emptyMetadata
        Just exists ->
          -- If default postgres source is defined, we need to set metadata
          -- which contains only default source without any tables and functions.
          let emptyDefaultSource =
                AB.dispatchAnyBackend @Backend exists \(s :: SourceMetadata b) ->
                  AB.mkAnyBackend @b $
                    SourceMetadata @b defaultSource mempty mempty (_smConfiguration @b s) Nothing
           in emptyMetadata
                & metaSources %~ OMap.insert defaultSource emptyDefaultSource
  runReplaceMetadataV1 $ RMWithSources emptyMetadata'
{- Note [Cleanup for dropped triggers]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There was an issue (https://github.com/hasura/graphql-engine/issues/5461)
fixed (via https://github.com/hasura/graphql-engine/pull/6137) related to
event triggers while replacing metadata in the catalog prior to metadata
separation. The metadata separation solves the issue naturally, since the
'hdb_catalog.event_triggers' table is no more in use and new/updated event
triggers are processed in building schema cache. But we need to drop the
database trigger and archive events for dropped event triggers. This is handled
explicitly in @'runReplaceMetadata' function.
-}
-- | Replace the 'current metadata' with the 'new metadata'.
-- The 'new metadata' might come via the 'Import Metadata' in console.
-- Dispatches on the request version: v1 payloads are forwarded to
-- 'runReplaceMetadataV1', v2 payloads to 'runReplaceMetadataV2'.
runReplaceMetadata ::
  ( CacheRWM m,
    MetadataM m,
    MonadIO m,
    MonadMetadataStorageQueryAPI m,
    HasServerConfigCtx m,
    MonadReader r m,
    Has (HL.Logger HL.Hasura) r
  ) =>
  ReplaceMetadata ->
  m EncJSON
runReplaceMetadata = \case
  RMReplaceMetadataV1 v1args -> runReplaceMetadataV1 v1args
  RMReplaceMetadataV2 v2args -> runReplaceMetadataV2 v2args
-- | v1 replace: runs the v2 implementation with inconsistent metadata
-- disallowed, and discards the v2 response in favour of a plain success
-- message (the v1 API did not report inconsistencies in its response).
runReplaceMetadataV1 ::
  ( QErrM m,
    CacheRWM m,
    MetadataM m,
    MonadIO m,
    MonadMetadataStorageQueryAPI m,
    HasServerConfigCtx m,
    MonadReader r m,
    Has (HL.Logger HL.Hasura) r
  ) =>
  ReplaceMetadataV1 ->
  m EncJSON
runReplaceMetadataV1 =
  (successMsg <$) . runReplaceMetadataV2 . ReplaceMetadataV2 NoAllowInconsistentMetadata
-- | v2 replace: swaps the stored metadata for the supplied one, rebuilds the
-- schema cache (strictly, unless the caller allows inconsistencies), diffs the
-- old and new cron triggers to only touch what changed, and cleans up SQL
-- triggers for event triggers that were dropped.
-- Returns the (possibly empty) list of inconsistent metadata objects.
runReplaceMetadataV2 ::
  forall m r.
  ( QErrM m,
    CacheRWM m,
    MetadataM m,
    MonadIO m,
    HasServerConfigCtx m,
    MonadMetadataStorageQueryAPI m,
    MonadReader r m,
    Has (HL.Logger HL.Hasura) r
  ) =>
  ReplaceMetadataV2 ->
  m EncJSON
runReplaceMetadataV2 ReplaceMetadataV2 {..} = do
  logger :: (HL.Logger HL.Hasura) <- asks getter
  -- we drop all the future cron trigger events before inserting the new metadata
  -- and re-populating future cron events below
  experimentalFeatures <- _sccExperimentalFeatures <$> askServerConfigCtx
  let inheritedRoles =
        case _rmv2Metadata of
          RMWithSources Metadata {_metaInheritedRoles} -> _metaInheritedRoles
          RMWithoutSources _ -> mempty
      introspectionDisabledRoles =
        case _rmv2Metadata of
          RMWithSources m -> _metaSetGraphqlIntrospectionOptions m
          RMWithoutSources _ -> mempty
  -- inherited roles are gated behind an experimental feature flag; reject
  -- metadata containing them when the flag is off
  when (inheritedRoles /= mempty && EFInheritedRoles `notElem` experimentalFeatures) $
    throw400 ConstraintViolation "inherited_roles can only be added when it's enabled in the experimental features"

  oldMetadata <- getMetadata
  (cronTriggersMetadata, cronTriggersToBeAdded) <- processCronTriggers oldMetadata

  metadata <- case _rmv2Metadata of
    RMWithSources m -> pure $ m {_metaCronTriggers = cronTriggersMetadata}
    RMWithoutSources MetadataNoSources {..} -> do
      -- a source-less (pre-v3) metadata import is grafted onto the existing
      -- default source, which therefore must exist
      let maybeDefaultSourceMetadata = oldMetadata ^? metaSources . ix defaultSource . toSourceMetadata
      defaultSourceMetadata <-
        onNothing maybeDefaultSourceMetadata $
          throw400 NotSupported "cannot import metadata without sources since no default source is defined"
      let newDefaultSourceMetadata =
            AB.mkAnyBackend
              defaultSourceMetadata
                { _smTables = _mnsTables,
                  _smFunctions = _mnsFunctions
                }
      pure $
        Metadata
          (OMap.singleton defaultSource newDefaultSourceMetadata)
          _mnsRemoteSchemas
          _mnsQueryCollections
          _mnsAllowlist
          _mnsCustomTypes
          _mnsActions
          cronTriggersMetadata
          (_metaRestEndpoints oldMetadata)
          emptyApiLimit
          emptyMetricsConfig
          mempty
          introspectionDisabledRoles
          emptyNetwork
  putMetadata metadata

  case _rmv2AllowInconsistentMetadata of
    AllowInconsistentMetadata ->
      buildSchemaCache noMetadataModify
    NoAllowInconsistentMetadata ->
      buildSchemaCacheStrict

  -- populate future cron events for all the new cron triggers that are imported
  for_ cronTriggersToBeAdded $ \CronTriggerMetadata {..} ->
    populateInitialCronTriggerEvents ctSchedule ctName

  -- See Note [Cleanup for dropped triggers]
  dropSourceSQLTriggers logger (_metaSources oldMetadata) (_metaSources metadata)

  encJFromJValue . formatInconsistentObjs . scInconsistentObjs <$> askSchemaCache
  where
    {- Note [Cron triggers behaviour with replace metadata]
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    When the metadata is replaced, we delete only the cron triggers
    that were deleted, instead of deleting all the old cron triggers (which
    existed in the metadata before it was replaced) and inserting all the
    new cron triggers. This is done this way, because when a cron trigger is
    dropped, the cron events associated with it will also be dropped from the DB
    and when a new cron trigger is added, new cron events are generated by the
    graphql-engine. So, this way we only delete and insert the data which has been changed.

    The cron triggers that were deleted is calculated by getting a diff
    of the old cron triggers and the new cron triggers. Note that we don't just
    check the name of the trigger to calculate the diff, the whole cron trigger
    definition is considered in the calculation.

    Note: Only cron triggers with `include_in_metadata` set to `true` can be updated/deleted
    via the replace metadata API. Cron triggers with `include_in_metadata` can only be modified
    via the `create_cron_trigger` and `delete_cron_trigger` APIs.
    -}
    processCronTriggers oldMetadata = do
      let (oldCronTriggersIncludedInMetadata, oldCronTriggersNotIncludedInMetadata) =
            OMap.partition ctIncludeInMetadata (_metaCronTriggers oldMetadata)
          allNewCronTriggers =
            case _rmv2Metadata of
              RMWithoutSources m -> _mnsCronTriggers m
              RMWithSources m -> _metaCronTriggers m
          -- this function is intended to use with `Map.differenceWith`, it's used when two
          -- equal keys are encountered, then the values are compared to calculate the diff.
          -- see https://hackage.haskell.org/package/unordered-containers-0.2.14.0/docs/Data-HashMap-Internal.html#v:differenceWith
          leftIfDifferent l r
            | l == r = Nothing
            | otherwise = Just l
          cronTriggersToBeAdded =
            Map.differenceWith
              leftIfDifferent
              (OMap.toHashMap allNewCronTriggers)
              (OMap.toHashMap oldCronTriggersIncludedInMetadata)
          cronTriggersToBeDropped =
            Map.differenceWith
              leftIfDifferent
              (OMap.toHashMap oldCronTriggersIncludedInMetadata)
              (OMap.toHashMap allNewCronTriggers)
      dropFutureCronEvents $ MetadataCronTriggers $ Map.keys cronTriggersToBeDropped
      cronTriggers <- do
        -- traverse over the new cron triggers and check if any of them
        -- already exists as a cron trigger with "included_in_metadata: false"
        for_ allNewCronTriggers $ \ct ->
          when (ctName ct `OMap.member` oldCronTriggersNotIncludedInMetadata) $
            throw400 AlreadyExists $
              "cron trigger with name "
                <> ctName ct
                <<> " already exists as a cron trigger with \"included_in_metadata\" as false"
        -- we add the old cron triggers with included_in_metadata set to false with the
        -- newly added cron triggers
        pure $ allNewCronTriggers <> oldCronTriggersNotIncludedInMetadata
      pure $ (cronTriggers, cronTriggersToBeAdded)

    dropSourceSQLTriggers ::
      HL.Logger HL.Hasura ->
      InsOrdHashMap SourceName BackendSourceMetadata ->
      InsOrdHashMap SourceName BackendSourceMetadata ->
      m ()
    dropSourceSQLTriggers (HL.Logger logger) oldSources newSources = do
      -- NOTE: the current implementation of this function has an edge case.
      -- The edge case is that when a `SourceA` which contained some event triggers
      -- is modified to point to a new database, this function will try to drop the
      -- SQL triggers of the dropped event triggers on the new database which doesn't exist.
      -- In the current implementation, this doesn't throw an error because the trigger is dropped
      -- using `DROP IF EXISTS..` meaning this silently fails without throwing an error.
      for_ (OMap.toList newSources) $ \(source, newBackendSourceMetadata) -> do
        onJust (OMap.lookup source oldSources) $ \oldBackendSourceMetadata ->
          compose source newBackendSourceMetadata oldBackendSourceMetadata \(newSourceMetadata :: SourceMetadata b) -> do
            dispatch oldBackendSourceMetadata \oldSourceMetadata -> do
              let oldTriggersMap = getTriggersMap oldSourceMetadata
                  newTriggersMap = getTriggersMap newSourceMetadata
                  droppedTriggers = OMap.keys $ oldTriggersMap `OMap.difference` newTriggersMap
                  catcher e@QErr {qeCode}
                    | qeCode == Unexpected = pure () -- NOTE: This information should be returned by the inconsistent_metadata response, so doesn't need additional logging.
                    | otherwise = throwError e -- rethrow other errors
              -- This will swallow Unexpected exceptions for sources if allow_inconsistent_metadata is enabled
              -- This should be ok since if the sources are already missing from the cache then they should
              -- not need to be removed.
              --
              -- TODO: Determine if any errors should be thrown from askSourceConfig at all if the errors are just being discarded
              return $
                flip catchError catcher do
                  sourceConfig <- askSourceConfig @b source
                  for_ droppedTriggers $ dropTriggerAndArchiveEvents @b sourceConfig
      where
        -- all event triggers of a source, keyed by trigger name
        getTriggersMap = OMap.unions . map _tmEventTriggers . OMap.elems . _smTables
        dispatch = AB.dispatchAnyBackend @BackendEventTrigger

        -- run @f@ only when old and new source metadata are of the same
        -- backend type; otherwise log that trigger cleanup was skipped
        compose ::
          SourceName ->
          AB.AnyBackend i ->
          AB.AnyBackend i ->
          (forall b. BackendEventTrigger b => i b -> i b -> m ()) ->
          m ()
        compose sourceName x y f = AB.composeAnyBackend @BackendEventTrigger f x y (logger $ HL.UnstructuredLog HL.LevelInfo $ TBS.fromText $ "Event trigger clean up couldn't be done on the source " <> sourceName <<> " because it has changed its type")
-- | Strip out metadata sections gated behind experimental features that are
-- not enabled on this server: inherited roles are exported only when
-- 'EFInheritedRoles' is present in the configured experimental features.
processExperimentalFeatures :: HasServerConfigCtx m => Metadata -> m Metadata
processExperimentalFeatures metadata = do
  experimentalFeatures <- _sccExperimentalFeatures <$> askServerConfigCtx
  let isInheritedRolesSet = EFInheritedRoles `elem` experimentalFeatures
  -- export inherited roles only when inherited_roles is set in the experimental features
  pure $ bool (metadata {_metaInheritedRoles = mempty}) metadata isInheritedRolesSet
-- | Only includes the cron triggers with `included_in_metadata` set to `True`
processCronTriggersMetadata :: Metadata -> Metadata
processCronTriggersMetadata metadata =
  let cronTriggersIncludedInMetadata = OMap.filter ctIncludeInMetadata $ _metaCronTriggers metadata
   in metadata {_metaCronTriggers = cronTriggersIncludedInMetadata}
-- | Post-process metadata before export: apply experimental-feature gating
-- ('processExperimentalFeatures') and drop cron triggers that are not marked
-- @included_in_metadata@ ('processCronTriggersMetadata').
processMetadata :: HasServerConfigCtx m => Metadata -> m Metadata
processMetadata metadata =
  processCronTriggersMetadata <$> processExperimentalFeatures metadata
-- | Export the current metadata as ordered JSON (v1 response shape:
-- the metadata object itself, with no resource version).
runExportMetadata ::
  forall m.
  (QErrM m, MetadataM m, HasServerConfigCtx m) =>
  ExportMetadata ->
  m EncJSON
runExportMetadata ExportMetadata {} =
  encJFromOrderedValue . metadataToOrdJSON <$> (getMetadata >>= processMetadata)
-- | Export the current metadata together with its resource version
-- (v2 response shape: @{"resource_version": ..., "metadata": ...}@).
runExportMetadataV2 ::
  forall m.
  (QErrM m, MetadataM m, HasServerConfigCtx m) =>
  MetadataResourceVersion ->
  ExportMetadata ->
  m EncJSON
runExportMetadataV2 currentResourceVersion ExportMetadata {} = do
  exportMetadata <- processMetadata =<< getMetadata
  pure $
    encJFromOrderedValue $
      AO.object
        [ ("resource_version", AO.toOrdered currentResourceVersion),
          ("metadata", metadataToOrdJSON exportMetadata)
        ]
-- | Rebuild the schema cache from the stored metadata, invalidating the
-- requested remote schemas and sources (all of them, or an explicit list —
-- names in an explicit list must exist in the metadata).
runReloadMetadata :: (QErrM m, CacheRWM m, MetadataM m) => ReloadMetadata -> m EncJSON
runReloadMetadata (ReloadMetadata reloadRemoteSchemas reloadSources) = do
  metadata <- getMetadata
  let allSources = HS.fromList $ OMap.keys $ _metaSources metadata
      allRemoteSchemas = HS.fromList $ OMap.keys $ _metaRemoteSchemas metadata
      checkRemoteSchema name =
        unless (HS.member name allRemoteSchemas) $
          throw400 NotExists $
            "Remote schema with name " <> name <<> " not found in metadata"
      checkSource name =
        unless (HS.member name allSources) $
          throw400 NotExists $
            "Source with name " <> name <<> " not found in metadata"
  remoteSchemaInvalidations <- case reloadRemoteSchemas of
    RSReloadAll -> pure allRemoteSchemas
    RSReloadList l -> mapM_ checkRemoteSchema l *> pure l
  pgSourcesInvalidations <- case reloadSources of
    RSReloadAll -> pure allSources
    RSReloadList l -> mapM_ checkSource l *> pure l
  let cacheInvalidations =
        CacheInvalidations
          { ciMetadata = True,
            ciRemoteSchemas = remoteSchemaInvalidations,
            ciSources = pgSourcesInvalidations
          }

  buildSchemaCacheWithOptions CatalogUpdate cacheInvalidations metadata
  pure successMsg
-- | Dump the entire schema cache as JSON (debugging aid).
runDumpInternalState ::
  (QErrM m, CacheRM m) =>
  DumpInternalState ->
  m EncJSON
runDumpInternalState _ =
  encJFromJValue <$> askSchemaCache
-- | Report the inconsistent metadata objects recorded in the schema cache,
-- formatted via 'formatInconsistentObjs'.
runGetInconsistentMetadata ::
  (QErrM m, CacheRM m) =>
  GetInconsistentMetadata ->
  m EncJSON
runGetInconsistentMetadata _ = do
  inconsObjs <- scInconsistentObjs <$> askSchemaCache
  return $ encJFromJValue $ formatInconsistentObjs inconsObjs
-- | Render inconsistent metadata objects in the API response shape:
-- @is_consistent@ is true exactly when the list is empty.
formatInconsistentObjs :: [InconsistentMetadata] -> Value
formatInconsistentObjs inconsObjs =
  object
    [ "is_consistent" .= null inconsObjs,
      "inconsistent_objects" .= inconsObjs
    ]
-- | Purge all currently-inconsistent objects from the metadata, rebuild the
-- schema cache, and fail if the rebuild surfaces new droppable inconsistencies.
runDropInconsistentMetadata ::
  (QErrM m, CacheRWM m, MetadataM m) =>
  DropInconsistentMetadata ->
  m EncJSON
runDropInconsistentMetadata _ = do
  sc <- askSchemaCache
  let inconsSchObjs = L.nub . concatMap imObjectIds $ scInconsistentObjs sc
  -- Note: when building the schema cache, we try to put dependents after their dependencies in the
  -- list of inconsistent objects, so reverse the list to start with dependents first. This is not
  -- perfect — a completely accurate solution would require performing a topological sort — but it
  -- seems to work well enough for now.
  metadataModifier <- execWriterT $ mapM_ (tell . purgeMetadataObj) (reverse inconsSchObjs)
  metadata <- getMetadata
  putMetadata $ unMetadataModifier metadataModifier metadata
  buildSchemaCache noMetadataModify
  -- after building the schema cache, we need to check the inconsistent metadata, if any
  -- are only those which are not droppable
  newInconsistentObjects <- scInconsistentObjs <$> askSchemaCache
  let droppableInconsistentObjects = filter droppableInconsistentMetadata newInconsistentObjects
  unless (null droppableInconsistentObjects) $
    throwError
      (err400 Unexpected "cannot continue due to new inconsistent metadata")
        { qeInternal = Just $ ExtraInternal $ toJSON newInconsistentObjects
        }
  return successMsg
-- | Build the 'MetadataModifier' that removes a single metadata object
-- (source, remote schema, action, trigger, table object, …) from the metadata.
purgeMetadataObj :: MetadataObjId -> MetadataModifier
purgeMetadataObj = \case
  MOSource source -> MetadataModifier $ metaSources %~ OMap.delete source
  MOSourceObjId source exists -> AB.dispatchAnyBackend @BackendMetadata exists $ handleSourceObj source
  MORemoteSchema rsn -> dropRemoteSchemaInMetadata rsn
  MORemoteSchemaPermissions rsName role -> dropRemoteSchemaPermissionInMetadata rsName role
  MOCustomTypes -> clearCustomTypesInMetadata
  MOAction action -> dropActionInMetadata action -- Nothing
  MOActionPermission action role -> dropActionPermissionInMetadata action role
  MOCronTrigger ctName -> dropCronTriggerInMetadata ctName
  MOEndpoint epName -> dropEndpointInMetadata epName
  MOInheritedRole role -> dropInheritedRoleInMetadata role
  MOHostTlsAllowlist host -> dropHostFromAllowList host
  where
    -- backend-dispatched removal of source-scoped objects (tables, functions,
    -- and the objects nested inside a table's metadata)
    handleSourceObj :: forall b. BackendMetadata b => SourceName -> SourceMetadataObjId b -> MetadataModifier
    handleSourceObj source = \case
      SMOTable qt -> dropTableInMetadata @b source qt
      SMOFunction qf -> dropFunctionInMetadata @b source qf
      SMOFunctionPermission qf rn -> dropFunctionPermissionInMetadata @b source qf rn
      SMOTableObj qt tableObj ->
        MetadataModifier $
          tableMetadataSetter @b source qt %~ case tableObj of
            MTORel rn _ -> dropRelationshipInMetadata rn
            MTOPerm rn pt -> dropPermissionInMetadata rn pt
            MTOTrigger trn -> dropEventTriggerInMetadata trn
            MTOComputedField ccn -> dropComputedFieldInMetadata ccn
            MTORemoteRelationship rn -> dropRemoteRelationshipInMetadata rn
-- | Fetch the catalog state from metadata storage and return it as JSON.
runGetCatalogState ::
  (MonadMetadataStorageQueryAPI m) => GetCatalogState -> m EncJSON
runGetCatalogState _ =
  encJFromJValue <$> fetchCatalogState
-- | Persist the given catalog state (of the given type) to metadata storage.
runSetCatalogState ::
  (MonadMetadataStorageQueryAPI m) => SetCatalogState -> m EncJSON
runSetCatalogState SetCatalogState {..} = do
  updateCatalogState _scsType _scsState
  pure successMsg
-- | Set the metrics configuration in the metadata, rebuilding the schema
-- cache and rejecting the change if it introduces new inconsistencies.
runSetMetricsConfig ::
  (MonadIO m, CacheRWM m, MetadataM m, MonadError QErr m) =>
  MetricsConfig ->
  m EncJSON
runSetMetricsConfig mc = do
  withNewInconsistentObjsCheck $
    buildSchemaCache $
      MetadataModifier $
        metaMetricsConfig .~ mc
  pure successMsg
-- | Reset the metrics configuration to its empty default, rebuilding the
-- schema cache and rejecting the change if it introduces new inconsistencies.
runRemoveMetricsConfig ::
  (MonadIO m, CacheRWM m, MetadataM m, MonadError QErr m) =>
  m EncJSON
runRemoveMetricsConfig = do
  withNewInconsistentObjsCheck $
    buildSchemaCache $
      MetadataModifier $
        metaMetricsConfig .~ emptyMetricsConfig
  pure successMsg
-- | Dry-run a webhook request transform: build a request from the given URL
-- and payload, apply the transform, and return the resulting URL, method,
-- headers, and (JSON-decoded) body without sending anything.
runValidateWebhookTransform ::
  forall m.
  ( QErrM m,
    MonadIO m
  ) =>
  ValidateWebhookTransform ->
  m EncJSON
runValidateWebhookTransform (ValidateWebhookTransform url payload mt) = do
  initReq <- liftIO $ HTTP.mkRequestThrow url
  let req = initReq & HTTP.body .~ pure (encode payload)
      dataTransform = mkRequestTransformDebug mt
      -- TODO(Solomon) Add SessionVariables
      transformed = applyRequestTransform dataTransform req Nothing
      payload' = decode @Value =<< (transformed ^. HTTP.body)
      headers' = bimap CI.foldedCase id <$> (transformed ^. HTTP.headers)
  pure $
    encJFromJValue $
      object
        [ "webhook_url" .= (transformed ^. HTTP.url),
          "method" .= (transformed ^. HTTP.method),
          "headers" .= headers',
          "payload" .= payload'
        ]