graphql-engine/server/src-lib/Hasura/Server/API/Metadata.hs

{-# LANGUAGE ViewPatterns #-}
-- | The RQL metadata query ('/v1/metadata')
module Hasura.Server.API.Metadata
( RQLMetadata,
RQLMetadataV1 (..),
runMetadataQuery,
)
where

import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.Types qualified as A
import Data.Environment qualified as Env
import Data.Has (Has)
import Data.Text qualified as T
import Data.Text.Extended qualified as T
import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.Logging qualified as L
import Hasura.Metadata.Class
import Hasura.Prelude
import Hasura.RQL.DDL.Action
import Hasura.RQL.DDL.ApiLimit
import Hasura.RQL.DDL.ComputedField
import Hasura.RQL.DDL.CustomTypes
import Hasura.RQL.DDL.Endpoint
import Hasura.RQL.DDL.EventTrigger
import Hasura.RQL.DDL.GraphqlSchemaIntrospection
import Hasura.RQL.DDL.InheritedRoles
import Hasura.RQL.DDL.Metadata
import Hasura.RQL.DDL.Network
import Hasura.RQL.DDL.Permission
import Hasura.RQL.DDL.QueryCollection
import Hasura.RQL.DDL.QueryTags
import Hasura.RQL.DDL.Relationship
import Hasura.RQL.DDL.Relationship.Rename
import Hasura.RQL.DDL.RemoteRelationship
import Hasura.RQL.DDL.RemoteSchema
import Hasura.RQL.DDL.ScheduledTrigger
import Hasura.RQL.DDL.Schema
import Hasura.RQL.DDL.Schema.Source
import Hasura.RQL.Types
import Hasura.RQL.Types.Eventing.Backend
import Hasura.RQL.Types.Run
import Hasura.SQL.AnyBackend
import Hasura.SQL.Tag
import Hasura.Server.API.Backend
import Hasura.Server.API.Instances ()
import Hasura.Server.Types (InstanceId (..), MaintenanceMode (..))
import Hasura.Server.Utils (APIVersion (..))
import Hasura.Session
import Hasura.Tracing qualified as Tracing
import Network.HTTP.Client.Manager qualified as HTTP
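
-- | A metadata command accepted by the v1 API. Constructors wrapped in
-- 'AnyBackend' are dispatched to a backend-specific implementation; the
-- rest are backend-agnostic or, where the comments say so, Postgres-specific.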
data RQLMetadataV1
= -- Sources
RMAddSource !(AnyBackend AddSource)
| RMDropSource DropSource
| RMRenameSource !RenameSource
| -- Tables
RMTrackTable !(AnyBackend TrackTableV2)
| RMUntrackTable !(AnyBackend UntrackTable)
| RMSetTableCustomization !(AnyBackend SetTableCustomization)
| -- Tables (PG-specific)
RMPgSetTableIsEnum !SetTableIsEnum
| -- Tables permissions
RMCreateInsertPermission !(AnyBackend (CreatePerm InsPerm))
| RMCreateSelectPermission !(AnyBackend (CreatePerm SelPerm))
| RMCreateUpdatePermission !(AnyBackend (CreatePerm UpdPerm))
| RMCreateDeletePermission !(AnyBackend (CreatePerm DelPerm))
| RMDropInsertPermission !(AnyBackend (DropPerm InsPerm))
| RMDropSelectPermission !(AnyBackend (DropPerm SelPerm))
| RMDropUpdatePermission !(AnyBackend (DropPerm UpdPerm))
| RMDropDeletePermission !(AnyBackend (DropPerm DelPerm))
| RMSetPermissionComment !(AnyBackend SetPermComment)
| -- Tables relationships
RMCreateObjectRelationship !(AnyBackend CreateObjRel)
| RMCreateArrayRelationship !(AnyBackend CreateArrRel)
| RMDropRelationship !(AnyBackend DropRel)
| RMSetRelationshipComment !(AnyBackend SetRelComment)
| RMRenameRelationship !(AnyBackend RenameRel)
| -- Tables remote relationships
RMCreateRemoteRelationship !(AnyBackend CreateFromSourceRelationship)
| RMUpdateRemoteRelationship !(AnyBackend CreateFromSourceRelationship)
| RMDeleteRemoteRelationship !(DeleteFromSourceRelationship ('Postgres 'Vanilla))
| -- Functions
RMTrackFunction !(AnyBackend TrackFunctionV2)
| RMUntrackFunction !(AnyBackend UnTrackFunction)
| RMSetFunctionCustomization (AnyBackend SetFunctionCustomization)
| -- Functions permissions
RMCreateFunctionPermission !(AnyBackend FunctionPermissionArgument)
| RMDropFunctionPermission !(AnyBackend FunctionPermissionArgument)
| -- Computed fields (PG-specific)
RMAddComputedField !(AddComputedField ('Postgres 'Vanilla))
| RMDropComputedField !(DropComputedField ('Postgres 'Vanilla))
| -- Tables event triggers
RMCreateEventTrigger !(AnyBackend CreateEventTriggerQuery)
| RMDeleteEventTrigger !(AnyBackend DeleteEventTriggerQuery)
| RMRedeliverEvent !(AnyBackend RedeliverEventQuery)
| RMInvokeEventTrigger !(AnyBackend InvokeEventTriggerQuery)
| -- Remote schemas
RMAddRemoteSchema !AddRemoteSchemaQuery
| RMUpdateRemoteSchema !AddRemoteSchemaQuery
| RMRemoveRemoteSchema !RemoteSchemaNameQuery
| RMReloadRemoteSchema !RemoteSchemaNameQuery
| RMIntrospectRemoteSchema !RemoteSchemaNameQuery
| -- Remote schemas permissions
RMAddRemoteSchemaPermissions !AddRemoteSchemaPermission
| RMDropRemoteSchemaPermissions !DropRemoteSchemaPermissions
| -- Scheduled triggers
RMCreateCronTrigger !CreateCronTrigger
| RMDeleteCronTrigger !ScheduledTriggerName
| RMCreateScheduledEvent !CreateScheduledEvent
| RMDeleteScheduledEvent !DeleteScheduledEvent
| RMGetScheduledEvents !GetScheduledEvents
| RMGetEventInvocations !GetEventInvocations
| -- Actions
RMCreateAction !CreateAction
| RMDropAction !DropAction
| RMUpdateAction !UpdateAction
| RMCreateActionPermission !CreateActionPermission
| RMDropActionPermission !DropActionPermission
| -- Query collections, allow list related
RMCreateQueryCollection !CreateCollection
| RMDropQueryCollection !DropCollection
| RMAddQueryToCollection !AddQueryToCollection
| RMDropQueryFromCollection !DropQueryFromCollection
| RMAddCollectionToAllowlist !CollectionReq
| RMDropCollectionFromAllowlist !CollectionReq
| -- Rest endpoints
RMCreateRestEndpoint !CreateEndpoint
| RMDropRestEndpoint !DropEndpoint
| -- Custom types
RMSetCustomTypes !CustomTypes
| -- Api limits
RMSetApiLimits !ApiLimit
| RMRemoveApiLimits
| -- Metrics config
RMSetMetricsConfig !MetricsConfig
| RMRemoveMetricsConfig
| -- Inherited roles
RMAddInheritedRole !InheritedRole
| RMDropInheritedRole !DropInheritedRole
| -- Metadata management
RMReplaceMetadata !ReplaceMetadata
| RMExportMetadata !ExportMetadata
| RMClearMetadata !ClearMetadata
| RMReloadMetadata !ReloadMetadata
| RMGetInconsistentMetadata !GetInconsistentMetadata
| RMDropInconsistentMetadata !DropInconsistentMetadata
| -- Introspection options
RMSetGraphqlSchemaIntrospectionOptions !SetGraphqlIntrospectionOptions
| -- Network
RMAddHostToTLSAllowlist !AddHostToTLSAllowlist
| RMDropHostFromTLSAllowlist !DropHostFromTLSAllowlist
| -- QueryTags
RMSetQueryTagsConfig !SetQueryTagsConfig
| -- Debug
RMDumpInternalState !DumpInternalState
| RMGetCatalogState !GetCatalogState
| RMSetCatalogState !SetCatalogState
| RMTestWebhookTransform !TestWebhookTransform
| -- Bulk metadata queries
RMBulk [RQLMetadataRequest]
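
-- Commands are parsed from a JSON object of the shape
--
-- > {"type": "<command>", "args": <command arguments>}
--
-- The backend-agnostic commands below are matched on @type@ directly. Any
-- other @type@ is split on its first underscore into a backend prefix and a
-- command name (e.g. @pg_track_table@ is read as the @pg@ prefix plus the
-- @track_table@ command) and handed to that backend's
-- 'metadataV1CommandParsers'.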
instance FromJSON RQLMetadataV1 where
parseJSON = withObject "RQLMetadataV1" \o -> do
queryType <- o .: "type"
let args :: forall a. FromJSON a => A.Parser a
args = o .: "args"
case queryType of
-- backend agnostic
"rename_source" -> RMRenameSource <$> args
"add_remote_schema" -> RMAddRemoteSchema <$> args
"update_remote_schema" -> RMUpdateRemoteSchema <$> args
"remove_remote_schema" -> RMRemoveRemoteSchema <$> args
"reload_remote_schema" -> RMReloadRemoteSchema <$> args
"introspect_remote_schema" -> RMIntrospectRemoteSchema <$> args
"add_remote_schema_permissions" -> RMAddRemoteSchemaPermissions <$> args
"drop_remote_schema_permissions" -> RMDropRemoteSchemaPermissions <$> args
"create_cron_trigger" -> RMCreateCronTrigger <$> args
"delete_cron_trigger" -> RMDeleteCronTrigger <$> args
"create_scheduled_event" -> RMCreateScheduledEvent <$> args
"delete_scheduled_event" -> RMDeleteScheduledEvent <$> args
"get_scheduled_events" -> RMGetScheduledEvents <$> args
"get_event_invocations" -> RMGetEventInvocations <$> args
"create_action" -> RMCreateAction <$> args
"drop_action" -> RMDropAction <$> args
"update_action" -> RMUpdateAction <$> args
"create_action_permission" -> RMCreateActionPermission <$> args
"drop_action_permission" -> RMDropActionPermission <$> args
"create_query_collection" -> RMCreateQueryCollection <$> args
"drop_query_collection" -> RMDropQueryCollection <$> args
"add_query_to_collection" -> RMAddQueryToCollection <$> args
"drop_query_from_collection" -> RMDropQueryFromCollection <$> args
"add_collection_to_allowlist" -> RMAddCollectionToAllowlist <$> args
"drop_collection_from_allowlist" -> RMDropCollectionFromAllowlist <$> args
"create_rest_endpoint" -> RMCreateRestEndpoint <$> args
"drop_rest_endpoint" -> RMDropRestEndpoint <$> args
"set_custom_types" -> RMSetCustomTypes <$> args
"set_api_limits" -> RMSetApiLimits <$> args
"remove_api_limits" -> pure RMRemoveApiLimits
"set_metrics_config" -> RMSetMetricsConfig <$> args
"remove_metrics_config" -> pure RMRemoveMetricsConfig
"add_inherited_role" -> RMAddInheritedRole <$> args
"drop_inherited_role" -> RMDropInheritedRole <$> args
"replace_metadata" -> RMReplaceMetadata <$> args
"export_metadata" -> RMExportMetadata <$> args
"clear_metadata" -> RMClearMetadata <$> args
"reload_metadata" -> RMReloadMetadata <$> args
"get_inconsistent_metadata" -> RMGetInconsistentMetadata <$> args
"drop_inconsistent_metadata" -> RMDropInconsistentMetadata <$> args
"add_host_to_tls_allowlist" -> RMAddHostToTLSAllowlist <$> args
"drop_host_from_tls_allowlist" -> RMDropHostFromTLSAllowlist <$> args
"dump_internal_state" -> RMDumpInternalState <$> args
"get_catalog_state" -> RMGetCatalogState <$> args
"set_catalog_state" -> RMSetCatalogState <$> args
"set_graphql_schema_introspection_options" -> RMSetGraphqlSchemaIntrospectionOptions <$> args
"test_webhook_transform" -> RMTestWebhookTransform <$> args
"set_query_tags" -> RMSetQueryTagsConfig <$> args
"bulk" -> RMBulk <$> args
-- backend specific
_ -> do
let (prefix, T.drop 1 -> cmd) = T.breakOn "_" queryType
backendType <-
runAesonParser parseJSON (String prefix)
`onLeft` \_ ->
fail
( "unknown metadata command \"" <> T.unpack queryType
<> "\"; \""
<> T.unpack prefix
<> "\" was not recognized as a valid backend name"
)
dispatchAnyBackend @BackendAPI (liftTag backendType) \(_ :: BackendTag b) -> do
argValue <- args
command <- choice <$> sequenceA [p cmd argValue | p <- metadataV1CommandParsers @b]
onNothing command $
fail $
"unknown metadata command \"" <> T.unpack cmd
<> "\" for backend "
<> T.unpack (T.toTxt backendType)
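
-- | A metadata command accepted by the v2 API, which currently only covers
-- replacing and exporting metadata.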
data RQLMetadataV2
= RMV2ReplaceMetadata !ReplaceMetadataV2
| RMV2ExportMetadata !ExportMetadata
deriving (Generic)
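
-- The generically derived parser expects the same @{"type": ..., "args": ...}@
-- shape as v1; dropping the @RMV2@ prefix and snake-casing the rest maps, for
-- example, 'RMV2ExportMetadata' to the @export_metadata@ command.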
instance FromJSON RQLMetadataV2 where
parseJSON =
genericParseJSON $
defaultOptions
{ constructorTagModifier = snakeCase . drop 4,
sumEncoding = TaggedObject "type" "args"
}
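
-- | A metadata request, tagged with the API version it targets.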
data RQLMetadataRequest
= RMV1 !RQLMetadataV1
| RMV2 !RQLMetadataV2
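
-- The version is read from the optional @version@ field and defaults to v1;
-- the rest of the object is then re-parsed as the corresponding payload. A
-- v2 export therefore looks roughly like
--
-- > {"type": "export_metadata", "version": 2, "args": {}}
--
-- (assuming the numeric encoding accepted by the 'APIVersion' parser).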
instance FromJSON RQLMetadataRequest where
parseJSON = withObject "RQLMetadataRequest" $ \o -> do
version <- o .:? "version" .!= VIVersion1
let val = Object o
case version of
VIVersion1 -> RMV1 <$> parseJSON val
VIVersion2 -> RMV2 <$> parseJSON val
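
-- | The top-level request object: the request itself plus an optional
-- @resource_version@ that is used in place of the current resource version
-- when the modified metadata is written back (see 'runMetadataQuery').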
data RQLMetadata = RQLMetadata
{ _rqlMetadataResourceVersion :: !(Maybe MetadataResourceVersion),
_rqlMetadata :: !RQLMetadataRequest
}
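
-- Parses the optional @resource_version@ and then re-parses the same object
-- as an 'RQLMetadataRequest'.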
instance FromJSON RQLMetadata where
parseJSON = withObject "RQLMetadata" $ \o -> do
_rqlMetadataResourceVersion <- o .:? "resource_version"
_rqlMetadata <- parseJSON $ Object o
pure RQLMetadata {..}
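
-- | Entry point for the @/v1/metadata@ endpoint: fetch the stored metadata,
-- run the request against the rebuildable schema cache and, if the request
-- modifies metadata, persist the result. Metadata writes are rejected while
-- maintenance mode is enabled; successful writes bump the resource version,
-- notify the other instances so they can sync their schema caches, and
-- record the new version in the local schema cache.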
runMetadataQuery ::
( MonadIO m,
MonadBaseControl IO m,
Tracing.MonadTrace m,
MonadMetadataStorage m,
MonadResolveSource m
) =>
Env.Environment ->
L.Logger L.Hasura ->
InstanceId ->
UserInfo ->
HTTP.Manager ->
ServerConfigCtx ->
RebuildableSchemaCache ->
RQLMetadata ->
m (EncJSON, RebuildableSchemaCache)
runMetadataQuery env logger instanceId userInfo httpManager serverConfigCtx schemaCache RQLMetadata {..} = do
(metadata, currentResourceVersion) <- fetchMetadata
((r, modMetadata), modSchemaCache, cacheInvalidations) <-
runMetadataQueryM env currentResourceVersion _rqlMetadata
& flip runReaderT logger
& runMetadataT metadata
& runCacheRWT schemaCache
& peelRun (RunCtx userInfo httpManager serverConfigCtx)
& runExceptT
& liftEitherM
-- set modified metadata in storage
if queryModifiesMetadata _rqlMetadata
then case _sccMaintenanceMode serverConfigCtx of
MaintenanceModeDisabled -> do
-- set modified metadata in storage
newResourceVersion <- setMetadata (fromMaybe currentResourceVersion _rqlMetadataResourceVersion) modMetadata
-- notify schema cache sync
notifySchemaCacheSync newResourceVersion instanceId cacheInvalidations
(_, modSchemaCache', _) <-
setMetadataResourceVersionInSchemaCache newResourceVersion
& runCacheRWT modSchemaCache
& peelRun (RunCtx userInfo httpManager serverConfigCtx)
& runExceptT
& liftEitherM
pure (r, modSchemaCache')
MaintenanceModeEnabled ->
throw500 "metadata cannot be modified in maintenance mode"
else pure (r, modSchemaCache)
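
-- | Whether the given request can change the metadata. Commands that leave
-- the metadata untouched (event redelivery, introspection, exports, catalog
-- state reads and writes, scheduled-event operations, ...) are listed
-- explicitly; a @bulk@ request modifies metadata iff any of its sub-requests
-- does, and everything else is treated as a modification.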
queryModifiesMetadata :: RQLMetadataRequest -> Bool
queryModifiesMetadata = \case
RMV1 q ->
case q of
RMRedeliverEvent _ -> False
RMInvokeEventTrigger _ -> False
RMGetInconsistentMetadata _ -> False
RMIntrospectRemoteSchema _ -> False
RMDumpInternalState _ -> False
RMSetCatalogState _ -> False
RMGetCatalogState _ -> False
RMExportMetadata _ -> False
RMGetEventInvocations _ -> False
RMGetScheduledEvents _ -> False
RMCreateScheduledEvent _ -> False
RMDeleteScheduledEvent _ -> False
RMTestWebhookTransform _ -> False
RMBulk qs -> any queryModifiesMetadata qs
_ -> True
RMV2 q ->
case q of
RMV2ExportMetadata _ -> False
_ -> True
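
-- | Run a request of either version, pushing @args@ onto the error path so
-- that failures are reported against the request's arguments.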
runMetadataQueryM ::
( MonadIO m,
MonadBaseControl IO m,
CacheRWM m,
Tracing.MonadTrace m,
UserInfoM m,
HTTP.HasHttpManagerM m,
MetadataM m,
MonadMetadataStorageQueryAPI m,
HasServerConfigCtx m,
MonadReader r m,
Has (L.Logger L.Hasura) r
) =>
Env.Environment ->
MetadataResourceVersion ->
RQLMetadataRequest ->
m EncJSON
runMetadataQueryM env currentResourceVersion =
withPathK "args" . \case
RMV1 q -> runMetadataQueryV1M env currentResourceVersion q
RMV2 q -> runMetadataQueryV2M currentResourceVersion q
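
-- | Dispatch a v1 command to its handler. Backend-dispatched commands go
-- through the helpers in the @where@ clause, which unwrap 'AnyBackend'
-- against the required backend class(es): 'BackendMetadata',
-- 'BackendEventTrigger', or both.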
runMetadataQueryV1M ::
forall m r.
( MonadIO m,
MonadBaseControl IO m,
CacheRWM m,
Tracing.MonadTrace m,
UserInfoM m,
HTTP.HasHttpManagerM m,
MetadataM m,
MonadMetadataStorageQueryAPI m,
HasServerConfigCtx m,
MonadReader r m,
Has (L.Logger L.Hasura) r
) =>
Env.Environment ->
MetadataResourceVersion ->
RQLMetadataV1 ->
m EncJSON
runMetadataQueryV1M env currentResourceVersion = \case
RMAddSource q -> dispatchMetadata runAddSource q
RMDropSource q -> runDropSource q
RMRenameSource q -> runRenameSource q
RMTrackTable q -> dispatchMetadata runTrackTableV2Q q
RMUntrackTable q -> dispatchMetadata runUntrackTableQ q
RMSetFunctionCustomization q -> dispatchMetadata runSetFunctionCustomization q
RMSetTableCustomization q -> dispatchMetadata runSetTableCustomization q
RMPgSetTableIsEnum q -> runSetExistingTableIsEnumQ q
RMCreateInsertPermission q -> dispatchMetadata runCreatePerm q
RMCreateSelectPermission q -> dispatchMetadata runCreatePerm q
RMCreateUpdatePermission q -> dispatchMetadata runCreatePerm q
RMCreateDeletePermission q -> dispatchMetadata runCreatePerm q
RMDropInsertPermission q -> dispatchMetadata runDropPerm q
RMDropSelectPermission q -> dispatchMetadata runDropPerm q
RMDropUpdatePermission q -> dispatchMetadata runDropPerm q
RMDropDeletePermission q -> dispatchMetadata runDropPerm q
RMSetPermissionComment q -> dispatchMetadata runSetPermComment q
RMCreateObjectRelationship q -> dispatchMetadata (runCreateRelationship ObjRel . unCreateObjRel) q
RMCreateArrayRelationship q -> dispatchMetadata (runCreateRelationship ArrRel . unCreateArrRel) q
RMDropRelationship q -> dispatchMetadata runDropRel q
RMSetRelationshipComment q -> dispatchMetadata runSetRelComment q
RMRenameRelationship q -> dispatchMetadata runRenameRel q
RMCreateRemoteRelationship q -> dispatchMetadata runCreateRemoteRelationship q
RMUpdateRemoteRelationship q -> dispatchMetadata runUpdateRemoteRelationship q
RMDeleteRemoteRelationship q -> runDeleteRemoteRelationship q
RMTrackFunction q -> dispatchMetadata runTrackFunctionV2 q
RMUntrackFunction q -> dispatchMetadata runUntrackFunc q
RMCreateFunctionPermission q -> dispatchMetadata runCreateFunctionPermission q
RMDropFunctionPermission q -> dispatchMetadata runDropFunctionPermission q
RMAddComputedField q -> runAddComputedField q
RMDropComputedField q -> runDropComputedField q
RMCreateEventTrigger q -> dispatchMetadata runCreateEventTriggerQuery q
RMDeleteEventTrigger q -> dispatchMetadataAndEventTrigger runDeleteEventTriggerQuery q
RMRedeliverEvent q -> dispatchEventTrigger runRedeliverEvent q
RMInvokeEventTrigger q -> dispatchEventTrigger runInvokeEventTrigger q
RMAddRemoteSchema q -> runAddRemoteSchema env q
RMUpdateRemoteSchema q -> runUpdateRemoteSchema env q
RMRemoveRemoteSchema q -> runRemoveRemoteSchema q
RMReloadRemoteSchema q -> runReloadRemoteSchema q
RMIntrospectRemoteSchema q -> runIntrospectRemoteSchema q
RMAddRemoteSchemaPermissions q -> runAddRemoteSchemaPermissions q
RMDropRemoteSchemaPermissions q -> runDropRemoteSchemaPermissions q
RMCreateCronTrigger q -> runCreateCronTrigger q
RMDeleteCronTrigger q -> runDeleteCronTrigger q
RMCreateScheduledEvent q -> runCreateScheduledEvent q
RMDeleteScheduledEvent q -> runDeleteScheduledEvent q
RMGetScheduledEvents q -> runGetScheduledEvents q
RMGetEventInvocations q -> runGetEventInvocations q
RMCreateAction q -> runCreateAction q
RMDropAction q -> runDropAction q
RMUpdateAction q -> runUpdateAction q
RMCreateActionPermission q -> runCreateActionPermission q
RMDropActionPermission q -> runDropActionPermission q
RMCreateQueryCollection q -> runCreateCollection q
RMDropQueryCollection q -> runDropCollection q
RMAddQueryToCollection q -> runAddQueryToCollection q
RMDropQueryFromCollection q -> runDropQueryFromCollection q
RMAddCollectionToAllowlist q -> runAddCollectionToAllowlist q
RMDropCollectionFromAllowlist q -> runDropCollectionFromAllowlist q
RMCreateRestEndpoint q -> runCreateEndpoint q
RMDropRestEndpoint q -> runDropEndpoint q
RMSetCustomTypes q -> runSetCustomTypes q
RMSetApiLimits q -> runSetApiLimits q
RMRemoveApiLimits -> runRemoveApiLimits
RMSetMetricsConfig q -> runSetMetricsConfig q
RMRemoveMetricsConfig -> runRemoveMetricsConfig
RMAddInheritedRole q -> runAddInheritedRole q
RMDropInheritedRole q -> runDropInheritedRole q
RMReplaceMetadata q -> runReplaceMetadata q
RMExportMetadata q -> runExportMetadata q
RMClearMetadata q -> runClearMetadata q
RMReloadMetadata q -> runReloadMetadata q
RMGetInconsistentMetadata q -> runGetInconsistentMetadata q
RMDropInconsistentMetadata q -> runDropInconsistentMetadata q
RMSetGraphqlSchemaIntrospectionOptions q -> runSetGraphqlSchemaIntrospectionOptions q
RMAddHostToTLSAllowlist q -> runAddHostToTLSAllowlist q
RMDropHostFromTLSAllowlist q -> runDropHostFromTLSAllowlist q
RMDumpInternalState q -> runDumpInternalState q
RMGetCatalogState q -> runGetCatalogState q
RMSetCatalogState q -> runSetCatalogState q
RMTestWebhookTransform q -> runTestWebhookTransform env q
RMSetQueryTagsConfig q -> runSetQueryTagsConfig q
RMBulk q -> encJFromList <$> indexedMapM (runMetadataQueryM env currentResourceVersion) q
where
dispatchMetadata ::
(forall b. BackendMetadata b => i b -> a) ->
AnyBackend i ->
a
dispatchMetadata f x = dispatchAnyBackend @BackendMetadata x f
dispatchEventTrigger :: (forall b. BackendEventTrigger b => i b -> a) -> AnyBackend i -> a
dispatchEventTrigger f x = dispatchAnyBackend @BackendEventTrigger x f
dispatchMetadataAndEventTrigger ::
(forall b. (BackendMetadata b, BackendEventTrigger b) => i b -> a) ->
AnyBackend i ->
a
dispatchMetadataAndEventTrigger f x = dispatchAnyBackendWithTwoConstraints @BackendMetadata @BackendEventTrigger x f
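
-- | Run a v2 command, threading the current metadata resource version
-- through to the export handler.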
runMetadataQueryV2M ::
( MonadIO m,
CacheRWM m,
MetadataM m,
MonadMetadataStorageQueryAPI m,
MonadReader r m,
Has (L.Logger L.Hasura) r
) =>
MetadataResourceVersion ->
RQLMetadataV2 ->
m EncJSON
runMetadataQueryV2M currentResourceVersion = \case
RMV2ReplaceMetadata q -> runReplaceMetadataV2 q
RMV2ExportMetadata q -> runExportMetadataV2 currentResourceVersion q