{-# LANGUAGE ViewPatterns #-}

-- | The RQL metadata query ('/v1/metadata')
module Hasura.Server.API.Metadata
  ( RQLMetadata,
    RQLMetadataV1 (..),
    runMetadataQuery,
  )
where

import Control.Lens (_Just)
import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson
import Data.Aeson.Casing
import Data.Aeson.Types qualified as A
import Data.Environment qualified as Env
import Data.Has (Has)
import Data.Text qualified as T
import Data.Text.Extended qualified as T
import GHC.Generics.Extended (constrName)
import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.Logging qualified as L
import Hasura.LogicalModel.API qualified as LogicalModels
import Hasura.Metadata.Class
import Hasura.Prelude hiding (first)
import Hasura.RQL.DDL.Action
import Hasura.RQL.DDL.ApiLimit
import Hasura.RQL.DDL.ComputedField
import Hasura.RQL.DDL.ConnectionTemplate
import Hasura.RQL.DDL.CustomTypes
import Hasura.RQL.DDL.DataConnector
import Hasura.RQL.DDL.Endpoint
import Hasura.RQL.DDL.EventTrigger
import Hasura.RQL.DDL.FeatureFlag
import Hasura.RQL.DDL.GraphqlSchemaIntrospection
import Hasura.RQL.DDL.InheritedRoles
import Hasura.RQL.DDL.Metadata
import Hasura.RQL.DDL.Network
import Hasura.RQL.DDL.OpenTelemetry
import Hasura.RQL.DDL.Permission
import Hasura.RQL.DDL.QueryCollection
import Hasura.RQL.DDL.QueryTags
import Hasura.RQL.DDL.Relationship
import Hasura.RQL.DDL.Relationship.Rename
import Hasura.RQL.DDL.Relationship.Suggest
import Hasura.RQL.DDL.RemoteRelationship
import Hasura.RQL.DDL.ScheduledTrigger
import Hasura.RQL.DDL.Schema
import Hasura.RQL.DDL.Schema.Source
import Hasura.RQL.DDL.SourceKinds
import Hasura.RQL.DDL.Webhook.Transform.Validation
import Hasura.RQL.Types.Action
import Hasura.RQL.Types.Allowlist
import Hasura.RQL.Types.ApiLimit
import Hasura.RQL.Types.Common
import Hasura.RQL.Types.CustomTypes
import Hasura.RQL.Types.Endpoint
import Hasura.RQL.Types.EventTrigger
import Hasura.RQL.Types.Eventing.Backend
import Hasura.RQL.Types.GraphqlSchemaIntrospection
import Hasura.RQL.Types.Metadata (GetCatalogState, SetCatalogState, emptyMetadataDefaults)
import Hasura.RQL.Types.Metadata.Backend
import Hasura.RQL.Types.Network
import Hasura.RQL.Types.OpenTelemetry
import Hasura.RQL.Types.Permission
import Hasura.RQL.Types.QueryCollection
import Hasura.RQL.Types.Roles
import Hasura.RQL.Types.Run
import Hasura.RQL.Types.ScheduledTrigger
import Hasura.RQL.Types.SchemaCache
import Hasura.RQL.Types.SchemaCache.Build
import Hasura.RQL.Types.Source
import Hasura.RemoteSchema.MetadataAPI
import Hasura.SQL.AnyBackend
import Hasura.SQL.Backend
import Hasura.Server.API.Backend
import Hasura.Server.API.Instances ()
import Hasura.Server.Logging (SchemaSyncLog (..), SchemaSyncThreadType (TTMetadataApi))
import Hasura.Server.SchemaCacheRef
import Hasura.Server.Types
import Hasura.Server.Utils (APIVersion (..))
import Hasura.Services
import Hasura.Session
import Hasura.Tracing qualified as Tracing

data RQLMetadataV1
  = -- Sources
    RMAddSource !(AnyBackend AddSource)
  | RMDropSource DropSource
  | RMRenameSource !RenameSource
  | RMUpdateSource !(AnyBackend UpdateSource)
  | RMListSourceKinds !ListSourceKinds
  | RMGetSourceKindCapabilities !GetSourceKindCapabilities
  | RMGetSourceTables !GetSourceTables
  | RMGetTableInfo !GetTableInfo
  | -- Tables
    RMTrackTable !(AnyBackend TrackTableV2)
  | RMUntrackTable !(AnyBackend UntrackTable)
  | RMSetTableCustomization !(AnyBackend SetTableCustomization)
  | RMSetApolloFederationConfig (AnyBackend SetApolloFederationConfig)
  | RMPgSetTableIsEnum !(AnyBackend SetTableIsEnum)
  | -- Tables permissions
    RMCreateInsertPermission !(AnyBackend (CreatePerm InsPerm))
  | RMCreateSelectPermission !(AnyBackend (CreatePerm SelPerm))
  | RMCreateUpdatePermission !(AnyBackend (CreatePerm UpdPerm))
  | RMCreateDeletePermission !(AnyBackend (CreatePerm DelPerm))
  | RMDropInsertPermission !(AnyBackend DropPerm)
  | RMDropSelectPermission !(AnyBackend DropPerm)
  | RMDropUpdatePermission !(AnyBackend DropPerm)
  | RMDropDeletePermission !(AnyBackend DropPerm)
  | RMSetPermissionComment !(AnyBackend SetPermComment)
  | -- Tables relationships
    RMCreateObjectRelationship !(AnyBackend CreateObjRel)
  | RMCreateArrayRelationship !(AnyBackend CreateArrRel)
  | RMDropRelationship !(AnyBackend DropRel)
  | RMSetRelationshipComment !(AnyBackend SetRelComment)
  | RMRenameRelationship !(AnyBackend RenameRel)
  | RMSuggestRelationships !(AnyBackend SuggestRels)
  | -- Tables remote relationships
    RMCreateRemoteRelationship !(AnyBackend CreateFromSourceRelationship)
  | RMUpdateRemoteRelationship !(AnyBackend CreateFromSourceRelationship)
  | RMDeleteRemoteRelationship !(AnyBackend DeleteFromSourceRelationship)
  | -- Functions
    RMTrackFunction !(AnyBackend TrackFunctionV2)
  | RMUntrackFunction !(AnyBackend UnTrackFunction)
  | RMSetFunctionCustomization (AnyBackend SetFunctionCustomization)
  | -- Functions permissions
    RMCreateFunctionPermission !(AnyBackend FunctionPermissionArgument)
  | RMDropFunctionPermission !(AnyBackend FunctionPermissionArgument)
  | -- Computed fields
    RMAddComputedField !(AnyBackend AddComputedField)
  | RMDropComputedField !(AnyBackend DropComputedField)
  | -- Connection template
    RMTestConnectionTemplate !(AnyBackend TestConnectionTemplate)
  | -- Logical Models
    RMGetLogicalModel !(AnyBackend LogicalModels.GetLogicalModel)
  | RMTrackLogicalModel !(AnyBackend LogicalModels.TrackLogicalModel)
  | RMUntrackLogicalModel !(AnyBackend LogicalModels.UntrackLogicalModel)
  | RMCreateSelectLogicalModelPermission !(AnyBackend (LogicalModels.CreateLogicalModelPermission SelPerm))
  | RMDropSelectLogicalModelPermission !(AnyBackend LogicalModels.DropLogicalModelPermission)
  | -- Tables event triggers
    RMCreateEventTrigger !(AnyBackend (Unvalidated1 CreateEventTriggerQuery))
  | RMDeleteEventTrigger !(AnyBackend DeleteEventTriggerQuery)
  | RMRedeliverEvent !(AnyBackend RedeliverEventQuery)
  | RMInvokeEventTrigger !(AnyBackend InvokeEventTriggerQuery)
  | RMCleanupEventTriggerLog !TriggerLogCleanupConfig
  | RMResumeEventTriggerCleanup !TriggerLogCleanupToggleConfig
  | RMPauseEventTriggerCleanup !TriggerLogCleanupToggleConfig
  | -- Remote schemas
    RMAddRemoteSchema !AddRemoteSchemaQuery
  | RMUpdateRemoteSchema !AddRemoteSchemaQuery
  | RMRemoveRemoteSchema !RemoteSchemaNameQuery
  | RMReloadRemoteSchema !RemoteSchemaNameQuery
  | RMIntrospectRemoteSchema !RemoteSchemaNameQuery
  | -- Remote schemas permissions
    RMAddRemoteSchemaPermissions !AddRemoteSchemaPermission
  | RMDropRemoteSchemaPermissions !DropRemoteSchemaPermissions
  | -- Remote Schema remote relationships
    RMCreateRemoteSchemaRemoteRelationship CreateRemoteSchemaRemoteRelationship
  | RMUpdateRemoteSchemaRemoteRelationship CreateRemoteSchemaRemoteRelationship
  | RMDeleteRemoteSchemaRemoteRelationship DeleteRemoteSchemaRemoteRelationship
  | -- Scheduled triggers
    RMCreateCronTrigger !(Unvalidated CreateCronTrigger)
  | RMDeleteCronTrigger !ScheduledTriggerName
  | RMCreateScheduledEvent !CreateScheduledEvent
  | RMDeleteScheduledEvent !DeleteScheduledEvent
  | RMGetScheduledEvents !GetScheduledEvents
  | RMGetScheduledEventInvocations !GetScheduledEventInvocations
  | RMGetCronTriggers
  | -- Actions
    RMCreateAction !(Unvalidated CreateAction)
  | RMDropAction !DropAction
  | RMUpdateAction !(Unvalidated UpdateAction)
  | RMCreateActionPermission !CreateActionPermission
  | RMDropActionPermission !DropActionPermission
  | -- Query collections, allow list related
    RMCreateQueryCollection !CreateCollection
  | RMRenameQueryCollection !RenameCollection
  | RMDropQueryCollection !DropCollection
  | RMAddQueryToCollection !AddQueryToCollection
  | RMDropQueryFromCollection !DropQueryFromCollection
  | RMAddCollectionToAllowlist !AllowlistEntry
  | RMDropCollectionFromAllowlist !DropCollectionFromAllowlist
  | RMUpdateScopeOfCollectionInAllowlist !UpdateScopeOfCollectionInAllowlist
  | -- Rest endpoints
    RMCreateRestEndpoint !CreateEndpoint
  | RMDropRestEndpoint !DropEndpoint
  | -- GraphQL Data Connectors
    RMDCAddAgent !DCAddAgent
  | RMDCDeleteAgent !DCDeleteAgent
  | -- Custom types
    RMSetCustomTypes !CustomTypes
  | -- Api limits
    RMSetApiLimits !ApiLimit
  | RMRemoveApiLimits
  | -- Metrics config
    RMSetMetricsConfig !MetricsConfig
  | RMRemoveMetricsConfig
  | -- Inherited roles
    RMAddInheritedRole !InheritedRole
  | RMDropInheritedRole !DropInheritedRole
  | -- Metadata management
    RMReplaceMetadata !ReplaceMetadata
  | RMExportMetadata !ExportMetadata
  | RMClearMetadata !ClearMetadata
  | RMReloadMetadata !ReloadMetadata
  | RMGetInconsistentMetadata !GetInconsistentMetadata
  | RMDropInconsistentMetadata !DropInconsistentMetadata
  | -- Introspection options
    RMSetGraphqlSchemaIntrospectionOptions !SetGraphqlIntrospectionOptions
  | -- Network
    RMAddHostToTLSAllowlist !AddHostToTLSAllowlist
  | RMDropHostFromTLSAllowlist !DropHostFromTLSAllowlist
  | -- QueryTags
    RMSetQueryTagsConfig !SetQueryTagsConfig
  | -- OpenTelemetry
    RMSetOpenTelemetryConfig !OpenTelemetryConfig
  | RMSetOpenTelemetryStatus !OtelStatus
  | -- Debug
    RMDumpInternalState !DumpInternalState
  | RMGetCatalogState !GetCatalogState
  | RMSetCatalogState !SetCatalogState
  | RMTestWebhookTransform !(Unvalidated TestWebhookTransform)
  | -- Feature Flags
    RMGetFeatureFlag !GetFeatureFlag
  | -- Bulk metadata queries
    RMBulk [RQLMetadataRequest]
  deriving (Generic)

-- NOTE! If you add a new request type here that is read-only, make sure to
-- update queryModifiesMetadata

instance FromJSON RQLMetadataV1 where
  parseJSON = withObject "RQLMetadataV1" \o -> do
    queryType <- o .: "type"
    let args :: forall a. FromJSON a => A.Parser a
        args = o .: "args"
    case queryType of
      -- backend agnostic
      "rename_source" -> RMRenameSource <$> args
      "add_remote_schema" -> RMAddRemoteSchema <$> args
      "update_remote_schema" -> RMUpdateRemoteSchema <$> args
      "remove_remote_schema" -> RMRemoveRemoteSchema <$> args
      "reload_remote_schema" -> RMReloadRemoteSchema <$> args
      "introspect_remote_schema" -> RMIntrospectRemoteSchema <$> args
      "add_remote_schema_permissions" -> RMAddRemoteSchemaPermissions <$> args
      "drop_remote_schema_permissions" -> RMDropRemoteSchemaPermissions <$> args
      "create_remote_schema_remote_relationship" -> RMCreateRemoteSchemaRemoteRelationship <$> args
      "update_remote_schema_remote_relationship" -> RMUpdateRemoteSchemaRemoteRelationship <$> args
      "delete_remote_schema_remote_relationship" -> RMDeleteRemoteSchemaRemoteRelationship <$> args
      "cleanup_event_trigger_logs" -> RMCleanupEventTriggerLog <$> args
      "resume_event_trigger_cleanups" -> RMResumeEventTriggerCleanup <$> args
      "pause_event_trigger_cleanups" -> RMPauseEventTriggerCleanup <$> args
      "create_cron_trigger" -> RMCreateCronTrigger <$> args
      "delete_cron_trigger" -> RMDeleteCronTrigger <$> args
      "create_scheduled_event" -> RMCreateScheduledEvent <$> args
      "delete_scheduled_event" -> RMDeleteScheduledEvent <$> args
      "get_scheduled_events" -> RMGetScheduledEvents <$> args
      "get_scheduled_event_invocations" -> RMGetScheduledEventInvocations <$> args
      "get_cron_triggers" -> pure RMGetCronTriggers
      "create_action" -> RMCreateAction <$> args
      "drop_action" -> RMDropAction <$> args
      "update_action" -> RMUpdateAction <$> args
      "create_action_permission" -> RMCreateActionPermission <$> args
      "drop_action_permission" -> RMDropActionPermission <$> args
      "create_query_collection" -> RMCreateQueryCollection <$> args
      "rename_query_collection" -> RMRenameQueryCollection <$> args
      "drop_query_collection" -> RMDropQueryCollection <$> args
      "add_query_to_collection" -> RMAddQueryToCollection <$> args
      "drop_query_from_collection" -> RMDropQueryFromCollection <$> args
      "add_collection_to_allowlist" -> RMAddCollectionToAllowlist <$> args
      "drop_collection_from_allowlist" -> RMDropCollectionFromAllowlist <$> args
      "update_scope_of_collection_in_allowlist" -> RMUpdateScopeOfCollectionInAllowlist <$> args
      "create_rest_endpoint" -> RMCreateRestEndpoint <$> args
      "drop_rest_endpoint" -> RMDropRestEndpoint <$> args
      "dc_add_agent" -> RMDCAddAgent <$> args
      "dc_delete_agent" -> RMDCDeleteAgent <$> args
      "list_source_kinds" -> RMListSourceKinds <$> args
      "get_source_kind_capabilities" -> RMGetSourceKindCapabilities <$> args
      "get_source_tables" -> RMGetSourceTables <$> args
      "get_table_info" -> RMGetTableInfo <$> args
      "set_custom_types" -> RMSetCustomTypes <$> args
      "set_api_limits" -> RMSetApiLimits <$> args
      "remove_api_limits" -> pure RMRemoveApiLimits
      "set_metrics_config" -> RMSetMetricsConfig <$> args
      "remove_metrics_config" -> pure RMRemoveMetricsConfig
      "add_inherited_role" -> RMAddInheritedRole <$> args
      "drop_inherited_role" -> RMDropInheritedRole <$> args
      "replace_metadata" -> RMReplaceMetadata <$> args
      "export_metadata" -> RMExportMetadata <$> args
      "clear_metadata" -> RMClearMetadata <$> args
      "reload_metadata" -> RMReloadMetadata <$> args
      "get_inconsistent_metadata" -> RMGetInconsistentMetadata <$> args
      "drop_inconsistent_metadata" -> RMDropInconsistentMetadata <$> args
      "add_host_to_tls_allowlist" -> RMAddHostToTLSAllowlist <$> args
      "drop_host_from_tls_allowlist" -> RMDropHostFromTLSAllowlist <$> args
      "dump_internal_state" -> RMDumpInternalState <$> args
      "get_catalog_state" -> RMGetCatalogState <$> args
      "set_catalog_state" -> RMSetCatalogState <$> args
      "set_graphql_schema_introspection_options" -> RMSetGraphqlSchemaIntrospectionOptions <$> args
      "test_webhook_transform" -> RMTestWebhookTransform <$> args
      "set_query_tags" -> RMSetQueryTagsConfig <$> args
      "set_opentelemetry_config" -> RMSetOpenTelemetryConfig <$> args
      "set_opentelemetry_status" -> RMSetOpenTelemetryStatus <$> args
      "get_feature_flag" -> RMGetFeatureFlag <$> args
      "bulk" -> RMBulk <$> args
      -- Backend prefixed metadata actions:
      _ -> do
        -- 1) Parse the backend source kind and metadata command:
        (backendSourceKind, cmd) <- parseQueryType queryType
        dispatchAnyBackend @BackendAPI backendSourceKind \(backendSourceKind' :: BackendSourceKind b) -> do
          -- 2) Parse the args field:
          argValue <- args
          -- 3) Attempt to run all the backend-specific command parsers against the source kind, cmd, and arg:
          -- NOTE: If multiple parsers succeed, this will pick out the first successful one.
          command <- choice <$> sequenceA [p backendSourceKind' cmd argValue | p <- metadataV1CommandParsers @b]
          onNothing command $
            fail $
              "unknown metadata command \""
                <> T.unpack cmd
                <> "\" for backend "
                <> T.unpack (T.toTxt backendSourceKind')
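
-- A worked example of the backend-prefixed fallthrough above (a sketch; the
-- argument values are illustrative, not authoritative): a request such as
--
-- > {"type": "pg_track_table", "args": {"source": "default", "table": "authors"}}
--
-- matches none of the backend-agnostic cases, so 'parseQueryType' splits the
-- "type" field into the backend prefix ("pg") and the command ("track_table"),
-- resolves the prefix to a 'BackendSourceKind', and then tries each parser in
-- 'metadataV1CommandParsers' for that backend against the command and args,
-- keeping the first one that succeeds.
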
-- | Parse the Metadata API action type returning a tuple of the
-- 'BackendSourceKind' and the action suffix.
--
-- For example: @"pg_add_source"@ parses as @(PostgresVanillaValue, "add_source")@
parseQueryType :: MonadFail m => Text -> m (AnyBackend BackendSourceKind, Text)
parseQueryType queryType =
  let (prefix, T.drop 1 -> cmd) = T.breakOn "_" queryType
   in (,cmd)
        <$> backendSourceKindFromText prefix
        `onNothing` fail
          ( "unknown metadata command \""
              <> T.unpack queryType
              <> "\"; \""
              <> T.unpack prefix
              <> "\" was not recognized as a valid backend name"
          )
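
-- Informally: @T.breakOn "_" "pg_add_source"@ yields @("pg", "_add_source")@,
-- the @T.drop 1@ view pattern strips the leading underscore to leave the
-- command @"add_source"@, and the prefix is looked up with
-- 'backendSourceKindFromText'. A type whose prefix is not a known backend
-- (or that contains no underscore at all) falls through to the 'fail' case.
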
data RQLMetadataV2
  = RMV2ReplaceMetadata !ReplaceMetadataV2
  | RMV2ExportMetadata !ExportMetadata
  deriving (Generic)

instance FromJSON RQLMetadataV2 where
  parseJSON =
    genericParseJSON $
      defaultOptions
        { constructorTagModifier = snakeCase . drop 4,
          sumEncoding = TaggedObject "type" "args"
        }
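
-- With these options the constructor tag is obtained by dropping the "RMV2"
-- prefix and snake-casing the remainder, so (for example, with an illustrative
-- empty args object) 'RMV2ExportMetadata' corresponds to a payload of the form
--
-- > {"type": "export_metadata", "args": {}}
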
data RQLMetadataRequest
  = RMV1 !RQLMetadataV1
  | RMV2 !RQLMetadataV2

instance FromJSON RQLMetadataRequest where
  parseJSON = withObject "RQLMetadataRequest" $ \o -> do
    version <- o .:? "version" .!= VIVersion1
    let val = Object o
    case version of
      VIVersion1 -> RMV1 <$> parseJSON val
      VIVersion2 -> RMV2 <$> parseJSON val
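
-- A sketch of the version dispatch: the same object is re-parsed as either
-- flavour, so "type" and "args" sit alongside the optional "version" field.
-- A payload with @"version": 2@ is handed to the 'RQLMetadataV2' parser,
-- while omitting "version" (or sending @"version": 1@) selects 'RQLMetadataV1'.
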
-- | The payload for the @/v1/metadata@ endpoint. See:
--
-- https://hasura.io/docs/latest/graphql/core/api-reference/metadata-api/index/
data RQLMetadata = RQLMetadata
  { _rqlMetadataResourceVersion :: !(Maybe MetadataResourceVersion),
    _rqlMetadata :: !RQLMetadataRequest
  }

instance FromJSON RQLMetadata where
  parseJSON = withObject "RQLMetadata" $ \o -> do
    _rqlMetadataResourceVersion <- o .:? "resource_version"
    _rqlMetadata <- parseJSON $ Object o
    pure RQLMetadata {..}
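
-- A sketch of a full payload (field values are illustrative): the optional
-- "resource_version" is read from the same top-level object as the request
-- itself, e.g.
--
-- > {"resource_version": 3, "type": "reload_metadata", "args": {}}
--
-- where "resource_version" is the metadata version used for optimistic
-- concurrency checks and the rest parses as the wrapped 'RQLMetadataRequest'.
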
runMetadataQuery ::
  ( MonadIO m,
    MonadError QErr m,
    MonadBaseControl IO m,
    Tracing.MonadTrace m,
    MonadMetadataStorageQueryAPI m,
    MonadResolveSource m,
harmonize network manager handling
## Description
### I want to speak to the `Manager`
Oh boy. This PR is both fairly straightforward and overreaching, so let's break it down.
For most network access, we need a [`HTTP.Manager`](https://hackage.haskell.org/package/http-client-0.1.0.0/docs/Network-HTTP-Client-Manager.html). It is created only once, at the top level, when starting the engine, and is then threaded through the application to wherever we need to make a network call. As of main, the way we do this is not standardized: most of the GraphQL execution code passes it "manually" as a function argument throughout the code. We also have a custom monad constraint, `HasHttpManagerM`, that describes a monad's ability to provide a manager. And, finally, several parts of the code store the manager in some kind of argument structure, such as `RunT`'s `RunCtx`.
This PR's first goal is to harmonize all of this: we always create the manager at the root, and we already have it when we do our very first `runReaderT`. Wouldn't it make sense for the rest of the code to not manually pass it anywhere, to not store it anywhere, but to always rely on the current monad providing it? This is, in short, what this PR does: it implements a constraint on the base monads, so that they provide the manager, and removes most explicit passing from the code.
### First come, first served
One way this PR goes a tiny bit further than "just" doing the aforementioned harmonization is that it starts the process of implementing the "Services oriented architecture" roughly outlined in this [draft document](https://docs.google.com/document/d/1FAigqrST0juU1WcT4HIxJxe1iEBwTuBZodTaeUvsKqQ/edit?usp=sharing). Instead of using the existing `HasHTTPManagerM`, this PR revamps it into the `ProvidesNetwork` service.
The idea is, again, that we should make all "external" dependencies of the engine, all things that the core of the engine doesn't care about, a "service". This allows us to define clear APIs for features, to choose different implementations based on which version of the engine we're running, harmonizes our many scattered monadic constraints... Which is why this service is called "Network": we can refine it, moving forward, to be the constraint that defines how all network communication is to operate, instead of relying on disparate classes constraint or hardcoded decisions. A comment in the code clarifies this intent.
### Side-effects? In my Haskell?
This PR also unavoidably touches some other aspects of the codebase. One such example: it introduces `Hasura.App.AppContext`, named after `HasuraPro.Context.AppContext`: a name for the reader structure at the base level. It also transforms `Handler` from a type alias to a newtype, as `Handler` is where we actually enforce HTTP limits; but without `Handler` being a distinct type, any code path could simply do a `runExceptT $ runReader` and forget to enforce them.
(As a rule of thumb, i am starting to consider any straggling `runReaderT` or `runExceptT` as a code smell: we should not stack / unstack monads haphazardly, and every layer should be an opaque `newtype` with a corresponding run function.)
## Further work
In several places, i have left TODOs when i have encountered things that suggest that we should do further unrelated cleanups. I'll write down the follow-up steps, either in the aforementioned document or on slack. But, in short, at a glance, in approximate order, we could:
- delete `ExecutionCtx` as it is only a subset of `ServerCtx`, and remove one more `runReaderT` call
- delete `ServerConfigCtx` as it is only a subset of `ServerCtx`, and remove it from `RunCtx`
- remove `ServerCtx` from `HandlerCtx`, and make it part of `AppContext`, or even make it the `AppContext` altogether (since, at least for the OSS version, `AppContext` is there again only a subset)
- remove `CacheBuildParams` and `CacheBuild` altogether, as they're just a distinct stack that is a `ReaderT` on top of `IO` that contains, you guessed it, the same thing as `ServerCtx`
- move `RunT` out of `RQL.Types` and rename it, since after the previous cleanups **it only contains `UserInfo`**; it could be bundled with the authentication service, made a small implementation detail in `Hasura.Server.Auth`
- rename `PGMetadaStorageT` to something a bit more accurate, such as `App`, and enforce its IO base
This would significantly simplify our complex stack. From there, or in parallel, we can start moving existing dependencies into services. For the purpose of supporting the read replicas entitlement, we could move `MonadResolveSource` to a `SourceResolver` service, as attempted in #7653, and transform `UserAuthenticationM` into an `Authentication` service.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/7736
GitOrigin-RevId: 68cce710eb9e7d752bda1ba0c49541d24df8209f
2023-02-22 18:53:52 +03:00
|
|
|
MonadEventLogCleanup m,
|
2023-03-13 14:44:18 +03:00
|
|
|
ProvidesHasuraServices m,
|
|
|
|
MonadGetApiTimeLimit m
|
2021-01-07 12:04:22 +03:00
|
|
|
) =>
|
|
|
|
Env.Environment ->
|
2021-07-27 18:14:12 +03:00
|
|
|
L.Logger L.Hasura ->
|
2021-01-07 12:04:22 +03:00
|
|
|
InstanceId ->
|
|
|
|
UserInfo ->
|
2021-01-29 08:48:17 +03:00
|
|
|
ServerConfigCtx ->
|
2023-02-15 09:11:05 +03:00
|
|
|
SchemaCacheRef ->
|
2021-01-07 12:04:22 +03:00
|
|
|
RQLMetadata ->
|
|
|
|
m (EncJSON, RebuildableSchemaCache)
|
harmonize network manager handling
2023-02-22 18:53:52 +03:00
|
|
|
runMetadataQuery env logger instanceId userInfo serverConfigCtx schemaCacheRef RQLMetadata {..} = do
|
2023-02-15 09:11:05 +03:00
|
|
|
schemaCache <- liftIO $ fst <$> readSchemaCacheRef schemaCacheRef
|
Rewrite `Tracing` to allow for only one `TraceT` in the entire stack.
This PR is on top of #7789.
### Description
This PR entirely rewrites the API of the Tracing library, to make `interpTraceT` a thing of the past. Before this change, we ran traces by sticking a `TraceT` on top of whatever we were doing. This had several major drawbacks:
- we were carrying a bunch of `TraceT` across the codebase, and the entire codebase had to know about it
- we needed to carry a second class constraint around (`HasReporterM`) to be able to run all of those traces
- we kept having to do stack rewriting with `interpTraceT`, which went from inconvenient to horrible
- we had to declare several behavioral instances on `TraceT m`
This PR rewrites all of `Tracing` using a more conventional model: there is ONE `TraceT` at the bottom of the stack, and there is an associated class constraint `MonadTrace`: any part of the code that happens to satisfy `MonadTrace` is able to create new traces. We NEVER have to do stack rewriting, `interpTraceT` is gone, and `TraceT` and `Reporter` become implementation details that 99% of the code is blissfully unaware of: code that needs to do tracing only needs to declare that the monad in which it operates implements `MonadTrace`.
In doing so, this PR revealed **several bugs in the codebase**: places where we were expecting to trace something, but due to the default instance of `HasReporterM IO` we would actually not do anything. This PR also splits the code of `Tracing` into more byte-sized modules, with the goal of potentially moving to `server/lib` down the line.
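A minimal sketch of that model, assuming a class along these lines (in the real library, `newSpan` is built on top of lower-level primitives, so this is a simplification):

```haskell
import Data.Text (Text)

-- One constraint that any monad in the stack can satisfy; code that wants a
-- span never has to know where the single TraceT sits.
class Monad m => MonadTrace m where
  newSpan :: Text -> m a -> m a

-- Example use, mirroring how the metadata handler wraps its work:
fetchMetadataTraced :: MonadTrace m => m a -> m a
fetchMetadataTraced = newSpan "fetchMetadata"
```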
### Remaining work
This PR is a draft; what's left to do is:
- [x] make Pro compile; I haven't updated `HasuraPro/Main` yet
- [x] document Tracing by writing a note that explains how to use the library, and the meaning of "reporter", "trace" and "span", as well as the pitfalls
- [x] discuss some of the trade-offs in the implementation, which is why I'm opening this PR already despite it not fully building yet
- [x] it depends on #7789 being merged first
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/7791
GitOrigin-RevId: cadd32d039134c93ddbf364599a2f4dd988adea8
2023-03-13 20:37:16 +03:00
|
|
|
(metadata, currentResourceVersion) <- Tracing.newSpan "fetchMetadata" $ liftEitherM fetchMetadata
|
2022-10-20 15:45:31 +03:00
|
|
|
let exportsMetadata = \case
|
|
|
|
RMV1 (RMExportMetadata _) -> True
|
|
|
|
RMV2 (RMV2ExportMetadata _) -> True
|
|
|
|
_ -> False
|
|
|
|
metadataDefaults =
|
2022-12-07 01:33:54 +03:00
|
|
|
-- Note: The following check is performed to determine if the metadata defaults can
|
|
|
|
-- be safely merged into the reader at this point.
|
|
|
|
--
|
|
|
|
-- We want to prevent the following scenarios:
|
|
|
|
-- \* Exporting defaults - Contradicting the "roundtrip" principle of metadata operations
|
|
|
|
-- \* Serializing defaults into the metadata storage - Putting data into the user's hdb_catalog
|
|
|
|
--
|
|
|
|
-- While this check does have the desired effect, it relies on the fact that the only
|
|
|
|
-- operations that need access to the defaults here do not export or modify metadata.
|
|
|
|
-- If at some point in the future an operation needs access to the defaults and also needs to
|
|
|
|
-- export/modify metadata, then another approach will need to be taken.
|
|
|
|
--
|
|
|
|
-- Luckily, most actual need for defaults access exists within the schema cache build phase,
|
|
|
|
-- so metadata operations don't need "smarts" that require defaults access.
|
|
|
|
--
|
|
|
|
if (exportsMetadata _rqlMetadata || queryModifiesMetadata _rqlMetadata)
|
2022-10-20 15:45:31 +03:00
|
|
|
then emptyMetadataDefaults
|
|
|
|
else _sccMetadataDefaults serverConfigCtx
|
2021-01-07 12:04:22 +03:00
|
|
|
((r, modMetadata), modSchemaCache, cacheInvalidations) <-
|
2021-02-19 05:39:30 +03:00
|
|
|
runMetadataQueryM env currentResourceVersion _rqlMetadata
|
2021-07-27 18:14:12 +03:00
|
|
|
& flip runReaderT logger
|
2022-10-20 15:45:31 +03:00
|
|
|
& runMetadataT metadata metadataDefaults
|
2021-01-07 12:04:22 +03:00
|
|
|
& runCacheRWT schemaCache
|
harmonize network manager handling
2023-02-22 18:53:52 +03:00
|
|
|
& peelRun (RunCtx userInfo serverConfigCtx)
|
2021-01-07 12:04:22 +03:00
|
|
|
-- set modified metadata in storage
|
2021-05-21 05:46:58 +03:00
|
|
|
if queryModifiesMetadata _rqlMetadata
|
2021-12-08 09:26:46 +03:00
|
|
|
then case (_sccMaintenanceMode serverConfigCtx, _sccReadOnlyMode serverConfigCtx) of
|
|
|
|
(MaintenanceModeDisabled, ReadOnlyModeDisabled) -> do
|
2021-04-06 06:25:02 +03:00
|
|
|
-- set modified metadata in storage
|
2022-11-30 07:02:02 +03:00
|
|
|
L.unLogger logger $
|
|
|
|
SchemaSyncLog L.LevelInfo TTMetadataApi $
|
|
|
|
String $
|
|
|
|
"Attempting to put new metadata in storage"
|
2022-08-19 00:56:47 +03:00
|
|
|
newResourceVersion <-
|
Rewrite `Tracing` to allow for only one `TraceT` in the entire stack.
2023-03-13 20:37:16 +03:00
|
|
|
Tracing.newSpan "setMetadata" $
|
2023-02-03 04:03:23 +03:00
|
|
|
liftEitherM $
|
|
|
|
setMetadata (fromMaybe currentResourceVersion _rqlMetadataResourceVersion) modMetadata
|
2022-11-03 02:22:58 +03:00
|
|
|
L.unLogger logger $
|
2022-11-06 01:37:04 +03:00
|
|
|
SchemaSyncLog L.LevelInfo TTMetadataApi $
|
|
|
|
String $
|
2022-11-03 02:22:58 +03:00
|
|
|
"Put new metadata in storage, received new resource version " <> tshow newResourceVersion
|
2022-03-17 23:53:56 +03:00
|
|
|
|
2021-04-06 06:25:02 +03:00
|
|
|
-- notify schema cache sync
|
Rewrite `Tracing` to allow for only one `TraceT` in the entire stack.
2023-03-13 20:37:16 +03:00
|
|
|
Tracing.newSpan "notifySchemaCacheSync" $
|
2023-02-03 04:03:23 +03:00
|
|
|
liftEitherM $
|
|
|
|
notifySchemaCacheSync newResourceVersion instanceId cacheInvalidations
|
2022-11-03 02:22:58 +03:00
|
|
|
L.unLogger logger $
|
2022-11-06 01:37:04 +03:00
|
|
|
SchemaSyncLog L.LevelInfo TTMetadataApi $
|
|
|
|
String $
|
2022-11-03 02:22:58 +03:00
|
|
|
"Sent schema cache sync notification at resource version " <> tshow newResourceVersion
|
|
|
|
|
2021-04-06 06:25:02 +03:00
|
|
|
(_, modSchemaCache', _) <-
|
Rewrite `Tracing` to allow for only one `TraceT` in the entire stack.
2023-03-13 20:37:16 +03:00
|
|
|
Tracing.newSpan "setMetadataResourceVersionInSchemaCache" $
|
2022-08-19 00:56:47 +03:00
|
|
|
setMetadataResourceVersionInSchemaCache newResourceVersion
|
|
|
|
& runCacheRWT modSchemaCache
|
harmonize network manager handling
2023-02-22 18:53:52 +03:00
|
|
|
& peelRun (RunCtx userInfo serverConfigCtx)
|
2021-08-24 10:36:32 +03:00
|
|
|
|
2021-04-06 06:25:02 +03:00
|
|
|
pure (r, modSchemaCache')
|
2022-04-28 23:55:13 +03:00
|
|
|
(MaintenanceModeEnabled (), ReadOnlyModeDisabled) ->
|
2021-12-08 09:26:46 +03:00
|
|
|
throw500 "metadata cannot be modified in maintenance mode"
|
|
|
|
(MaintenanceModeDisabled, ReadOnlyModeEnabled) ->
|
|
|
|
throw400 NotSupported "metadata cannot be modified in read-only mode"
|
2022-04-28 23:55:13 +03:00
|
|
|
(MaintenanceModeEnabled (), ReadOnlyModeEnabled) ->
|
2021-04-06 06:25:02 +03:00
|
|
|
throw500 "metadata cannot be modified in maintenance mode"
|
|
|
|
else pure (r, modSchemaCache)
|
2021-01-07 12:04:22 +03:00
|
|
|
|
2021-02-19 05:39:30 +03:00
|
|
|
queryModifiesMetadata :: RQLMetadataRequest -> Bool
|
2021-02-18 19:46:14 +03:00
|
|
|
queryModifiesMetadata = \case
|
|
|
|
RMV1 q ->
|
|
|
|
case q of
|
2021-09-06 14:15:36 +03:00
|
|
|
RMRedeliverEvent _ -> False
|
|
|
|
RMInvokeEventTrigger _ -> False
|
2021-04-27 07:22:32 +03:00
|
|
|
RMGetInconsistentMetadata _ -> False
|
|
|
|
RMIntrospectRemoteSchema _ -> False
|
|
|
|
RMDumpInternalState _ -> False
|
|
|
|
RMSetCatalogState _ -> False
|
|
|
|
RMGetCatalogState _ -> False
|
|
|
|
RMExportMetadata _ -> False
|
2022-11-03 13:21:56 +03:00
|
|
|
RMGetScheduledEventInvocations _ -> False
|
2022-01-27 09:43:39 +03:00
|
|
|
RMGetCronTriggers -> False
|
2021-04-27 07:22:32 +03:00
|
|
|
RMGetScheduledEvents _ -> False
|
|
|
|
RMCreateScheduledEvent _ -> False
|
|
|
|
RMDeleteScheduledEvent _ -> False
|
2021-10-05 02:49:51 +03:00
|
|
|
RMTestWebhookTransform _ -> False
|
2022-09-15 10:34:03 +03:00
|
|
|
RMGetSourceKindCapabilities _ -> False
|
|
|
|
RMListSourceKinds _ -> False
|
|
|
|
RMGetSourceTables _ -> False
|
|
|
|
RMGetTableInfo _ -> False
|
2023-01-25 10:12:53 +03:00
|
|
|
RMTestConnectionTemplate _ -> False
|
2023-01-09 10:25:32 +03:00
|
|
|
RMSuggestRelationships _ -> False
|
2023-02-21 16:45:12 +03:00
|
|
|
RMGetLogicalModel _ -> False
|
|
|
|
RMTrackLogicalModel _ -> True
|
|
|
|
RMUntrackLogicalModel _ -> True
|
2023-03-03 18:27:51 +03:00
|
|
|
RMCreateSelectLogicalModelPermission _ -> True
|
2023-03-03 20:58:22 +03:00
|
|
|
RMDropSelectLogicalModelPermission _ -> True
|
2021-04-27 07:22:32 +03:00
|
|
|
RMBulk qs -> any queryModifiesMetadata qs
|
2022-09-15 10:34:03 +03:00
|
|
|
-- We used to assume that the fallthrough was True,
|
|
|
|
-- but it is better to be explicit here to warn when new constructors are added.
|
|
|
|
RMAddSource _ -> True
|
|
|
|
RMDropSource _ -> True
|
|
|
|
RMRenameSource _ -> True
|
|
|
|
RMUpdateSource _ -> True
|
|
|
|
RMTrackTable _ -> True
|
|
|
|
RMUntrackTable _ -> True
|
|
|
|
RMSetTableCustomization _ -> True
|
|
|
|
RMSetApolloFederationConfig _ -> True
|
|
|
|
RMPgSetTableIsEnum _ -> True
|
|
|
|
RMCreateInsertPermission _ -> True
|
|
|
|
RMCreateSelectPermission _ -> True
|
|
|
|
RMCreateUpdatePermission _ -> True
|
|
|
|
RMCreateDeletePermission _ -> True
|
|
|
|
RMDropInsertPermission _ -> True
|
|
|
|
RMDropSelectPermission _ -> True
|
|
|
|
RMDropUpdatePermission _ -> True
|
|
|
|
RMDropDeletePermission _ -> True
|
|
|
|
RMSetPermissionComment _ -> True
|
|
|
|
RMCreateObjectRelationship _ -> True
|
|
|
|
RMCreateArrayRelationship _ -> True
|
|
|
|
RMDropRelationship _ -> True
|
|
|
|
RMSetRelationshipComment _ -> True
|
|
|
|
RMRenameRelationship _ -> True
|
|
|
|
RMCreateRemoteRelationship _ -> True
|
|
|
|
RMUpdateRemoteRelationship _ -> True
|
|
|
|
RMDeleteRemoteRelationship _ -> True
|
|
|
|
RMTrackFunction _ -> True
|
|
|
|
RMUntrackFunction _ -> True
|
|
|
|
RMSetFunctionCustomization _ -> True
|
|
|
|
RMCreateFunctionPermission _ -> True
|
|
|
|
RMDropFunctionPermission _ -> True
|
|
|
|
RMAddComputedField _ -> True
|
|
|
|
RMDropComputedField _ -> True
|
|
|
|
RMCreateEventTrigger _ -> True
|
|
|
|
RMDeleteEventTrigger _ -> True
|
|
|
|
RMCleanupEventTriggerLog _ -> True
|
2022-09-21 08:59:14 +03:00
|
|
|
RMResumeEventTriggerCleanup _ -> True
|
2022-09-15 10:34:03 +03:00
|
|
|
RMPauseEventTriggerCleanup _ -> True
|
|
|
|
RMAddRemoteSchema _ -> True
|
|
|
|
RMUpdateRemoteSchema _ -> True
|
|
|
|
RMRemoveRemoteSchema _ -> True
|
|
|
|
RMReloadRemoteSchema _ -> True
|
|
|
|
RMAddRemoteSchemaPermissions _ -> True
|
|
|
|
RMDropRemoteSchemaPermissions _ -> True
|
|
|
|
RMCreateRemoteSchemaRemoteRelationship _ -> True
|
|
|
|
RMUpdateRemoteSchemaRemoteRelationship _ -> True
|
|
|
|
RMDeleteRemoteSchemaRemoteRelationship _ -> True
|
|
|
|
RMCreateCronTrigger _ -> True
|
|
|
|
RMDeleteCronTrigger _ -> True
|
|
|
|
RMCreateAction _ -> True
|
|
|
|
RMDropAction _ -> True
|
|
|
|
RMUpdateAction _ -> True
|
|
|
|
RMCreateActionPermission _ -> True
|
|
|
|
RMDropActionPermission _ -> True
|
|
|
|
RMCreateQueryCollection _ -> True
|
|
|
|
RMRenameQueryCollection _ -> True
|
|
|
|
RMDropQueryCollection _ -> True
|
|
|
|
RMAddQueryToCollection _ -> True
|
|
|
|
RMDropQueryFromCollection _ -> True
|
|
|
|
RMAddCollectionToAllowlist _ -> True
|
|
|
|
RMDropCollectionFromAllowlist _ -> True
|
|
|
|
RMUpdateScopeOfCollectionInAllowlist _ -> True
|
|
|
|
RMCreateRestEndpoint _ -> True
|
|
|
|
RMDropRestEndpoint _ -> True
|
|
|
|
RMDCAddAgent _ -> True
|
|
|
|
RMDCDeleteAgent _ -> True
|
|
|
|
RMSetCustomTypes _ -> True
|
|
|
|
RMSetApiLimits _ -> True
|
|
|
|
RMRemoveApiLimits -> True
|
|
|
|
RMSetMetricsConfig _ -> True
|
|
|
|
RMRemoveMetricsConfig -> True
|
|
|
|
RMAddInheritedRole _ -> True
|
|
|
|
RMDropInheritedRole _ -> True
|
|
|
|
RMReplaceMetadata _ -> True
|
|
|
|
RMClearMetadata _ -> True
|
|
|
|
RMReloadMetadata _ -> True
|
|
|
|
RMDropInconsistentMetadata _ -> True
|
|
|
|
RMSetGraphqlSchemaIntrospectionOptions _ -> True
|
|
|
|
RMAddHostToTLSAllowlist _ -> True
|
|
|
|
RMDropHostFromTLSAllowlist _ -> True
|
|
|
|
RMSetQueryTagsConfig _ -> True
|
2022-11-23 05:49:29 +03:00
|
|
|
RMSetOpenTelemetryConfig _ -> True
|
|
|
|
RMSetOpenTelemetryStatus _ -> True
|
2023-01-23 16:35:48 +03:00
|
|
|
RMGetFeatureFlag _ -> False
|
2021-02-19 05:39:30 +03:00
|
|
|
RMV2 q ->
|
|
|
|
case q of
|
2021-04-27 07:22:32 +03:00
|
|
|
RMV2ExportMetadata _ -> False
|
|
|
|
_ -> True
|
2021-09-24 01:56:37 +03:00
|
|
|
|
2021-01-07 12:04:22 +03:00
|
|
|
runMetadataQueryM ::
|
2021-10-13 19:38:56 +03:00
|
|
|
( MonadIO m,
|
2021-01-07 12:04:22 +03:00
|
|
|
MonadBaseControl IO m,
|
|
|
|
CacheRWM m,
|
|
|
|
Tracing.MonadTrace m,
|
|
|
|
UserInfoM m,
|
|
|
|
MetadataM m,
|
|
|
|
MonadMetadataStorageQueryAPI m,
|
2021-01-29 08:48:17 +03:00
|
|
|
HasServerConfigCtx m,
|
2021-07-27 18:14:12 +03:00
|
|
|
MonadReader r m,
|
2022-09-09 11:26:44 +03:00
|
|
|
Has (L.Logger L.Hasura) r,
|
2023-02-03 04:03:23 +03:00
|
|
|
MonadError QErr m,
|
harmonize network manager handling
2023-02-22 18:53:52 +03:00
|
|
|
MonadEventLogCleanup m,
|
2023-03-13 14:44:18 +03:00
|
|
|
ProvidesHasuraServices m,
|
|
|
|
MonadGetApiTimeLimit m
|
2021-01-07 12:04:22 +03:00
|
|
|
) =>
|
|
|
|
Env.Environment ->
|
2021-02-19 05:39:30 +03:00
|
|
|
MetadataResourceVersion ->
|
|
|
|
RQLMetadataRequest ->
|
2021-01-07 12:04:22 +03:00
|
|
|
m EncJSON
|
2021-02-19 05:39:30 +03:00
|
|
|
runMetadataQueryM env currentResourceVersion =
|
|
|
|
withPathK "args" . \case
|
2022-08-19 00:56:47 +03:00
|
|
|
-- NOTE: This is a good place to install tracing, since it's involved in
|
|
|
|
-- the recursive case via "bulk":
|
|
|
|
RMV1 q ->
|
Rewrite `Tracing` to allow for only one `TraceT` in the entire stack.
2023-03-13 20:37:16 +03:00
|
|
|
Tracing.newSpan ("v1 " <> T.pack (constrName q)) $
|
2022-08-19 00:56:47 +03:00
|
|
|
runMetadataQueryV1M env currentResourceVersion q
|
|
|
|
RMV2 q ->
|
Rewrite `Tracing` to allow for only one `TraceT` in the entire stack.
2023-03-13 20:37:16 +03:00
|
|
|
Tracing.newSpan ("v2 " <> T.pack (constrName q)) $
|
2022-08-19 00:56:47 +03:00
|
|
|
runMetadataQueryV2M currentResourceVersion q
|
2021-09-24 01:56:37 +03:00
|
|
|
|
2021-02-16 11:08:19 +03:00
|
|
|
runMetadataQueryV1M ::
|
2021-07-27 18:14:12 +03:00
|
|
|
forall m r.
|
2021-10-13 19:38:56 +03:00
|
|
|
( MonadIO m,
|
2021-02-16 11:08:19 +03:00
|
|
|
MonadBaseControl IO m,
|
|
|
|
CacheRWM m,
|
|
|
|
Tracing.MonadTrace m,
|
|
|
|
UserInfoM m,
|
|
|
|
MetadataM m,
|
|
|
|
MonadMetadataStorageQueryAPI m,
|
|
|
|
HasServerConfigCtx m,
|
2021-07-27 18:14:12 +03:00
|
|
|
MonadReader r m,
|
2022-09-09 11:26:44 +03:00
|
|
|
Has (L.Logger L.Hasura) r,
|
2023-02-03 04:03:23 +03:00
|
|
|
MonadError QErr m,
|
harmonize network manager handling
2023-02-22 18:53:52 +03:00
|
|
|
MonadEventLogCleanup m,
|
2023-03-13 14:44:18 +03:00
|
|
|
ProvidesHasuraServices m,
|
|
|
|
MonadGetApiTimeLimit m
|
2021-02-16 11:08:19 +03:00
|
|
|
) =>
|
|
|
|
Env.Environment ->
|
2021-02-19 05:39:30 +03:00
|
|
|
MetadataResourceVersion ->
|
2021-02-16 11:08:19 +03:00
|
|
|
RQLMetadataV1 ->
|
|
|
|
m EncJSON
|
2021-02-19 05:39:30 +03:00
|
|
|
runMetadataQueryV1M env currentResourceVersion = \case
|
2023-02-14 15:14:33 +03:00
|
|
|
RMAddSource q -> dispatchMetadata (runAddSource env) q
|
2021-07-07 04:43:42 +03:00
|
|
|
RMDropSource q -> runDropSource q
|
|
|
|
RMRenameSource q -> runRenameSource q
|
2022-06-22 10:06:19 +03:00
|
|
|
RMUpdateSource q -> dispatchMetadata runUpdateSource q
|
2022-08-30 02:51:34 +03:00
|
|
|
RMListSourceKinds q -> runListSourceKinds q
|
2022-09-02 20:50:09 +03:00
|
|
|
RMGetSourceKindCapabilities q -> runGetSourceKindCapabilities q
|
2022-12-02 11:01:06 +03:00
|
|
|
RMGetSourceTables q -> runGetSourceTables env q
|
2022-12-21 02:38:24 +03:00
|
|
|
RMGetTableInfo q -> runGetTableInfo env q
|
2021-09-06 14:15:36 +03:00
|
|
|
RMTrackTable q -> dispatchMetadata runTrackTableV2Q q
|
2022-07-27 10:56:53 +03:00
|
|
|
RMUntrackTable q -> dispatchMetadataAndEventTrigger runUntrackTableQ q
|
2021-10-07 16:02:19 +03:00
|
|
|
RMSetFunctionCustomization q -> dispatchMetadata runSetFunctionCustomization q
|
2021-09-06 14:15:36 +03:00
|
|
|
RMSetTableCustomization q -> dispatchMetadata runSetTableCustomization q
|
2022-08-04 12:35:58 +03:00
|
|
|
RMSetApolloFederationConfig q -> dispatchMetadata runSetApolloFederationConfig q
|
2022-10-11 12:08:04 +03:00
|
|
|
RMPgSetTableIsEnum q -> dispatchMetadata runSetExistingTableIsEnumQ q
|
2021-09-06 14:15:36 +03:00
|
|
|
RMCreateInsertPermission q -> dispatchMetadata runCreatePerm q
|
|
|
|
RMCreateSelectPermission q -> dispatchMetadata runCreatePerm q
|
|
|
|
RMCreateUpdatePermission q -> dispatchMetadata runCreatePerm q
|
|
|
|
RMCreateDeletePermission q -> dispatchMetadata runCreatePerm q
|
2022-04-06 15:47:35 +03:00
|
|
|
RMDropInsertPermission q -> dispatchMetadata (runDropPerm PTInsert) q
|
|
|
|
RMDropSelectPermission q -> dispatchMetadata (runDropPerm PTSelect) q
|
|
|
|
RMDropUpdatePermission q -> dispatchMetadata (runDropPerm PTUpdate) q
|
|
|
|
RMDropDeletePermission q -> dispatchMetadata (runDropPerm PTDelete) q
|
2021-09-06 14:15:36 +03:00
|
|
|
RMSetPermissionComment q -> dispatchMetadata runSetPermComment q
|
|
|
|
RMCreateObjectRelationship q -> dispatchMetadata (runCreateRelationship ObjRel . unCreateObjRel) q
|
|
|
|
RMCreateArrayRelationship q -> dispatchMetadata (runCreateRelationship ArrRel . unCreateArrRel) q
|
|
|
|
RMDropRelationship q -> dispatchMetadata runDropRel q
|
|
|
|
RMSetRelationshipComment q -> dispatchMetadata runSetRelComment q
|
|
|
|
RMRenameRelationship q -> dispatchMetadata runRenameRel q
|
2023-01-09 10:25:32 +03:00
|
|
|
RMSuggestRelationships q -> dispatchMetadata runSuggestRels q
|
2021-09-06 14:15:36 +03:00
|
|
|
RMCreateRemoteRelationship q -> dispatchMetadata runCreateRemoteRelationship q
|
|
|
|
RMUpdateRemoteRelationship q -> dispatchMetadata runUpdateRemoteRelationship q
|
Fix several issues with remote relationships.
## Remaining Work
- [x] changelog entry
- [x] more tests: `<backend>_delete_remote_relationship` is definitely untested
- [x] negative tests: we probably want to assert that there are some APIs we DON'T support
- [x] update the console to use the new API, if necessary
- [x] ~~adding the corresponding documentation for the API for other backends (only `pg_` was added here)~~
- deferred to https://github.com/hasura/graphql-engine-mono/issues/3170
- [x] ~~deciding which backends should support this API~~
- deferred to https://github.com/hasura/graphql-engine-mono/issues/3170
- [x] ~~deciding what to do about potentially overlapping schematic representations~~
- ~~cf. https://github.com/hasura/graphql-engine-mono/pull/3157#issuecomment-995307624~~
- deferred to https://github.com/hasura/graphql-engine-mono/issues/3171
- [x] ~~add more descriptive versioning information to some of the types that are changing in this PR~~
- cf. https://github.com/hasura/graphql-engine-mono/pull/3157#discussion_r769830920
- deferred to https://github.com/hasura/graphql-engine-mono/issues/3172
## Description
This PR fixes several important issues wrt. the remote relationship API.
- it fixes a regression introduced by [#3124](https://github.com/hasura/graphql-engine-mono/pull/3124), which prevented `<backend>_create_remote_relationship` from accepting the old argument format (a break of backwards compatibility that broke the console)
- it removes the command `create_remote_relationship` added to the v1/metadata API as a work-around as part of [#3124](https://github.com/hasura/graphql-engine-mono/pull/3124)
- it reverts the subsequent fix in the console: [#3149](https://github.com/hasura/graphql-engine-mono/pull/3149)
Furthermore, this PR also addresses two other issues:
- THE DOCUMENTATION OF THE METADATA API WAS WRONG, and documented `create_remote_relationship` instead of `<backend>_create_remote_relationship`: this PR fixes this by adding `pg_` everywhere, but does not attempt to add the corresponding documentation for other backends, partly because:
- `<backend>_delete_remote_relationship` WAS BROKEN ON NON-POSTGRES BACKENDS; it always expected an argument parameterized by Postgres.
As of main, the `<backend>_(create|update|delete)_remote_relationship` commands are supported on Postgres, Citus, and BigQuery, but **NOT MSSQL**. I do not know whether this is intentional, or whether it should even be publicized, and as a result this PR doesn't change this.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/3157
Co-authored-by: jkachmar <8461423+jkachmar@users.noreply.github.com>
GitOrigin-RevId: 37e2f41522a9229a11c595574c3f4984317d652a
2021-12-16 23:28:08 +03:00
|
|
|
  RMDeleteRemoteRelationship q -> dispatchMetadata runDeleteRemoteRelationship q
  RMTrackFunction q -> dispatchMetadata runTrackFunctionV2 q
  RMUntrackFunction q -> dispatchMetadata runUntrackFunc q
  RMCreateFunctionPermission q -> dispatchMetadata runCreateFunctionPermission q
  RMDropFunctionPermission q -> dispatchMetadata runDropFunctionPermission q
  RMAddComputedField q -> dispatchMetadata runAddComputedField q
  RMDropComputedField q -> dispatchMetadata runDropComputedField q
  RMTestConnectionTemplate q -> dispatchMetadata runTestConnectionTemplate q
  RMGetLogicalModel q -> dispatchMetadata LogicalModels.runGetLogicalModel q
  RMTrackLogicalModel q -> dispatchMetadata (LogicalModels.runTrackLogicalModel env) q
  RMUntrackLogicalModel q -> dispatchMetadata LogicalModels.runUntrackLogicalModel q
  RMCreateSelectLogicalModelPermission q -> dispatchMetadata LogicalModels.runCreateSelectLogicalModelPermission q
  RMDropSelectLogicalModelPermission q -> dispatchMetadata LogicalModels.runDropSelectLogicalModelPermission q
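  -- 'validateTransforms' checks the request/response transform templates
  -- attached to the payload before the command itself runs; the same pattern
  -- recurs below for cron triggers, actions, and test_webhook_transform.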
  RMCreateEventTrigger q ->
    dispatchMetadataAndEventTrigger
      ( validateTransforms
          (unUnvalidate1 . cetqRequestTransform . _Just)
          (unUnvalidate1 . cetqResponseTrasnform . _Just)
          (runCreateEventTriggerQuery . _unUnvalidate1)
      )
      q
  RMDeleteEventTrigger q -> dispatchMetadataAndEventTrigger runDeleteEventTriggerQuery q
  RMRedeliverEvent q -> dispatchEventTrigger runRedeliverEvent q
  RMInvokeEventTrigger q -> dispatchEventTrigger runInvokeEventTrigger q
  RMCleanupEventTriggerLog q -> runCleanupEventTriggerLog q
  RMResumeEventTriggerCleanup q -> runEventTriggerResumeCleanup q
  RMPauseEventTriggerCleanup q -> runEventTriggerPauseCleanup q
  RMAddRemoteSchema q -> runAddRemoteSchema env q
  RMUpdateRemoteSchema q -> runUpdateRemoteSchema env q
  RMRemoveRemoteSchema q -> runRemoveRemoteSchema q
  RMReloadRemoteSchema q -> runReloadRemoteSchema q
  RMIntrospectRemoteSchema q -> runIntrospectRemoteSchema q
  RMAddRemoteSchemaPermissions q -> runAddRemoteSchemaPermissions q
  RMDropRemoteSchemaPermissions q -> runDropRemoteSchemaPermissions q
  RMCreateRemoteSchemaRemoteRelationship q -> runCreateRemoteSchemaRemoteRelationship q
  RMUpdateRemoteSchemaRemoteRelationship q -> runUpdateRemoteSchemaRemoteRelationship q
  RMDeleteRemoteSchemaRemoteRelationship q -> runDeleteRemoteSchemaRemoteRelationship q
  RMCreateCronTrigger q ->
    validateTransforms
      (unUnvalidate . cctRequestTransform . _Just)
      (unUnvalidate . cctResponseTransform . _Just)
      (runCreateCronTrigger . _unUnvalidate)
      q
  RMDeleteCronTrigger q -> runDeleteCronTrigger q
  RMCreateScheduledEvent q -> runCreateScheduledEvent q
  RMDeleteScheduledEvent q -> runDeleteScheduledEvent q
  RMGetScheduledEvents q -> runGetScheduledEvents q
  RMGetScheduledEventInvocations q -> runGetScheduledEventInvocations q
  RMGetCronTriggers -> runGetCronTriggers
  RMCreateAction q ->
    validateTransforms
      (unUnvalidate . caDefinition . adRequestTransform . _Just)
      (unUnvalidate . caDefinition . adResponseTransform . _Just)
      (runCreateAction . _unUnvalidate)
      q
  RMDropAction q -> runDropAction q
  RMUpdateAction q ->
    validateTransforms
      (unUnvalidate . uaDefinition . adRequestTransform . _Just)
      (unUnvalidate . uaDefinition . adResponseTransform . _Just)
      (runUpdateAction . _unUnvalidate)
      q
  RMCreateActionPermission q -> runCreateActionPermission q
  RMDropActionPermission q -> runDropActionPermission q
  RMCreateQueryCollection q -> runCreateCollection q
  RMRenameQueryCollection q -> runRenameCollection q
  RMDropQueryCollection q -> runDropCollection q
  RMAddQueryToCollection q -> runAddQueryToCollection q
  RMDropQueryFromCollection q -> runDropQueryFromCollection q
  RMAddCollectionToAllowlist q -> runAddCollectionToAllowlist q
  RMDropCollectionFromAllowlist q -> runDropCollectionFromAllowlist q
  RMUpdateScopeOfCollectionInAllowlist q -> runUpdateScopeOfCollectionInAllowlist q
  RMCreateRestEndpoint q -> runCreateEndpoint q
  RMDropRestEndpoint q -> runDropEndpoint q
  RMDCAddAgent q -> runAddDataConnectorAgent q
  RMDCDeleteAgent q -> runDeleteDataConnectorAgent q
  RMSetCustomTypes q -> runSetCustomTypes q
  RMSetApiLimits q -> runSetApiLimits q
  RMRemoveApiLimits -> runRemoveApiLimits
  RMSetMetricsConfig q -> runSetMetricsConfig q
  RMRemoveMetricsConfig -> runRemoveMetricsConfig
  RMAddInheritedRole q -> runAddInheritedRole q
  RMDropInheritedRole q -> runDropInheritedRole q
  RMReplaceMetadata q -> runReplaceMetadata q
  RMExportMetadata q -> runExportMetadata q
  RMClearMetadata q -> runClearMetadata q
  RMReloadMetadata q -> runReloadMetadata q
  RMGetInconsistentMetadata q -> runGetInconsistentMetadata q
  RMDropInconsistentMetadata q -> runDropInconsistentMetadata q
  RMSetGraphqlSchemaIntrospectionOptions q -> runSetGraphqlSchemaIntrospectionOptions q
  RMAddHostToTLSAllowlist q -> runAddHostToTLSAllowlist q
  RMDropHostFromTLSAllowlist q -> runDropHostFromTLSAllowlist q
  RMDumpInternalState q -> runDumpInternalState q
  RMGetCatalogState q -> runGetCatalogState q
  RMSetCatalogState q -> runSetCatalogState q
  RMTestWebhookTransform q ->
    validateTransforms
      (unUnvalidate . twtRequestTransformer)
      (unUnvalidate . twtResponseTransformer . _Just)
      (runTestWebhookTransform . _unUnvalidate)
      q
  RMSetQueryTagsConfig q -> runSetQueryTagsConfig q
  RMSetOpenTelemetryConfig q -> runSetOpenTelemetryConfig q
  RMSetOpenTelemetryStatus q -> runSetOpenTelemetryStatus q
  RMGetFeatureFlag q -> runGetFeatureFlag q
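  -- A "bulk" request runs each wrapped metadata command in order and returns
  -- the collected results as a JSON array. An illustrative payload (not taken
  -- from this module) might look like:
  --
  -- > {
  -- >   "type": "bulk",
  -- >   "args": [
  -- >     { "type": "pg_track_table", "args": { ... } },
  -- >     { "type": "pg_create_object_relationship", "args": { ... } }
  -- >   ]
  -- > }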
  RMBulk q -> encJFromList <$> indexedMapM (runMetadataQueryM env currentResourceVersion) q
  where
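    -- Helpers that unwrap an 'AnyBackend'-wrapped command and run the given
    -- handler with the appropriate backend class dictionaries in scope.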
    dispatchMetadata ::
      (forall b. BackendMetadata b => i b -> a) ->
      AnyBackend i ->
      a
    dispatchMetadata f x = dispatchAnyBackend @BackendMetadata x f

    dispatchEventTrigger :: (forall b. BackendEventTrigger b => i b -> a) -> AnyBackend i -> a
    dispatchEventTrigger f x = dispatchAnyBackend @BackendEventTrigger x f

    dispatchMetadataAndEventTrigger ::
      (forall b. (BackendMetadata b, BackendEventTrigger b) => i b -> a) ->
      AnyBackend i ->
      a
    dispatchMetadataAndEventTrigger f x = dispatchAnyBackendWithTwoConstraints @BackendMetadata @BackendEventTrigger x f
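-- | Handler for the v2 flavour of the metadata API ('RQLMetadataV2'); at
-- present only metadata replace and export have v2-specific variants.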
runMetadataQueryV2M ::
  ( MonadIO m,
    CacheRWM m,
    MonadBaseControl IO m,
    MetadataM m,
    MonadMetadataStorageQueryAPI m,
    MonadReader r m,
    Has (L.Logger L.Hasura) r,
    MonadError QErr m,
    MonadEventLogCleanup m,
    MonadGetApiTimeLimit m
  ) =>
  MetadataResourceVersion ->
  RQLMetadataV2 ->
  m EncJSON
runMetadataQueryV2M currentResourceVersion = \case
  RMV2ReplaceMetadata q -> runReplaceMetadataV2 q
  RMV2ExportMetadata q -> runExportMetadataV2 currentResourceVersion q