{-# LANGUAGE ApplicativeDo #-}
{-# LANGUAGE TemplateHaskell #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}

module Hasura.Backends.BigQuery.Instances.Schema () where

import Data.Aeson qualified as J
import Data.Has
import Data.HashMap.Strict qualified as Map
import Data.List.NonEmpty qualified as NE
import Data.Text qualified as T
import Data.Text.Casing qualified as C
import Data.Text.Extended
import Hasura.Backends.BigQuery.Types qualified as BigQuery
import Hasura.Base.Error
import Hasura.GraphQL.Parser hiding (EnumValueInfo, field)
import Hasura.GraphQL.Parser qualified as P
import Hasura.GraphQL.Parser.Constants qualified as G
import Hasura.GraphQL.Parser.Internal.Parser hiding (field)
import Hasura.GraphQL.Parser.Internal.Parser qualified as P
import Hasura.GraphQL.Schema.Backend
import Hasura.GraphQL.Schema.BoolExp
import Hasura.GraphQL.Schema.Build qualified as GSB
import Hasura.GraphQL.Schema.Common
import Hasura.GraphQL.Schema.Select
import Hasura.GraphQL.Schema.Table
import Hasura.Prelude
import Hasura.RQL.IR.BoolExp
import Hasura.RQL.IR.Select qualified as IR
import Hasura.RQL.IR.Value qualified as IR
import Hasura.RQL.Types.Backend
import Hasura.RQL.Types.Column
import Hasura.RQL.Types.Common
import Hasura.RQL.Types.ComputedField
import Hasura.RQL.Types.Function
import Hasura.RQL.Types.SchemaCache hiding (askTableInfo)
import Hasura.RQL.Types.Source (SourceInfo)
import Hasura.RQL.Types.SourceCustomization (NamingCase)
import Hasura.RQL.Types.Table
import Hasura.SQL.Backend
import Language.GraphQL.Draft.Syntax qualified as G

----------------------------------------------------------------
-- BackendSchema instance
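-- Note: this is an orphan instance (hence the OPTIONS_GHC pragma above). It
-- wires the generic schema-building machinery to the BigQuery-specific
-- helpers defined in this module: the query-side table builders reuse the
-- generic ones from Hasura.GraphQL.Schema.Build (GSB), while the remaining
-- methods delegate to the bq* functions below.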

instance BackendSchema 'BigQuery where
  -- top level parsers
  buildTableQueryFields = GSB.buildTableQueryFields
  buildTableRelayQueryFields = bqBuildTableRelayQueryFields
  buildTableStreamingSubscriptionFields = GSB.buildTableStreamingSubscriptionFields
  buildTableInsertMutationFields = bqBuildTableInsertMutationFields
  buildTableUpdateMutationFields = bqBuildTableUpdateMutationFields
  buildTableDeleteMutationFields = bqBuildTableDeleteMutationFields
  buildFunctionQueryFields = bqBuildFunctionQueryFields
  buildFunctionRelayQueryFields = bqBuildFunctionRelayQueryFields
  buildFunctionMutationFields = bqBuildFunctionMutationFields

  -- backend extensions
  relayExtension = Nothing
  nodesAggExtension = Just ()
  streamSubscriptionExtension = Nothing

  -- table arguments
  tableArguments = defaultTableArgs

  -- individual components
  columnParser = bqColumnParser
  scalarSelectionArgumentsParser = bqScalarSelectionArgumentsParser
  orderByOperators = bqOrderByOperators
  comparisonExps = bqComparisonExps
  countTypeInput = bqCountTypeInput
  aggregateOrderByCountType = BigQuery.IntegerScalarType
  computedField = bqComputedField
  node = bqNode

----------------------------------------------------------------
-- Top level parsers
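-- The builders in this section are deliberate no-ops: BigQuery currently
-- generates no Relay, mutation, or user-defined-function fields, so each
-- builder simply returns an empty list (matching 'relayExtension = Nothing'
-- in the instance above).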

bqBuildTableRelayQueryFields ::
  MonadBuildSchema 'BigQuery r m n =>
  SourceInfo 'BigQuery ->
  TableName 'BigQuery ->
  TableInfo 'BigQuery ->
  C.GQLNameIdentifier ->
  NESeq (ColumnInfo 'BigQuery) ->
  m [a]
bqBuildTableRelayQueryFields _sourceName _tableName _tableInfo _gqlName _pkeyColumns =
  pure []

bqBuildTableInsertMutationFields ::
  MonadBuildSchema 'BigQuery r m n =>
  Scenario ->
  SourceInfo 'BigQuery ->
  TableName 'BigQuery ->
  TableInfo 'BigQuery ->
  C.GQLNameIdentifier ->
  m [a]
bqBuildTableInsertMutationFields _scenario _sourceName _tableName _tableInfo _gqlName =
  pure []

bqBuildTableUpdateMutationFields ::
  MonadBuildSchema 'BigQuery r m n =>
  Scenario ->
  SourceInfo 'BigQuery ->
  TableName 'BigQuery ->
  TableInfo 'BigQuery ->
  C.GQLNameIdentifier ->
  m [a]
bqBuildTableUpdateMutationFields _scenario _sourceName _tableName _tableInfo _gqlName =
  pure []

bqBuildTableDeleteMutationFields ::
  MonadBuildSchema 'BigQuery r m n =>
  Scenario ->
  SourceInfo 'BigQuery ->
  TableName 'BigQuery ->
  TableInfo 'BigQuery ->
  C.GQLNameIdentifier ->
  m [a]
bqBuildTableDeleteMutationFields _scenario _sourceName _tableName _tableInfo _gqlName =
  pure []

bqBuildFunctionQueryFields ::
  MonadBuildSchema 'BigQuery r m n =>
  SourceInfo 'BigQuery ->
  FunctionName 'BigQuery ->
  FunctionInfo 'BigQuery ->
  TableName 'BigQuery ->
  m [a]
bqBuildFunctionQueryFields _ _ _ _ =
  pure []

bqBuildFunctionRelayQueryFields ::
  MonadBuildSchema 'BigQuery r m n =>
  SourceInfo 'BigQuery ->
  FunctionName 'BigQuery ->
  FunctionInfo 'BigQuery ->
  TableName 'BigQuery ->
  NESeq (ColumnInfo 'BigQuery) ->
  m [a]
bqBuildFunctionRelayQueryFields _sourceName _functionName _functionInfo _tableName _pkeyColumns =
  pure []

bqBuildFunctionMutationFields ::
  MonadBuildSchema 'BigQuery r m n =>
  SourceInfo 'BigQuery ->
  FunctionName 'BigQuery ->
  FunctionInfo 'BigQuery ->
  TableName 'BigQuery ->
  m [a]
bqBuildFunctionMutationFields _ _ _ _ =
  pure []

----------------------------------------------------------------
-- Individual components
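
-- bqColumnParser builds the value parser for a single BigQuery column:
-- scalar columns are mapped onto GraphQL scalars (bytes, dates, times,
-- datetimes and timestamps are accepted as string literals, geography values
-- are parsed via JSON), enum references become GraphQL enums, and any other
-- type is rejected with an internal error. The parsed value is kept together
-- with its origin so it can later be turned into a query parameter.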

bqColumnParser ::
  (MonadSchema n m, MonadError QErr m, MonadReader r m, Has MkTypename r) =>
  ColumnType 'BigQuery ->
  G.Nullability ->
  m (Parser 'Both n (IR.ValueWithOrigin (ColumnValue 'BigQuery)))
bqColumnParser columnType (G.Nullability isNullable) =
  peelWithOrigin . fmap (ColumnValue columnType) <$> case columnType of
    ColumnScalar scalarType -> case scalarType of
      -- bytestrings
      -- we only accept string literals
      BigQuery.BytesScalarType -> pure $ possiblyNullable scalarType $ BigQuery.StringValue <$> stringBased G._Bytes
      -- text
      BigQuery.StringScalarType -> pure $ possiblyNullable scalarType $ BigQuery.StringValue <$> P.string
      -- floating point values
      -- TODO: we do not perform size checks here, meaning we would accept an
      -- out-of-bounds value as long as it can be represented by a GraphQL float; this
      -- will in all likelihood error on the BigQuery side. Do we want to handle those
      -- properly here?
      BigQuery.FloatScalarType -> pure $ possiblyNullable scalarType $ BigQuery.FloatValue . BigQuery.doubleToFloat64 <$> P.float
      BigQuery.IntegerScalarType -> pure $ possiblyNullable scalarType $ BigQuery.IntegerValue . BigQuery.intToInt64 . fromIntegral <$> P.int
      BigQuery.DecimalScalarType -> pure $ possiblyNullable scalarType $ BigQuery.DecimalValue . BigQuery.Decimal . BigQuery.scientificToText <$> P.scientific
      BigQuery.BigDecimalScalarType -> pure $ possiblyNullable scalarType $ BigQuery.BigDecimalValue . BigQuery.BigDecimal . BigQuery.scientificToText <$> P.scientific
      -- boolean type
      BigQuery.BoolScalarType -> pure $ possiblyNullable scalarType $ BigQuery.BoolValue <$> P.boolean
      BigQuery.DateScalarType -> pure $ possiblyNullable scalarType $ BigQuery.DateValue . BigQuery.Date <$> stringBased G._Date
      BigQuery.TimeScalarType -> pure $ possiblyNullable scalarType $ BigQuery.TimeValue . BigQuery.Time <$> stringBased G._Time
      BigQuery.DatetimeScalarType -> pure $ possiblyNullable scalarType $ BigQuery.DatetimeValue . BigQuery.Datetime <$> stringBased G._Datetime
      BigQuery.GeographyScalarType ->
        pure $ possiblyNullable scalarType $ BigQuery.GeographyValue . BigQuery.Geography <$> throughJSON G._Geography
      BigQuery.TimestampScalarType ->
        pure $ possiblyNullable scalarType $ BigQuery.TimestampValue . BigQuery.Timestamp <$> stringBased G._Timestamp
      ty -> throwError $ internalError $ T.pack $ "Type currently unsupported for BigQuery: " ++ show ty
    ColumnEnumReference enumRef@(EnumReference _ enumValues _) ->
      case nonEmpty (Map.toList enumValues) of
        Just enumValuesList -> do
          enumName <- mkEnumTypeName enumRef
          pure $ possiblyNullable BigQuery.StringScalarType $ P.enum enumName Nothing (mkEnumValue <$> enumValuesList)
        Nothing -> throw400 ValidationFailed "empty enum values"
  where
    possiblyNullable _scalarType
      | isNullable = fmap (fromMaybe BigQuery.NullValue) . P.nullable
      | otherwise = id
    mkEnumValue :: (EnumValue, EnumValueInfo) -> (P.Definition P.EnumValueInfo, ScalarValue 'BigQuery)
    mkEnumValue (EnumValue value, EnumValueInfo description) =
      ( P.Definition value (G.Description <$> description) P.EnumValueInfo,
        BigQuery.StringValue $ G.unName value
      )
    throughJSON scalarName =
      let schemaType = P.TNamed P.NonNullable $ P.Definition scalarName Nothing P.TIScalar
       in Parser
            { pType = schemaType,
              pParser =
                valueToJSON (P.toGraphQLType schemaType)
                  >=> either (parseErrorWith ParseFailed . qeError) pure . runAesonParser J.parseJSON
            }
    stringBased :: MonadParse m => G.Name -> Parser 'Both m Text
    stringBased scalarName =
      P.string {pType = P.TNamed P.NonNullable $ P.Definition scalarName Nothing P.TIScalar}
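
-- BigQuery scalar columns take no selection arguments (there is no analogue
-- of, e.g., the JSON path arguments some other backends accept), so the
-- parser below always yields 'Nothing'.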

bqScalarSelectionArgumentsParser ::
  MonadParse n =>
  ColumnType 'BigQuery ->
  InputFieldsParser n (Maybe (ScalarSelectionArguments 'BigQuery))
bqScalarSelectionArgumentsParser _columnType = pure Nothing
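
-- The order-by enum exposes the usual six directions. Note the defaults
-- encoded below: a bare 'asc' sorts nulls first, while a bare 'desc' sorts
-- nulls last.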

bqOrderByOperators ::
  NamingCase ->
  NonEmpty
    ( Definition P.EnumValueInfo,
      (BasicOrderType 'BigQuery, NullsOrderType 'BigQuery)
    )
bqOrderByOperators _tCase =
  -- NOTE: NamingCase is not being used here as we don't support naming conventions for this DB
  NE.fromList
    [ ( define G._asc "in ascending order, nulls first",
        (BigQuery.AscOrder, BigQuery.NullsFirst)
      ),
      ( define G._asc_nulls_first "in ascending order, nulls first",
        (BigQuery.AscOrder, BigQuery.NullsFirst)
      ),
      ( define G._asc_nulls_last "in ascending order, nulls last",
        (BigQuery.AscOrder, BigQuery.NullsLast)
      ),
      ( define G._desc "in descending order, nulls last",
        (BigQuery.DescOrder, BigQuery.NullsLast)
      ),
      ( define G._desc_nulls_first "in descending order, nulls first",
        (BigQuery.DescOrder, BigQuery.NullsFirst)
      ),
      ( define G._desc_nulls_last "in descending order, nulls last",
        (BigQuery.DescOrder, BigQuery.NullsLast)
      )
    ]
  where
    define name desc = P.Definition name (Just desc) P.EnumValueInfo
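
-- bqComparisonExps builds the per-column-type boolean expression input
-- object, memoized on the column type. As an illustrative sketch (assuming
-- the shared equalityOperators / comparisonOperators helpers contribute the
-- usual generic fields), the input generated for a STRING column would look
-- roughly like:
--
--   input String_BigQuery_comparison_exp {
--     _eq: String
--     _in: [String!]
--     _like: String
--     _nlike: String
--     ...
--   }
--
-- Geography columns are special-cased: BigQuery does not support direct
-- GEOGRAPHY comparisons, so they only receive ST_* operators such as
-- _st_contains.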
|
2021-04-12 13:18:29 +03:00
|
|
|
|
2021-09-24 01:56:37 +03:00
|
|
|
bqComparisonExps ::
|
|
|
|
forall m n r.
|
Remove circular dependency in schema building code
### Description
The main goal of this PR is, as stated, to remove the circular dependency in the schema building code. This cycle arises from the existence of remote relationships: when we build the schema for a source A, a remote relationship might force us to jump to the schema of a source B, or some remote schema. As a result, we end up having to do a dispatch from a "leaf" of the schema, similar to the one done at the root. In turn, this forces us to carry along in the schema a lot of information required for that dispatch, AND it forces us to import the instances in scope, creating an import loop.
As discussed in #4489, this PR implements the "dependency injection" solution: we pass to the schema a function to call to do the dispatch, and to get a generated field for a remote relationship. That way, this function can be chosen at the root level, and the leaves need not be aware of the overall context.
This PR grew a bit bigger than that, however; in an attempt to try and remove the `SourceCache` from the schema altogether, it changed a lot of functions across the schema building code, to thread along the `SourceInfo b` of the source being built. This avoids having to do cache lookups within a given source. A few cases remain, such as relay, that we might try to tackle in a subsequent PR.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/4557
GitOrigin-RevId: 9388e48372877520a72a9fd1677005df9f7b2d72
2022-05-27 20:21:22 +03:00
|
|
|
(MonadBuildSchema 'BigQuery r m n) =>
|
2021-09-24 01:56:37 +03:00
|
|
|
ColumnType 'BigQuery ->
|
|
|
|
m (Parser 'Input n [ComparisonExp 'BigQuery])
|
2021-06-15 18:53:20 +03:00
|
|
|
bqComparisonExps = P.memoize 'comparisonExps $ \columnType -> do
|
Remove circular dependency in schema building code
### Description
The main goal of this PR is, as stated, to remove the circular dependency in the schema building code. This cycle arises from the existence of remote relationships: when we build the schema for a source A, a remote relationship might force us to jump to the schema of a source B, or some remote schema. As a result, we end up having to do a dispatch from a "leaf" of the schema, similar to the one done at the root. In turn, this forces us to carry along in the schema a lot of information required for that dispatch, AND it forces us to import the instances in scope, creating an import loop.
As discussed in #4489, this PR implements the "dependency injection" solution: we pass to the schema a function to call to do the dispatch, and to get a generated field for a remote relationship. That way, this function can be chosen at the root level, and the leaves need not be aware of the overall context.
This PR grew a bit bigger than that, however; in an attempt to try and remove the `SourceCache` from the schema altogether, it changed a lot of functions across the schema building code, to thread along the `SourceInfo b` of the source being built. This avoids having to do cache lookups within a given source. A few cases remain, such as relay, that we might try to tackle in a subsequent PR.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/4557
GitOrigin-RevId: 9388e48372877520a72a9fd1677005df9f7b2d72
2022-05-27 20:21:22 +03:00
  collapseIfNull <- retrieve soDangerousBooleanCollapse
  dWithinGeogOpParser <- geographyWithinDistanceInput
  tCase <- asks getter
  -- see Note [Columns in comparison expression are never nullable]
  typedParser <- columnParser columnType (G.Nullability False)
  _nullableTextParser <- columnParser (ColumnScalar @'BigQuery BigQuery.StringScalarType) (G.Nullability True)
  -- textParser <- columnParser (ColumnScalar @'BigQuery BigQuery.StringScalarType) (G.Nullability False)
  let name = P.getName typedParser <> G.__BigQuery_comparison_exp
      desc =
        G.Description $
          "Boolean expression to compare columns of type "
            <> P.getName typedParser
            <<> ". All fields are combined with logical 'AND'."
      -- textListParser = fmap openValueOrigin <$> P.list textParser
      columnListParser = fmap IR.openValueOrigin <$> P.list typedParser
      mkListLiteral :: [ColumnValue 'BigQuery] -> IR.UnpreparedValue 'BigQuery
      mkListLiteral =
        IR.UVLiteral . BigQuery.ListExpression . fmap (BigQuery.ValueExpression . cvValue)
  pure $
    P.object name (Just desc) $
      fmap catMaybes $
        sequenceA $
          concat
            [ -- from https://cloud.google.com/bigquery/docs/reference/standard-sql/data-types:
              -- GEOGRAPHY comparisons are not supported. To compare GEOGRAPHY values, use ST_Equals.
              guard (isScalarColumnWhere (/= BigQuery.GeographyScalarType) columnType)
                *> equalityOperators
                  tCase
                  collapseIfNull
                  (IR.mkParameter <$> typedParser)
                  (mkListLiteral <$> columnListParser),
              guard (isScalarColumnWhere (/= BigQuery.GeographyScalarType) columnType)
                *> comparisonOperators
                  tCase
                  collapseIfNull
                  (IR.mkParameter <$> typedParser),
              -- Ops for String type
              guard (isScalarColumnWhere (== BigQuery.StringScalarType) columnType)
                *> [ mkBoolOperator
                       tCase
                       collapseIfNull
                       (C.fromName G.__like)
                       (Just "does the column match the given pattern")
                       (ALIKE . IR.mkParameter <$> typedParser),
                     mkBoolOperator
                       tCase
                       collapseIfNull
                       (C.fromName G.__nlike)
                       (Just "does the column NOT match the given pattern")
                       (ANLIKE . IR.mkParameter <$> typedParser)
                   ],
              -- Ops for Bytes type
              guard (isScalarColumnWhere (== BigQuery.BytesScalarType) columnType)
                *> [ mkBoolOperator
                       tCase
                       collapseIfNull
                       (C.fromName G.__like)
                       (Just "does the column match the given pattern")
                       (ALIKE . IR.mkParameter <$> typedParser),
                     mkBoolOperator
                       tCase
                       collapseIfNull
                       (C.fromName G.__nlike)
                       (Just "does the column NOT match the given pattern")
                       (ANLIKE . IR.mkParameter <$> typedParser)
                   ],
              -- Ops for Geography type
              guard (isScalarColumnWhere (== BigQuery.GeographyScalarType) columnType)
                *> [ mkBoolOperator
                       tCase
                       collapseIfNull
                       (C.fromTuple $$(G.litGQLIdentifier ["_st", "contains"]))
                       (Just "does the column contain the given geography value")
                       (ABackendSpecific . BigQuery.ASTContains . IR.mkParameter <$> typedParser),
                     mkBoolOperator
                       tCase
                       collapseIfNull
                       (C.fromTuple $$(G.litGQLIdentifier ["_st", "equals"]))
                       (Just "is the column equal to given geography value (directionality is ignored)")
                       (ABackendSpecific . BigQuery.ASTEquals . IR.mkParameter <$> typedParser),
                     mkBoolOperator
                       tCase
                       collapseIfNull
                       (C.fromTuple $$(G.litGQLIdentifier ["_st", "touches"]))
                       (Just "does the column have at least one point in common with the given geography value")
                       (ABackendSpecific . BigQuery.ASTTouches . IR.mkParameter <$> typedParser),
                     mkBoolOperator
                       tCase
                       collapseIfNull
                       (C.fromTuple $$(G.litGQLIdentifier ["_st", "within"]))
                       (Just "is the column contained in the given geography value")
                       (ABackendSpecific . BigQuery.ASTWithin . IR.mkParameter <$> typedParser),
                     mkBoolOperator
                       tCase
                       collapseIfNull
                       (C.fromTuple $$(G.litGQLIdentifier ["_st", "intersects"]))
                       (Just "does the column spatially intersect the given geography value")
                       (ABackendSpecific . BigQuery.ASTIntersects . IR.mkParameter <$> typedParser),
                     mkBoolOperator
                       tCase
                       collapseIfNull
                       (C.fromTuple $$(G.litGQLIdentifier ["_st", "d", "within"]))
                       (Just "is the column within a given distance from the given geometry value")
                       (ABackendSpecific . BigQuery.ASTDWithin <$> dWithinGeogOpParser)
                   ]
            ]
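
-- A rough sketch (added for illustration, not part of the generated schema
-- definition above): with the default naming convention, a @geography@ column
-- ends up with a comparison input object shaped roughly like the following.
-- The type and scalar names are assumptions; only the @_st_*@ fields are
-- implied directly by the code above, since equality and ordering operators
-- are guarded out for GEOGRAPHY columns.
--
--   input geography_BigQuery_comparison_exp {
--     _st_contains:   geography
--     _st_equals:     geography
--     _st_touches:    geography
--     _st_within:     geography
--     _st_intersects: geography
--     _st_d_within:   st_dwithin_input
--   }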

bqCountTypeInput ::
  MonadParse n =>
  Maybe (Parser 'Both n (Column 'BigQuery)) ->
  InputFieldsParser n (IR.CountDistinct -> CountType 'BigQuery)
bqCountTypeInput = \case
  Just columnEnum -> do
    columns <- P.fieldOptional G._columns Nothing $ P.list columnEnum
    pure $ flip mkCountType columns
  Nothing -> pure $ flip mkCountType Nothing
  where
    mkCountType :: IR.CountDistinct -> Maybe [Column 'BigQuery] -> CountType 'BigQuery
    mkCountType _ Nothing = BigQuery.StarCountable
    mkCountType IR.SelectCountDistinct (Just cols) =
      maybe BigQuery.StarCountable BigQuery.DistinctCountable $ nonEmpty cols
    mkCountType IR.SelectCountNonDistinct (Just cols) =
      maybe BigQuery.StarCountable BigQuery.NonNullFieldCountable $ nonEmpty cols
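
-- Illustrative sketch (not part of the original source): how 'mkCountType'
-- above resolves for some hypothetical columns @a@ and @b@. An empty or
-- missing column list always falls back to 'BigQuery.StarCountable':
--
-- >  mkCountType IR.SelectCountDistinct    (Just [a, b]) = BigQuery.DistinctCountable     (a :| [b])
-- >  mkCountType IR.SelectCountNonDistinct (Just [a, b]) = BigQuery.NonNullFieldCountable (a :| [b])
-- >  mkCountType IR.SelectCountDistinct    (Just [])     = BigQuery.StarCountable
-- >  mkCountType _                         Nothing       = BigQuery.StarCountable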

geographyWithinDistanceInput ::
  forall m n r.
  (MonadSchema n m, MonadError QErr m, MonadReader r m, Has MkTypename r, Has NamingCase r) =>
  m (Parser 'Input n (DWithinGeogOp (IR.UnpreparedValue 'BigQuery)))
geographyWithinDistanceInput = do
  geographyParser <- columnParser (ColumnScalar BigQuery.GeographyScalarType) (G.Nullability False)
  -- practically BigQuery (as of 2021-11-19) doesn't support TRUE as use_spheroid parameter for ST_DWITHIN
  booleanParser <- columnParser (ColumnScalar BigQuery.BoolScalarType) (G.Nullability True)
  floatParser <- columnParser (ColumnScalar BigQuery.FloatScalarType) (G.Nullability False)
  pure $
    P.object G._st_dwithin_input Nothing $
      DWithinGeogOp <$> (IR.mkParameter <$> P.field G._distance Nothing floatParser)
        <*> (IR.mkParameter <$> P.field G._from Nothing geographyParser)
        <*> (IR.mkParameter <$> P.fieldWithDefault G._use_spheroid Nothing (G.VBoolean False) booleanParser)
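
-- Illustrative sketch (not part of the original source): the parser above
-- accepts an input object roughly of the following shape. The scalar names
-- are assumptions; the field names and the @false@ default for @use_spheroid@
-- follow from the definition above (BigQuery's ST_DWITHIN does not accept
-- TRUE for it).
--
--   input st_dwithin_input {
--     distance: Float!
--     from: geography!
--     use_spheroid: Boolean = false
--   }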

-- | Computed field parser.
bqComputedField ::
  forall r m n.
  MonadBuildSchema 'BigQuery r m n =>
  SourceInfo 'BigQuery ->
  ComputedFieldInfo 'BigQuery ->
  TableName 'BigQuery ->
  TableInfo 'BigQuery ->
  m (Maybe (FieldParser n (AnnotatedField 'BigQuery)))
bqComputedField sourceName ComputedFieldInfo {..} tableName _tableInfo = runMaybeT do
  stringifyNum <- retrieve soStringifyNum
  fieldName <- lift $ textToName $ computedFieldNameToText _cfiName
  functionArgsParser <- lift $ computedFieldFunctionArgs _cfiFunction
  case _cfiReturnType of
    BigQuery.ReturnExistingTable returnTable -> do
      returnTableInfo <- lift $ askTableInfo sourceName returnTable
      returnTablePermissions <- MaybeT $ tableSelectPermissions returnTableInfo
      selectionSetParser <- MaybeT (fmap (P.multiple . P.nonNullableParser) <$> tableSelectionSet sourceName returnTableInfo)
      selectArgsParser <- lift $ tableArguments sourceName returnTableInfo
      let fieldArgsParser = liftA2 (,) functionArgsParser selectArgsParser
      pure $
        P.subselection fieldName fieldDescription fieldArgsParser selectionSetParser
          <&> \((functionArgs', args), fields) ->
            IR.AFComputedField _cfiXComputedFieldInfo _cfiName $
              IR.CFSTable JASMultipleRows $
                IR.AnnSelectG
                  { IR._asnFields = fields,
                    IR._asnFrom = IR.FromFunction (_cffName _cfiFunction) functionArgs' Nothing,
                    IR._asnPerm = tablePermissionsInfo returnTablePermissions,
                    IR._asnArgs = args,
                    IR._asnStrfyNum = stringifyNum
                  }
    BigQuery.ReturnTableSchema returnFields -> do
      objectTypeName <-
        P.mkTypename =<< do
          computedFieldGQLName <- textToName $ computedFieldNameToText _cfiName
          pure $ computedFieldGQLName <> G.__ <> G.__fields
      selectionSetParser <- do
        fieldParsers <- lift $ for returnFields selectArbitraryField
        let description = G.Description $ "column fields returning by " <>> _cfiName
        pure $
          P.selectionSetObject objectTypeName (Just description) fieldParsers []
            <&> parsedSelectionsToFields IR.AFExpression
      pure $
        P.subselection fieldName fieldDescription functionArgsParser selectionSetParser
          <&> \(functionArgs', fields) ->
            IR.AFComputedField _cfiXComputedFieldInfo _cfiName $
              IR.CFSTable JASMultipleRows $
                IR.AnnSelectG
                  { IR._asnFields = fields,
                    IR._asnFrom = IR.FromFunction (_cffName _cfiFunction) functionArgs' Nothing,
                    IR._asnPerm = IR.noTablePermissions,
                    IR._asnArgs = IR.noSelectArgs,
                    IR._asnStrfyNum = stringifyNum
                  }
  where
    fieldDescription :: Maybe G.Description
    fieldDescription = G.Description <$> _cfiDescription

    selectArbitraryField ::
      (BigQuery.ColumnName, G.Name, BigQuery.ScalarType) ->
      m (FieldParser n (AnnotatedField 'BigQuery))
    selectArbitraryField (columnName, graphQLName, columnType) = do
      field <- columnParser @'BigQuery (ColumnScalar columnType) (G.Nullability True)
      pure $
        P.selection_ graphQLName Nothing field
          $> IR.mkAnnColumnField columnName (ColumnScalar columnType) Nothing Nothing

    computedFieldFunctionArgs ::
      ComputedFieldFunction 'BigQuery ->
      m (InputFieldsParser n (FunctionArgsExp 'BigQuery (IR.UnpreparedValue 'BigQuery)))
    computedFieldFunctionArgs ComputedFieldFunction {..} = do
      let fieldName = G._args
          fieldDesc =
            G.Description $
              "input parameters for computed field "
                <> _cfiName <<> " defined on table " <>> tableName

      objectName <-
        P.mkTypename =<< do
          tableInfo <- askTableInfo sourceName tableName
          computedFieldGQLName <- textToName $ computedFieldNameToText _cfiName
          tableGQLName <- getTableGQLName @'BigQuery tableInfo
          pure $ computedFieldGQLName <> G.__ <> tableGQLName <> G.__args

      let userInputArgs = filter (not . flip Map.member _cffComputedFieldImplicitArgs . BigQuery._faName) (toList _cffInputArgs)

      argumentParsers <- sequenceA <$> forM userInputArgs parseArgument

      let objectParser =
            P.object objectName Nothing argumentParsers `P.bind` \inputArguments -> do
              let tableColumnInputs = Map.map BigQuery.AETableColumn $ Map.mapKeys getFuncArgNameTxt _cffComputedFieldImplicitArgs
              pure $ FunctionArgsExp mempty $ Map.fromList inputArguments <> tableColumnInputs

      pure $ P.field fieldName (Just fieldDesc) objectParser

    parseArgument :: BigQuery.FunctionArgument -> m (InputFieldsParser n (Text, BigQuery.ArgumentExp (IR.UnpreparedValue 'BigQuery)))
    parseArgument arg = do
      typedParser <- columnParser (ColumnScalar $ BigQuery._faType arg) (G.Nullability False)
      let argumentName = getFuncArgNameTxt $ BigQuery._faName arg
      fieldName <- textToName argumentName
      let argParser = P.field fieldName Nothing typedParser
      pure $ argParser `P.bindFields` \inputValue -> pure ((argumentName, BigQuery.AEInput $ IR.mkParameter inputValue))
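
-- Illustrative sketch (not part of the original source): for a hypothetical
-- computed field @search_articles@ on table @author@ that returns an existing
-- table, the parser above yields a field queried roughly as
--
--   author {
--     search_articles(args: { search: "BigQuery" }, limit: 10) {
--       id
--       title
--     }
--   }
--
-- where the @args@ input object is named from the computed field and table
-- GraphQL names (here @search_articles_author_args@), and @limit@ and the
-- other select arguments come from 'tableArguments'. All names in the example
-- are hypothetical.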

{-
NOTE: Unused. Should we remove?
-- | Remote join field parser.
-- Currently unsupported: returns Nothing for now.
bqRemoteRelationshipField ::
  MonadBuildSchema 'BigQuery r m n =>
  RemoteFieldInfo (DBJoinField 'BigQuery) ->
  m (Maybe [FieldParser n (AnnotatedField 'BigQuery)])
bqRemoteRelationshipField _remoteFieldInfo = pure Nothing
-}

-- | The 'node' root field of a Relay request. Relay is currently unsupported on BigQuery,
-- meaning this parser will never be called: any attempt to create this parser should
-- therefore fail.
bqNode ::
  MonadBuildSchema 'BigQuery r m n =>
  m
    ( Parser
        'Output
        n
        ( HashMap
            (TableName 'BigQuery)
            ( SourceName,
              SourceConfig 'BigQuery,
              SelPermInfo 'BigQuery,
              PrimaryKeyColumns 'BigQuery,
              AnnotatedFields 'BigQuery
            )
        )
    )
bqNode = throw500 "BigQuery does not support relay; `node` should never be exposed in the schema."