graphql-engine/server/src-lib/Hasura/Backends/DataConnector/Adapter/Metadata.hs

{-# LANGUAGE Arrows #-}
{-# OPTIONS_GHC -fno-warn-orphans #-}
module Hasura.Backends.DataConnector.Adapter.Metadata () where
import Control.Arrow.Extended
import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson qualified as J
import Data.Aeson.Key qualified as K
import Data.Aeson.KeyMap qualified as KM
import Data.Environment (Environment)
import Data.HashMap.Strict qualified as Map
import Data.HashMap.Strict.Extended qualified as HashMap
import Data.HashMap.Strict.InsOrd qualified as OMap
import Data.HashMap.Strict.NonEmpty qualified as NEHashMap
import Data.HashSet qualified as HashSet
import Data.Sequence qualified as Seq
import Data.Sequence.NonEmpty qualified as NESeq
import Data.Text.Extended (toTxt, (<<>), (<>>))
import Hasura.Backends.DataConnector.API (capabilitiesCase, errorResponseSummary, schemaCase)
import Hasura.Backends.DataConnector.API qualified as API
import Hasura.Backends.DataConnector.API.V0.ErrorResponse (_crDetails)
import Hasura.Backends.DataConnector.Adapter.Backend (columnTypeToScalarType)
import Hasura.Backends.DataConnector.Adapter.ConfigTransform (transformConnSourceConfig)
import Hasura.Backends.DataConnector.Adapter.Types qualified as DC
import Hasura.Backends.DataConnector.Agent.Client (AgentClientContext (..), runAgentClientT)
import Hasura.Backends.Postgres.SQL.Types (PGDescription (..))
import Hasura.Base.Error (Code (..), QErr (..), decodeValue, throw400, throw400WithDetail, throw500, withPathK)
import Hasura.Incremental qualified as Inc
import Hasura.Incremental.Select qualified as Inc
import Hasura.Logging (Hasura, Logger)
import Hasura.Prelude
import Hasura.RQL.IR.BoolExp (OpExpG (..), PartialSQLExp (..), RootOrCurrent (..), RootOrCurrentColumn (..))
import Hasura.RQL.Types.Column qualified as RQL.T.C
import Hasura.RQL.Types.Common (OID (..), SourceName)
import Hasura.RQL.Types.EventTrigger (RecreateEventTriggers (RETDoNothing))
import Hasura.RQL.Types.Metadata (SourceMetadata (..))
import Hasura.RQL.Types.Metadata.Backend (BackendMetadata (..))
import Hasura.RQL.Types.Metadata.Object
import Hasura.RQL.Types.SchemaCache qualified as SchemaCache
import Hasura.RQL.Types.SchemaCache.Build
import Hasura.RQL.Types.Source (DBObjectsIntrospection (..))
import Hasura.RQL.Types.Table (ForeignKey (_fkConstraint))
import Hasura.RQL.Types.Table qualified as RQL.T.T
import Hasura.SQL.Backend (BackendSourceKind (..), BackendType (..))
import Hasura.SQL.Types (CollectableType (..))
import Hasura.Server.Migrate.Version (SourceCatalogMigrationState (..))
import Hasura.Server.Utils qualified as HSU
import Hasura.Session (SessionVariable, mkSessionVariable)
import Hasura.Tracing (ignoreTraceT)
import Language.GraphQL.Draft.Syntax qualified as GQL
import Network.HTTP.Client qualified as HTTP
import Network.HTTP.Client.Manager
import Servant.Client.Core.HasClient ((//))
import Servant.Client.Generic (genericClient)
import Witch qualified
instance BackendMetadata 'DataConnector where
prepareCatalog _ = pure (RETDoNothing, SCMSNotSupported)
type BackendInvalidationKeys 'DataConnector = HashMap DC.DataConnectorName Inc.InvalidationKey
resolveBackendInfo = resolveBackendInfo'
resolveSourceConfig = resolveSourceConfig'
resolveDatabaseMetadata = resolveDatabaseMetadata'
parseBoolExpOperations = parseBoolExpOperations'
parseCollectableType = parseCollectableType'
buildComputedFieldInfo = error "buildComputedFieldInfo: not implemented for the Data Connector backend."
-- If/when we implement enums for Data Connector backend, we will also need to fix columnTypeToScalarType function
-- in Hasura.Backends.DataConnector.Adapter.Backend. See note there for more information.
fetchAndValidateEnumValues = error "fetchAndValidateEnumValues: not implemented for the Data Connector backend."
buildFunctionInfo = error "buildFunctionInfo: not implemented for the Data Connector backend."
updateColumnInEventTrigger = error "updateColumnInEventTrigger: not implemented for the Data Connector backend."
postDropSourceHook _sourceConfig _tableTriggerMap = pure ()
buildComputedFieldBooleanExp _ _ _ _ _ _ =
error "buildComputedFieldBooleanExp: not implemented for the Data Connector backend."
resolveBackendInfo' ::
( ArrowChoice arr,
Inc.ArrowCache m arr,
Inc.ArrowDistribute arr,
ArrowWriter (Seq (Either InconsistentMetadata MetadataDependency)) arr,
MonadIO m,
MonadBaseControl IO m,
HasHttpManagerM m
) =>
Logger Hasura ->
(Inc.Dependency (Maybe (HashMap DC.DataConnectorName Inc.InvalidationKey)), InsOrdHashMap DC.DataConnectorName DC.DataConnectorOptions) `arr` HashMap DC.DataConnectorName DC.DataConnectorInfo
resolveBackendInfo' logger = proc (invalidationKeys, optionsMap) -> do
maybeDataConnectorCapabilities <-
(|
Inc.keyed
( \dataConnectorName dataConnectorOptions -> do
getDataConnectorCapabilitiesIfNeeded -< (invalidationKeys, dataConnectorName, dataConnectorOptions)
)
|) (OMap.toHashMap optionsMap)
returnA -< HashMap.catMaybes maybeDataConnectorCapabilities
where
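-- Re-fetches an agent's capabilities only when the corresponding invalidation
-- key has changed; otherwise the cached result is reused.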
getDataConnectorCapabilitiesIfNeeded ::
forall arr m.
( ArrowChoice arr,
Inc.ArrowCache m arr,
ArrowWriter (Seq (Either InconsistentMetadata MetadataDependency)) arr,
MonadIO m,
MonadBaseControl IO m,
HasHttpManagerM m
) =>
(Inc.Dependency (Maybe (HashMap DC.DataConnectorName Inc.InvalidationKey)), DC.DataConnectorName, DC.DataConnectorOptions) `arr` Maybe DC.DataConnectorInfo
getDataConnectorCapabilitiesIfNeeded = Inc.cache proc (invalidationKeys, dataConnectorName, dataConnectorOptions) -> do
let metadataObj = MetadataObject (MODataConnectorAgent dataConnectorName) $ J.toJSON dataConnectorName
httpMgr <- bindA -< askHttpManager
Inc.dependOn -< Inc.selectMaybeD (Inc.ConstS dataConnectorName) invalidationKeys
(|
withRecordInconsistency
( liftEitherA <<< bindA -< getDataConnectorCapabilities dataConnectorOptions httpMgr
)
|) metadataObj
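
-- Calls the agent's /capabilities endpoint and converts the response into a
-- 'DC.DataConnectorInfo', turning error responses into 'QErr's.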
getDataConnectorCapabilities ::
(MonadIO m, MonadBaseControl IO m) =>
DC.DataConnectorOptions ->
HTTP.Manager ->
m (Either QErr DC.DataConnectorInfo)
getDataConnectorCapabilities options@DC.DataConnectorOptions {..} manager = runExceptT do
capabilitiesU <-
ignoreTraceT
. flip runAgentClientT (AgentClientContext logger _dcoUri manager Nothing)
$ genericClient // API._capabilities
let defaultAction = throw400 DataConnectorError "Unexpected data connector capabilities response - Unexpected Type"
capabilitiesAction API.CapabilitiesResponse {..} = pure $ DC.DataConnectorInfo options _crCapabilities _crConfigSchemaResponse _crDisplayName _crReleaseName
capabilitiesCase defaultAction capabilitiesAction errorAction capabilitiesU
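
-- | Resolves a source's configuration: transforms the connection configuration
-- (resolving its template against the environment), fetches the source schema
-- from the agent's /schema endpoint, and packages the results into a
-- 'DC.SourceConfig'.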
resolveSourceConfig' ::
(MonadIO m, MonadBaseControl IO m) =>
Logger Hasura ->
SourceName ->
DC.ConnSourceConfig ->
BackendSourceKind 'DataConnector ->
HashMap DC.DataConnectorName DC.DataConnectorInfo ->
Environment ->
HTTP.Manager ->
m (Either QErr DC.SourceConfig)
resolveSourceConfig'
logger
sourceName
csc@DC.ConnSourceConfig {template, timeout, value = originalConfig}
(DataConnectorKind dataConnectorName)
backendInfo
env
manager = runExceptT do
DC.DataConnectorInfo {..} <- getDataConnectorInfo dataConnectorName backendInfo
let DC.DataConnectorOptions {_dcoUri} = _dciOptions
transformedConfig <- transformConnSourceConfig dataConnectorName sourceName _dciConfigSchemaResponse csc [("$session", J.object []), ("$env", J.toJSON env)] env
schemaResponseU <-
ignoreTraceT
. flip runAgentClientT (AgentClientContext logger _dcoUri manager (DC.sourceTimeoutMicroseconds <$> timeout))
$ (genericClient // API._schema) (toTxt sourceName) transformedConfig
let defaultAction = throw400 DataConnectorError "Unexpected data connector schema response - Unexpected Type"
schemaResponse <- schemaCase defaultAction pure errorAction schemaResponseU
pure
DC.SourceConfig
{ _scEndpoint = _dcoUri,
_scConfig = originalConfig,
_scTemplate = template,
_scCapabilities = _dciCapabilities,
_scSchema = schemaResponse,
_scManager = manager,
_scTimeoutMicroseconds = (DC.sourceTimeoutMicroseconds <$> timeout),
_scDataConnectorName = dataConnectorName
}
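
-- | Looks up the backend info for the named data connector agent, failing with
-- a 400 error if no such agent has been configured.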
getDataConnectorInfo :: (MonadError QErr m) => DC.DataConnectorName -> HashMap DC.DataConnectorName DC.DataConnectorInfo -> m DC.DataConnectorInfo
getDataConnectorInfo dataConnectorName backendInfo =
onNothing (Map.lookup dataConnectorName backendInfo) $
throw400 DataConnectorError ("Data connector named " <> toTxt dataConnectorName <<> " was not found in the data connector backend info")
resolveDatabaseMetadata' ::
Applicative m =>
SourceMetadata 'DataConnector ->
DC.SourceConfig ->
m (Either QErr (DBObjectsIntrospection 'DataConnector))
resolveDatabaseMetadata' _ DC.SourceConfig {_scSchema = API.SchemaResponse {..}, ..} =
let tables = Map.fromList $ do
API.TableInfo {..} <- _srTables
let primaryKeyColumns = Seq.fromList $ coerce <$> _tiPrimaryKey
let meta =
RQL.T.T.DBTableMetadata
{ _ptmiOid = OID 0, -- TODO: This is wrong and needs to be fixed. It is used for diffing tables and seeing what's new/deleted/altered, so reusing 0 for all tables is problematic.
_ptmiColumns = do
API.ColumnInfo {..} <- _tiColumns
pure $
RQL.T.C.RawColumnInfo
{ rciName = Witch.from _ciName,
rciPosition = 1, -- TODO: This is very wrong and needs to be fixed. It is used for diffing tables and seeing what's new/deleted/altered, so reusing 1 for all columns is problematic.
rciType = DC.mkScalarType _scCapabilities _ciType,
rciIsNullable = _ciNullable,
rciDescription = fmap GQL.Description _ciDescription,
rciMutability = RQL.T.C.ColumnMutability _ciInsertable _ciUpdatable
},
_ptmiPrimaryKey = RQL.T.T.PrimaryKey (RQL.T.T.Constraint (DC.ConstraintName "") (OID 0)) <$> NESeq.nonEmptySeq primaryKeyColumns,
_ptmiUniqueConstraints = mempty,
_ptmiForeignKeys = buildForeignKeySet _tiForeignKeys,
_ptmiViewInfo =
( if _tiType == API.Table && _tiInsertable && _tiUpdatable && _tiDeletable
then Nothing
else Just $ RQL.T.T.ViewInfo _tiInsertable _tiUpdatable _tiDeletable
),
_ptmiDescription = fmap PGDescription _tiDescription,
_ptmiExtraTableMetadata = ()
}
pure (coerce _tiName, meta)
in pure $
pure $
DBObjectsIntrospection
{ _rsTables = tables,
_rsFunctions = mempty,
_rsScalars = mempty
}
-- | Construct a 'HashSet' 'RQL.T.T.ForeignKeyMetadata'
-- 'DataConnector' to build the foreign key constraints in the table
-- metadata.
buildForeignKeySet :: API.ForeignKeys -> HashSet (RQL.T.T.ForeignKeyMetadata 'DataConnector)
buildForeignKeySet (API.ForeignKeys constraints) =
HashSet.fromList $
constraints & HashMap.foldMapWithKey @[RQL.T.T.ForeignKeyMetadata 'DataConnector]
\constraintName API.Constraint {..} -> maybeToList do
let columnMapAssocList = HashMap.foldrWithKey' (\(API.ColumnName k) (API.ColumnName v) acc -> (DC.ColumnName k, DC.ColumnName v) : acc) [] _cColumnMapping
columnMapping <- NEHashMap.fromList columnMapAssocList
let foreignKey =
RQL.T.T.ForeignKey
{ _fkConstraint = RQL.T.T.Constraint (Witch.from constraintName) (OID 1),
_fkForeignTable = Witch.from _cForeignTable,
_fkColumnMapping = columnMapping
}
pure $ RQL.T.T.ForeignKeyMetadata foreignKey
-- | Parses the operators of a boolean expression (such as those used in
-- permission filters) into 'OpExpG' values. This is needed to get permissions
-- to work.
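--
-- As an illustrative example, a permission filter such as
-- @{"user_id": {"_eq": "X-Hasura-User-Id"}}@ leads to this function being
-- invoked with the @{"_eq": ...}@ object for the @user_id@ column reference.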
parseBoolExpOperations' ::
forall m v.
(MonadError QErr m, SchemaCache.TableCoreInfoRM 'DataConnector m) =>
RQL.T.C.ValueParser 'DataConnector m v ->
DC.TableName ->
RQL.T.T.FieldInfoMap (RQL.T.T.FieldInfo 'DataConnector) ->
RQL.T.C.ColumnReference 'DataConnector ->
J.Value ->
m [OpExpG 'DataConnector v]
parseBoolExpOperations' rhsParser rootTable fieldInfoMap columnRef value =
withPathK (toTxt columnRef) $ parseOperations value
where
columnType :: RQL.T.C.ColumnType 'DataConnector
columnType = RQL.T.C.columnReferenceType columnRef
parseWithTy ty = rhsParser (CollectableTypeScalar ty)
parseOperations :: J.Value -> m [OpExpG 'DataConnector v]
parseOperations = \case
J.Object o -> traverse (parseOperation . first K.toText) $ KM.toList o
v -> pure . AEQ False <$> parseWithTy columnType v
parseOperation :: (Text, J.Value) -> m (OpExpG 'DataConnector v)
parseOperation (opStr, val) = withPathK opStr $
case opStr of
"_eq" -> parseEq
"$eq" -> parseEq
"_neq" -> parseNeq
"$neq" -> parseNeq
"_gt" -> parseGt
"$gt" -> parseGt
"_lt" -> parseLt
"$lt" -> parseLt
"_gte" -> parseGte
"$gte" -> parseGte
"_lte" -> parseLte
"$lte" -> parseLte
"_in" -> parseIn
"$in" -> parseIn
"_nin" -> parseNin
"$nin" -> parseNin
"_is_null" -> parseIsNull
"$is_null" -> parseIsNull
"_ceq" -> parseCeq
"$ceq" -> parseCeq
"_cneq" -> parseCne
"$cneq" -> parseCne
"_cgt" -> parseCgt
"$cgt" -> parseCgt
"_clt" -> parseClt
"$clt" -> parseClt
"_cgte" -> parseCgte
"$cgte" -> parseCgte
"_clte" -> parseClte
"$clte" -> parseClte
-- "_like" -> parseLike
-- "$like" -> parseLike
--
-- "_nlike" -> parseNlike
-- "$nlike" -> parseNlike
--
-- "_cast" -> parseCast
-- "$cast" -> parseCast
x -> throw400 UnexpectedPayload $ "Unknown operator: " <> x
where
parseOne = parseWithTy columnType val
parseManyWithType ty = rhsParser (CollectableTypeArray ty) val
parseEq = AEQ False <$> parseOne
parseNeq = ANE False <$> parseOne
parseIn = AIN <$> parseManyWithType columnType
parseNin = ANIN <$> parseManyWithType columnType
parseGt = AGT <$> parseOne
parseLt = ALT <$> parseOne
parseGte = AGTE <$> parseOne
parseLte = ALTE <$> parseOne
parseIsNull = bool ANISNOTNULL ANISNULL <$> decodeValue val
parseCeq = CEQ <$> decodeAndValidateRhsCol val
parseCne = CNE <$> decodeAndValidateRhsCol val
parseCgt = CGT <$> decodeAndValidateRhsCol val
parseClt = CLT <$> decodeAndValidateRhsCol val
parseCgte = CGTE <$> decodeAndValidateRhsCol val
parseClte = CLTE <$> decodeAndValidateRhsCol val
decodeAndValidateRhsCol :: J.Value -> m (RootOrCurrentColumn 'DataConnector)
decodeAndValidateRhsCol v = case v of
J.String _ -> go IsCurrent fieldInfoMap v
J.Array path -> case toList path of
[] -> throw400 Unexpected "path cannot be empty"
[col] -> go IsCurrent fieldInfoMap col
[J.String "$", col] -> do
rootTableInfo <-
SchemaCache.lookupTableCoreInfo rootTable
>>= flip onNothing (throw500 $ "unexpected: " <> rootTable <<> " doesn't exist")
go IsRoot (RQL.T.T._tciFieldInfoMap rootTableInfo) col
_ -> throw400 NotSupported "Relationship references are not supported in column comparison RHS"
_ -> throw400 Unexpected "a boolean expression JSON must be either a string or an array"
where
go rootInfo fieldInfoMap' columnValue = do
colName <- decodeValue columnValue
colInfo <- validateRhsColumn fieldInfoMap' colName
pure $ RootOrCurrentColumn rootInfo colInfo
validateRhsColumn :: RQL.T.T.FieldInfoMap (RQL.T.T.FieldInfo 'DataConnector) -> DC.ColumnName -> m DC.ColumnName
validateRhsColumn fieldInfoMap' rhsCol = do
rhsType <- RQL.T.T.askColumnType fieldInfoMap' rhsCol "column operators can only compare table columns"
when (columnType /= rhsType) $
throw400 UnexpectedPayload $
"incompatible column types: "
<> columnRef <<> " has type "
<> columnType <<> ", but "
<> rhsCol <<> " has type " <>> rhsType
pure rhsCol
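
-- | Parses a value appearing in a permission expression. Strings recognised as
-- session variables (via 'HSU.isSessionVariable' or 'HSU.isReqUserId') become
-- typed session variable references; any other value is parsed as a scalar
-- literal of the column's type. Array types are not supported by this backend.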
parseCollectableType' ::
MonadError QErr m =>
CollectableType (RQL.T.C.ColumnType 'DataConnector) ->
J.Value ->
m (PartialSQLExp 'DataConnector)
parseCollectableType' collectableType = \case
J.String t
| HSU.isSessionVariable t -> pure $ mkTypedSessionVar collectableType $ mkSessionVariable t
| HSU.isReqUserId t -> pure $ mkTypedSessionVar collectableType HSU.userIdHeader
val -> case collectableType of
CollectableTypeScalar columnType ->
PSESQLExp . DC.ValueLiteral (columnTypeToScalarType columnType) <$> RQL.T.C.parseScalarValueColumnType columnType val
CollectableTypeArray _ ->
throw400 NotSupported "Array types are not supported by the Data Connector backend"
mkTypedSessionVar ::
CollectableType (RQL.T.C.ColumnType 'DataConnector) ->
SessionVariable ->
PartialSQLExp 'DataConnector
mkTypedSessionVar columnType =
PSESessVar (columnTypeToScalarType <$> columnType)
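
-- | Converts an agent 'API.ErrorResponse' into a 400 error, preserving the
-- agent-supplied error details.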
errorAction :: MonadError QErr m => API.ErrorResponse -> m a
errorAction e = throw400WithDetail DataConnectorError (errorResponseSummary e) (_crDetails e)