mirror of
https://github.com/hasura/graphql-engine.git
synced 2024-12-17 20:41:49 +03:00
40db3d7eab
### Description This PR attempts to fix several issues with source customization as it relates to remote relationships. There were several issues regarding casing: at the relationship border, we didn't properly set the target source's case, we didn't have access to the list of supported features to decide whether the feature was allowed or not, and we didn't have access to the global default. However, all of that information is available when we build the schema cache, as we do resolve the case of some elements such as function names: we can therefore resolve source information at the same time, and simplify both the root of the schema and the remote relationship border. To do this, this PR introduces a new type, `ResolvedSourceCustomization`, to be used in the Schema Cache, as opposed to the metadata's `SourceCustomization`, following a pattern established by a lot of other types. ### Remaining work and open questions One major point of confusion: it seems to me that we didn't set the case at all across remote relationships, which would suggest we would use the case of the LHS source across the subset of the RHS one that is accessible through the remote relationship, which would in turn "corrupt" the parser cache and might result in the wrong case being used for that source later on. Is that assessment correct, and was I right to fix it? Another one is that we seem not to be using the local case of the RHS to name the field in an object relationship; unless I'm mistaken we only use it for array relationships? Is that intentional? This PR is also missing tests that would showcase the difference, and a changelog entry. To my knowledge, all the tests of this feature are in the python test suite; this could be the opportunity to move them to the hspec suite, but this might be a considerable amount of work? PR-URL: https://github.com/hasura/graphql-engine-mono/pull/5619 GitOrigin-RevId: 51a81b713a74575e82d9f96b51633f158ce3a47b
225 lines
10 KiB
Haskell
225 lines
10 KiB
Haskell
{-# LANGUAGE TemplateHaskellQuotes #-}
|
|
|
|
-- | This module only exposes one function, 'nodeField', which is used at the
|
|
-- root level of the schema to create the 'node' field in the Relay API schema.
|
|
module Hasura.GraphQL.Schema.Relay
|
|
( nodeInterface,
|
|
nodeField,
|
|
)
|
|
where
|
|
|
|
import Control.Lens hiding (index)
|
|
import Data.Aeson qualified as J
|
|
import Data.Aeson.Types qualified as J
|
|
import Data.Align (align)
|
|
import Data.Has
|
|
import Data.HashMap.Strict.Extended qualified as Map
|
|
import Data.Sequence.NonEmpty qualified as NESeq
|
|
import Data.Text qualified as T
|
|
import Data.These (partitionThese)
|
|
import Hasura.Base.Error
|
|
import Hasura.Base.ErrorMessage
|
|
import Hasura.Base.ToErrorValue
|
|
import Hasura.GraphQL.Schema.Backend
|
|
import Hasura.GraphQL.Schema.Common
|
|
import Hasura.GraphQL.Schema.Instances ()
|
|
import Hasura.GraphQL.Schema.NamingCase (NamingCase)
|
|
import Hasura.GraphQL.Schema.Node
|
|
import Hasura.GraphQL.Schema.Options qualified as Options
|
|
import Hasura.GraphQL.Schema.Parser (Kind (..), Parser, memoizeOn)
|
|
import Hasura.GraphQL.Schema.Parser qualified as P
|
|
import Hasura.GraphQL.Schema.Select
|
|
import Hasura.GraphQL.Schema.Table
|
|
import Hasura.Name qualified as Name
|
|
import Hasura.Prelude
|
|
import Hasura.RQL.IR qualified as IR
|
|
import Hasura.RQL.Types.Backend
|
|
import Hasura.RQL.Types.Column
|
|
import Hasura.RQL.Types.Common
|
|
import Hasura.RQL.Types.SchemaCache hiding (askTableInfo)
|
|
import Hasura.RQL.Types.Source
|
|
import Hasura.RQL.Types.SourceCustomization
|
|
import Hasura.RQL.Types.Table
|
|
import Hasura.SQL.AnyBackend qualified as AB
|
|
import Hasura.SQL.Backend
|
|
import Language.GraphQL.Draft.Syntax qualified as G
|
|
|
|
-- | Constructs the parser for the node interface.
--
-- As mentioned in Note [Internal Relay HashMap], this function must parse an
-- incoming query for ANY potential matching table. Its resulting parser returns
-- a 'NodeMap': a container that, to a source name and a table name, associates
-- both the parsed fields and all the relevant table information required to
-- craft a request.
--
-- The TemplateHaskell quote @'nodeInterface@ below is used purely as a stable
-- memoization key, so the interface parser is built at most once per schema.
nodeInterface :: SourceCache -> NodeInterfaceParserBuilder
nodeInterface sourceCache = NodeInterfaceParserBuilder $ memoizeOn 'nodeInterface () do
  let idDescription = G.Description "A globally unique identifier"
      idField = P.selection_ Name._id (Just idDescription) P.identifier
      nodeInterfaceDescription = G.Description "An object with globally unique ID"
  roleName <- retrieve scRole
  -- For every table of every source, build a parser yielding that table's
  -- parsed selection set alongside the information needed to query it later.
  tables :: [Parser 'Output n (SourceName, AB.AnyBackend TableMap)] <-
    catMaybes . concat <$> for (Map.toList sourceCache) \(sourceName, anySourceInfo) ->
      -- Dispatch on the source's backend type; both constraints are required
      -- to build a selection set for tables of that backend.
      AB.dispatchAnyBackendWithTwoConstraints @BackendSchema @BackendTableSelectSchema
        anySourceInfo
        \(sourceInfo :: SourceInfo b) ->
          -- Apply this source's naming customization while building its tables.
          withSourceCustomization (_siCustomization sourceInfo) do
            for (Map.toList $ takeValidTables $ _siTables sourceInfo) \(tableName, tableInfo) -> runMaybeT do
              -- A table only participates in the node interface when it has a
              -- primary key, the current role may select from it, and a
              -- selection set can be built; otherwise this 'MaybeT' short-
              -- circuits and the table is skipped (filtered by 'catMaybes').
              tablePkeyColumns <- hoistMaybe $ tableInfo ^? tiCoreInfo . tciPrimaryKey . _Just . pkColumns
              selectPermissions <- hoistMaybe $ tableSelectPermissions roleName tableInfo
              annotatedFieldsParser <- MaybeT $ tableSelectionSet sourceInfo tableInfo
              pure $
                annotatedFieldsParser <&> \fields ->
                  ( sourceName,
                    AB.mkAnyBackend $
                      TableMap $
                        Map.singleton tableName $
                          NodeInfo (_siConfiguration sourceInfo) selectPermissions tablePkeyColumns fields
                  )
  -- Collapse the per-table singletons into one map keyed by source name,
  -- fusing together the table maps that belong to the same source.
  pure $
    Map.fromListWith fuseAnyMaps
      <$> P.selectionSetInterface
        Name._Node
        (Just nodeInterfaceDescription)
        [idField]
        tables
  where
    -- this can only ever fail if somehow, within the same source, we ran into
    -- two tables of a different type b; in other words, it is impossible.
    fuseAnyMaps :: AB.AnyBackend TableMap -> AB.AnyBackend TableMap -> AB.AnyBackend TableMap
    fuseAnyMaps m1 m2 =
      AB.composeAnyBackend @Backend fuseMaps m1 m2 $
        error "panic: two tables of a different backend type within the same source"

    -- Union of two table maps known to share the same backend type @b@.
    fuseMaps :: forall b. Backend b => TableMap b -> TableMap b -> AB.AnyBackend TableMap
    fuseMaps (TableMap m1) (TableMap m2) = AB.mkAnyBackend @b $ TableMap $ Map.union m1 m2
|
|
|
|
-- | Creates a field parser for the top-level "node" field in the QueryRoot.
--
-- It expects one argument, the node id. It looks for the targeted table in the
-- 'NodeMap' returned by 'nodeInterface', and, if successful, attempts to craft
-- a corresponding 'QueryRootField' that will extract the requested row.
nodeField ::
  forall m n r.
  SourceCache ->
  MonadBuildSourceSchema r m n =>
  SchemaT r m (P.FieldParser n (IR.QueryRootField IR.UnpreparedValue))
nodeField sourceCache = do
  let idDescription = G.Description "A globally unique id"
      idArgument = P.field Name._id (Just idDescription) P.identifier
  stringifyNumbers <- retrieve Options.soStringifyNumbers
  tCase <- asks getter
  -- The node interface is only ever built for the Relay schema: reaching the
  -- 'HasuraSchema' case here is a programmer error, hence the 500.
  nodeObject <-
    retrieve scSchemaKind >>= \case
      HasuraSchema -> throw500 "internal error: the node field should only be built for the Relay schema"
      RelaySchema nodeBuilder -> runNodeBuilder nodeBuilder
  pure $
    P.subselection Name._node Nothing idArgument nodeObject `P.bindField` \(ident, parseds) -> do
      nodeId <- parseNodeId ident
      case nodeId of
        NodeIdV1 (V1NodeId tableName pKeys) -> do
          -- Node id V1.
          --
          -- We don't have the source name in a V1 node; we attempt all of them
          -- and pick the first one we find; there is a risk we might pick the
          -- wrong one if two tables with the same name exist in different
          -- sources! It is, however, unlikely; the engine emits V2 IDs, meaning
          -- if we ever encounter a V1 ID it means it has been manually entered
          -- by a user, saved from an older version of the engine?
          --
          -- NOTE(review): V1 ids are only looked up at the @'Postgres 'Vanilla@
          -- backend type — presumably because V1 ids predate generalized
          -- backends; confirm before extending.
          let matchingTables = flip mapMaybe (Map.keys sourceCache) \sourceName ->
                (sourceName,) <$> findNode @('Postgres 'Vanilla) sourceName tableName parseds
          case matchingTables of
            [(sourceName, nodeValue)] -> createRootField stringifyNumbers sourceName tableName nodeValue pKeys (Just tCase)
            [] -> throwInvalidNodeId $ "no such table found: " <> toErrorValue tableName
            l ->
              throwInvalidNodeId $
                "this V1 node id matches more than one table across different sources: " <> toErrorValue tableName
                  <> " exists in sources "
                  <> toErrorValue (fst <$> l)
        NodeIdV2 nodev2 ->
          -- Node id V2.
          --
          -- We have the source name and table name, we can extract the relevant
          -- info directly.
          AB.dispatchAnyBackend @Backend nodev2 \(V2NodeId sourceName tableName pKeys :: V2NodeId b) -> do
            nodeValue <-
              findNode @b sourceName tableName parseds
                `onNothing` throwInvalidNodeId ("no table " <> toErrorValue tableName <> " found in source " <> toErrorValue sourceName)
            createRootField stringifyNumbers sourceName tableName nodeValue pKeys (Just tCase)
  where
    -- Report the failure under the "args.id" path so the client can tell
    -- which argument was at fault.
    throwInvalidNodeId :: ErrorMessage -> n a
    throwInvalidNodeId t = P.withKey (J.Key "args") $ P.withKey (J.Key "id") $ P.parseError $ "invalid node id: " <> t

    -- Node ids are base64-encoded JSON; any decoding failure surfaces as an
    -- "invalid node id" parse error.
    parseNodeId :: Text -> n NodeId
    parseNodeId = either (throwInvalidNodeId . toErrorMessage . T.pack) pure . J.eitherDecode . base64Decode

    -- Given all the node id information about a table, and the extracted
    -- 'NodeInfo', craft the top-level query. This relies on the assumption
    -- that all backends that support relay use the same IR for single row
    -- selection.
    createRootField ::
      Backend b =>
      Options.StringifyNumbers ->
      SourceName ->
      TableName b ->
      NodeInfo b ->
      NESeq.NESeq J.Value ->
      Maybe NamingCase ->
      n (IR.QueryRootField IR.UnpreparedValue)
    createRootField stringifyNumbers sourceName tableName (NodeInfo sourceConfig perms pKeys fields) columnValues tCase = do
      whereExp <- buildNodeIdBoolExp columnValues pKeys
      pure $
        IR.RFDB sourceName $
          AB.mkAnyBackend $
            IR.SourceConfigWith sourceConfig Nothing $
              IR.QDBR $
                IR.QDBSingleRow $
                  IR.AnnSelectG
                    { IR._asnFields = fields,
                      IR._asnFrom = IR.FromTable tableName,
                      IR._asnPerm = tablePermissionsInfo perms,
                      IR._asnArgs =
                        IR.SelectArgs
                          { IR._saWhere = Just whereExp,
                            IR._saOrderBy = Nothing,
                            IR._saLimit = Nothing,
                            IR._saOffset = Nothing,
                            IR._saDistinct = Nothing
                          },
                      IR._asnStrfyNum = stringifyNumbers,
                      IR._asnNamingConvention = tCase
                    }

    -- Craft the 'where' condition of the query by making an `AEQ` entry for
    -- each primary key. This might fail if the given node id doesn't exactly
    -- have a valid entry for each primary key.
    buildNodeIdBoolExp ::
      Backend b =>
      NESeq.NESeq J.Value ->
      NESeq.NESeq (ColumnInfo b) ->
      n (IR.AnnBoolExp b (IR.UnpreparedValue b))
    buildNodeIdBoolExp columnValues pkeyColumns = do
      -- Both sequences are non-empty by construction; 'align' pairs up the
      -- tails and exposes any length mismatch as one-sided leftovers.
      let firstPkColumn NESeq.:<|| remainingPkColumns = pkeyColumns
          firstColumnValue NESeq.:<|| remainingColumns = columnValues
          (nonAlignedPkColumns, nonAlignedColumnValues, alignedTuples) =
            partitionThese $ toList $ align remainingPkColumns remainingColumns

      -- Leftover primary key columns: the node id is missing a value for
      -- at least one of them.
      unless (null nonAlignedPkColumns) $
        throwInvalidNodeId $
          "primary key columns " <> toErrorValue (map ciColumn nonAlignedPkColumns) <> " are missing"

      -- Leftover values: the node id carries more values than there are
      -- primary key columns.
      unless (null nonAlignedColumnValues) $
        throwInvalidNodeId $
          "unexpected column values " <> toErrorValue nonAlignedColumnValues

      let allTuples = (firstPkColumn, firstColumnValue) : alignedTuples
      -- Parse each JSON value at its column's scalar type and equate the
      -- column to it; the final condition is the conjunction of all pairs.
      IR.BoolAnd <$> for allTuples \(columnInfo, columnValue) -> do
        let columnType = ciType columnInfo
        parsedValue <-
          parseScalarValueColumnType columnType columnValue `onLeft` \e ->
            P.parseErrorWith P.ParseFailed $ "value of column " <> toErrorValue (ciColumn columnInfo) <> " in node id: " <> toErrorMessage (qeError e)
        pure $
          IR.BoolField $
            IR.AVColumn
              columnInfo
              [IR.AEQ True $ IR.UVParameter Nothing $ ColumnValue columnType parsedValue]