mirror of
https://github.com/hasura/graphql-engine.git
synced 2024-12-24 16:03:37 +03:00
0a69db81c9
### Description I am not 100% sure about this PR; while I think the code is better this way, I'm willing to be convinced otherwise. In short, this PR moves the `RoleName` field into the `SchemaContext`, instead of being a nebulous `Has RoleName` constraint on the reader monad. The major upside of this is that it makes it an explicit named field, rather than something that must be given as part of a tuple of arguments when calling `runReader`. However, the downside is that it breaks the helper permissions functions of `Schema.Table`, which relied on `Has RoleName r`. This PR makes the choice of passing the role name explicitly to all of those functions, which in turn means first explicitly fetching the role name in a lot of places. It makes it more explicit when a schema building block relies on the role name, but is a bit verbose... ### Alternatives Some alternatives worth considering: - attempting something like `Has context r, Has RoleName context`, which would allow them to be independent from the context but still fetch the role name from the reader, but might require type annotations to not be ambiguous - keeping the permission functions the same, with `Has RoleName r`, and introducing a bunch of newtypes instead of using tuples to explicitly implement all the required `Has` instances - changing the permission functions to `Has SchemaContext r`, since they are functions used only to build the schema, and therefore may be allowed to be tied to the context. What do y'all think? PR-URL: https://github.com/hasura/graphql-engine-mono/pull/5073 GitOrigin-RevId: 8fd09fafb54905a4d115ef30842d35da0c3db5d2
230 lines
11 KiB
Haskell
230 lines
11 KiB
Haskell
{-# LANGUAGE TemplateHaskellQuotes #-}
|
|
|
|
-- | This module only exposes one function, 'nodeField', which is used at the
|
|
-- root level of the schema to create the 'node' field in the Relay API schema.
|
|
module Hasura.GraphQL.Schema.Relay
|
|
( nodeInterface,
|
|
nodeField,
|
|
)
|
|
where
|
|
|
|
import Control.Lens hiding (index)
|
|
import Data.Aeson qualified as J
|
|
import Data.Aeson.Types qualified as J
|
|
import Data.Align (align)
|
|
import Data.Has
|
|
import Data.HashMap.Strict.Extended qualified as Map
|
|
import Data.Sequence.NonEmpty qualified as NESeq
|
|
import Data.Text qualified as T
|
|
import Data.These (partitionThese)
|
|
import Hasura.Base.Error
|
|
import Hasura.Base.ErrorMessage
|
|
import Hasura.Base.ToErrorValue
|
|
import Hasura.GraphQL.Schema.Backend
|
|
import Hasura.GraphQL.Schema.Common
|
|
import Hasura.GraphQL.Schema.Instances ()
|
|
import Hasura.GraphQL.Schema.NamingCase (NamingCase)
|
|
import Hasura.GraphQL.Schema.Node
|
|
import Hasura.GraphQL.Schema.Options qualified as Options
|
|
import Hasura.GraphQL.Schema.Parser (Kind (..), Parser, memoizeOn)
|
|
import Hasura.GraphQL.Schema.Parser qualified as P
|
|
import Hasura.GraphQL.Schema.Select
|
|
import Hasura.GraphQL.Schema.Table
|
|
import Hasura.GraphQL.Schema.Typename (withTypenameCustomization)
|
|
import Hasura.Name qualified as Name
|
|
import Hasura.Prelude
|
|
import Hasura.RQL.IR qualified as IR
|
|
import Hasura.RQL.Types.Backend
|
|
import Hasura.RQL.Types.Column
|
|
import Hasura.RQL.Types.Common
|
|
import Hasura.RQL.Types.SchemaCache hiding (askTableInfo)
|
|
import Hasura.RQL.Types.Source
|
|
import Hasura.RQL.Types.SourceCustomization
|
|
import Hasura.RQL.Types.Table
|
|
import Hasura.SQL.AnyBackend qualified as AB
|
|
import Hasura.SQL.Backend
|
|
import Language.GraphQL.Draft.Syntax qualified as G
|
|
|
|
-- | Constructs the parser for the node interface.
--
-- As mentioned in Note [Internal Relay HashMap], this function must parse an
-- incoming query for ANY potential matching table. Its resulting parser returns
-- a 'NodeMap': a container that, to a source name and a table name, associates
-- both the parsed fields and all the relevant table information required to
-- craft a request.
nodeInterface :: SourceCache -> NodeInterfaceParserBuilder
nodeInterface sourceCache = NodeInterfaceParserBuilder $ memoizeOn 'nodeInterface () do
  let idDescription = G.Description "A globally unique identifier"
      idField = P.selection_ Name._id (Just idDescription) P.identifier
      nodeInterfaceDescription = G.Description "An object with globally unique ID"
  -- Naming case and role name both come from the reader context; the role name
  -- is an explicit field of the schema context (see PR description: it was
  -- previously a `Has RoleName` constraint).
  tCase <- asks getter
  roleName <- retrieve scRole
  -- Build, for every valid table of every source, a parser for that table's
  -- selection set. Tables are skipped (`runMaybeT` yields `Nothing`) when they
  -- have no primary key, when the current role has no select permissions, or
  -- when no selection-set parser can be built.
  tables :: [Parser 'Output n (SourceName, AB.AnyBackend TableMap)] <-
    catMaybes . concat <$> for (Map.toList sourceCache) \(sourceName, anySourceInfo) ->
      AB.dispatchAnyBackendWithTwoConstraints @BackendSchema @BackendTableSelectSchema
        anySourceInfo
        \(sourceInfo :: SourceInfo b) ->
          for (Map.toList $ takeValidTables $ _siTables sourceInfo) \(tableName, tableInfo) -> runMaybeT do
            tablePkeyColumns <- hoistMaybe $ tableInfo ^? tiCoreInfo . tciPrimaryKey . _Just . pkColumns
            selectPermissions <- hoistMaybe $ tableSelectPermissions roleName tableInfo
            annotatedFieldsParser <-
              MaybeT $
                withTypenameCustomization
                  (mkCustomizedTypename (_scTypeNames $ _siCustomization sourceInfo) tCase)
                  (tableSelectionSet sourceInfo tableInfo)
            -- On a successful parse, pair the source name with a singleton
            -- 'TableMap' carrying everything needed to later build the query
            -- (source config, permissions, pkey columns, parsed fields).
            pure $
              annotatedFieldsParser <&> \fields ->
                ( sourceName,
                  AB.mkAnyBackend $
                    TableMap $
                      Map.singleton tableName $
                        NodeInfo (_siConfiguration sourceInfo) selectPermissions tablePkeyColumns fields
                )
  -- Combine all per-table parsers into one interface parser; entries for the
  -- same source name are merged with 'fuseAnyMaps'.
  pure $
    Map.fromListWith fuseAnyMaps
      <$> P.selectionSetInterface
        Name._Node
        (Just nodeInterfaceDescription)
        [idField]
        tables
  where
    -- this can only ever fail if somehow, within the same source, we ran into
    -- two tables of a different type b; in other words, it is impossible.
    fuseAnyMaps :: AB.AnyBackend TableMap -> AB.AnyBackend TableMap -> AB.AnyBackend TableMap
    fuseAnyMaps m1 m2 =
      AB.composeAnyBackend @Backend fuseMaps m1 m2 $
        error "panic: two tables of a different backend type within the same source"

    -- Merge two same-backend table maps; left-biased via 'Map.union'.
    fuseMaps :: forall b. Backend b => TableMap b -> TableMap b -> AB.AnyBackend TableMap
    fuseMaps (TableMap m1) (TableMap m2) = AB.mkAnyBackend @b $ TableMap $ Map.union m1 m2
|
|
|
|
-- | Creates a field parser for the top-level "node" field in the QueryRoot.
--
-- It expects one argument, the node id. It looks for the targeted table in the
-- 'NodeMap' returned by 'nodeInterface', and, if successful, attempts to craft
-- a corresponding 'QueryRootField' that will extract the requested row.
nodeField ::
  forall m n r.
  SourceCache ->
  MonadBuildSchemaBase r m n =>
  m (P.FieldParser n (IR.QueryRootField IR.UnpreparedValue))
nodeField sourceCache = do
  let idDescription = G.Description "A globally unique id"
      idArgument = P.field Name._id (Just idDescription) P.identifier
  stringifyNumbers <- retrieve Options.soStringifyNumbers
  tCase <- asks getter
  -- The node interface parser is only available when building the Relay
  -- schema; encountering the Hasura schema kind here is an internal error.
  nodeObject <-
    retrieve scSchemaKind >>= \case
      HasuraSchema -> throw500 "internal error: the node field should only be built for the Relay schema"
      RelaySchema nodeBuilder -> runNodeBuilder nodeBuilder
  pure $
    P.subselection Name._node Nothing idArgument nodeObject `P.bindField` \(ident, parseds) -> do
      -- Decode the base64-encoded JSON node id, then dispatch on its version.
      nodeId <- parseNodeId ident
      case nodeId of
        NodeIdV1 (V1NodeId tableName pKeys) -> do
          -- Node id V1.
          --
          -- We don't have the source name in a V1 node; we attempt all of them
          -- and pick the first one we find; there is a risk we might pick the
          -- wrong one if two tables with the same name exist in different
          -- sources! It is, however, unlikely; the engine emits V2 IDs, meaning
          -- if we ever encounter a V1 ID it means it has been manually entered
          -- by a user, saved from an older version of the engine?
          --
          -- NOTE: the lookup is pinned to the vanilla Postgres backend — V1
          -- node ids predate generalized backends.
          let matchingTables = flip mapMaybe (Map.keys sourceCache) \sourceName ->
                (sourceName,) <$> findNode @('Postgres 'Vanilla) sourceName tableName parseds
          case matchingTables of
            [(sourceName, nodeValue)] -> createRootField stringifyNumbers sourceName tableName nodeValue pKeys (Just tCase)
            [] -> throwInvalidNodeId $ "no such table found: " <> toErrorValue tableName
            l ->
              throwInvalidNodeId $
                "this V1 node id matches more than one table across different sources: " <> toErrorValue tableName
                  <> " exists in sources "
                  <> toErrorValue (fst <$> l)
        NodeIdV2 nodev2 ->
          -- Node id V2.
          --
          -- We have the source name and table name, we can extract the relevant
          -- info directly.
          AB.dispatchAnyBackend @Backend nodev2 \(V2NodeId sourceName tableName pKeys :: V2NodeId b) -> do
            nodeValue <-
              findNode @b sourceName tableName parseds
                `onNothing` throwInvalidNodeId ("no table " <> toErrorValue tableName <> " found in source " <> toErrorValue sourceName)
            createRootField stringifyNumbers sourceName tableName nodeValue pKeys (Just tCase)
  where
    -- Fail the parse with an "invalid node id" message, nested under the
    -- "args" / "id" keys so the error points at the offending argument.
    throwInvalidNodeId :: ErrorMessage -> n a
    throwInvalidNodeId t = P.withKey (J.Key "args") $ P.withKey (J.Key "id") $ P.parseError $ "invalid node id: " <> t

    -- A node id is base64-encoded JSON; decoding failures are reported as
    -- invalid node ids.
    parseNodeId :: Text -> n NodeId
    parseNodeId = either (throwInvalidNodeId . toErrorMessage . T.pack) pure . J.eitherDecode . base64Decode

    -- Given all the node id information about a table, and the extracted
    -- 'NodeInfo', craft the top-level query. This relies on the assumption
    -- that all backends that support relay use the same IR for single row
    -- selection.
    createRootField ::
      Backend b =>
      Options.StringifyNumbers ->
      SourceName ->
      TableName b ->
      NodeInfo b ->
      NESeq.NESeq J.Value ->
      Maybe NamingCase ->
      n (IR.QueryRootField IR.UnpreparedValue)
    createRootField stringifyNumbers sourceName tableName (NodeInfo sourceConfig perms pKeys fields) columnValues tCase = do
      whereExp <- buildNodeIdBoolExp columnValues pKeys
      -- A node lookup is a single-row select filtered on the primary key
      -- columns; no ordering, limit, offset, or distinct is applied.
      pure $
        IR.RFDB sourceName $
          AB.mkAnyBackend $
            IR.SourceConfigWith sourceConfig Nothing $
              IR.QDBR $
                IR.QDBSingleRow $
                  IR.AnnSelectG
                    { IR._asnFields = fields,
                      IR._asnFrom = IR.FromTable tableName,
                      IR._asnPerm = tablePermissionsInfo perms,
                      IR._asnArgs =
                        IR.SelectArgs
                          { IR._saWhere = Just whereExp,
                            IR._saOrderBy = Nothing,
                            IR._saLimit = Nothing,
                            IR._saOffset = Nothing,
                            IR._saDistinct = Nothing
                          },
                      IR._asnStrfyNum = stringifyNumbers,
                      IR._asnNamingConvention = tCase
                    }

    -- Craft the 'where' condition of the query by making an `AEQ` entry for
    -- each primary key. This might fail if the given node id doesn't exactly
    -- have a valid entry for each primary key.
    buildNodeIdBoolExp ::
      Backend b =>
      NESeq.NESeq J.Value ->
      NESeq.NESeq (ColumnInfo b) ->
      n (IR.AnnBoolExp b (IR.UnpreparedValue b))
    buildNodeIdBoolExp columnValues pkeyColumns = do
      -- Both sequences are non-empty; peel off the heads and zip the tails
      -- with 'align' so we can detect a length mismatch in either direction.
      let firstPkColumn NESeq.:<|| remainingPkColumns = pkeyColumns
          firstColumnValue NESeq.:<|| remainingColumns = columnValues
          (nonAlignedPkColumns, nonAlignedColumnValues, alignedTuples) =
            partitionThese $ toList $ align remainingPkColumns remainingColumns

      -- More primary key columns than values: the node id is incomplete.
      unless (null nonAlignedPkColumns) $
        throwInvalidNodeId $
          "primary key columns " <> toErrorValue (map ciColumn nonAlignedPkColumns) <> " are missing"

      -- More values than primary key columns: the node id has extra entries.
      unless (null nonAlignedColumnValues) $
        throwInvalidNodeId $
          "unexpected column values " <> toErrorValue nonAlignedColumnValues

      let allTuples = (firstPkColumn, firstColumnValue) : alignedTuples
      -- Parse each JSON value at its column's scalar type and build an
      -- equality predicate; the conjunction of all of them selects the row.
      IR.BoolAnd <$> for allTuples \(columnInfo, columnValue) -> do
        let columnType = ciType columnInfo
        parsedValue <-
          parseScalarValueColumnType columnType columnValue `onLeft` \e ->
            P.parseErrorWith P.ParseFailed $ "value of column " <> toErrorValue (ciColumn columnInfo) <> " in node id: " <> toErrorMessage (qeError e)
        pure $
          IR.BoolField $
            IR.AVColumn
              columnInfo
              [IR.AEQ True $ IR.UVParameter Nothing $ ColumnValue columnType parsedValue]
|