Mirror of https://github.com/hasura/graphql-engine.git, synced 2024-12-17 20:41:49 +03:00
342391f39d
This upgrades the version of Ormolu required by the HGE repository to v0.5.0.1, and reformats all code accordingly.

Ormolu v0.5 reformats code that uses infix operators. This is mostly useful, adding newlines and indentation to make it clear which operators are applied first, but in some cases it's unpleasant. To make this easier on the eyes, I had to do the following:

* Add a few fixity declarations (search for `infix`)
* Add parentheses to make precedence clear, allowing Ormolu to keep everything on one line
* Rename `relevantEq` to `(==~)` in #6651 and set it to `infix 4`
* Add a few _.ormolu_ files (thanks to @hallettj for helping me get started), mostly for Autodocodec operators that don't have explicit fixity declarations

In general, I think these changes are quite reasonable. They mostly affect indentation.

PR-URL: https://github.com/hasura/graphql-engine-mono/pull/6675
GitOrigin-RevId: cd47d87f1d089fb0bc9dcbbe7798dbceedcd7d83
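A fixity declaration is ordinary Haskell syntax, and Ormolu v0.5 can also pick such declarations up from a _.ormolu_ file for operators whose defining package doesn't provide one. A minimal sketch, assuming the _.ormolu_ file takes plain fixity declarations, one per line; the second operator shown is purely illustrative:

    infix 4 ==~    -- in the module defining the operator
    infixr 8 .=    -- in a .ormolu file, for an operator from another package

Once the fixity is known, Ormolu can choose where to break and how to indent chains of those operators instead of falling back on its defaults.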
226 lines
10 KiB
Haskell
{-# LANGUAGE TemplateHaskellQuotes #-}

-- | This module only exposes two functions, 'nodeInterface' and 'nodeField',
-- which are used at the root level of the schema to create the 'Node'
-- interface and the 'node' field in the Relay API schema.
module Hasura.GraphQL.Schema.Relay
  ( nodeInterface,
    nodeField,
  )
where

import Control.Lens hiding (index)
import Data.Aeson qualified as J
import Data.Aeson.Types qualified as J
import Data.Align (align)
import Data.Has
import Data.HashMap.Strict.Extended qualified as Map
import Data.Sequence.NonEmpty qualified as NESeq
import Data.Text qualified as T
import Data.These (partitionThese)
import Hasura.Base.Error
import Hasura.Base.ErrorMessage
import Hasura.Base.ToErrorValue
import Hasura.GraphQL.Schema.Backend
import Hasura.GraphQL.Schema.Common
import Hasura.GraphQL.Schema.Instances ()
import Hasura.GraphQL.Schema.NamingCase (NamingCase)
import Hasura.GraphQL.Schema.Node
import Hasura.GraphQL.Schema.Options qualified as Options
import Hasura.GraphQL.Schema.Parser (Kind (..), Parser, memoizeOn)
import Hasura.GraphQL.Schema.Parser qualified as P
import Hasura.GraphQL.Schema.Select
import Hasura.GraphQL.Schema.Table
import Hasura.Name qualified as Name
import Hasura.Prelude
import Hasura.RQL.IR qualified as IR
import Hasura.RQL.Types.Backend
import Hasura.RQL.Types.Column
import Hasura.RQL.Types.Common
import Hasura.RQL.Types.SchemaCache hiding (askTableInfo)
import Hasura.RQL.Types.Source
import Hasura.RQL.Types.SourceCustomization
import Hasura.RQL.Types.Table
import Hasura.SQL.AnyBackend qualified as AB
import Hasura.SQL.Backend
import Language.GraphQL.Draft.Syntax qualified as G

-- | Constructs the parser for the node interface.
--
-- As mentioned in Note [Internal Relay HashMap], this function must parse an
-- incoming query for ANY potential matching table. Its resulting parser returns
-- a 'NodeMap': a container that, to a source name and a table name, associates
-- both the parsed fields and all the relevant table information required to
-- craft a request.
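--
-- As a reading aid, a rough sketch of the shape involved (the real definitions
-- live in "Hasura.GraphQL.Schema.Node"; what follows is an approximation, not
-- a copy of them):
--
-- > type NodeMap = HashMap SourceName (AB.AnyBackend TableMap)
-- > newtype TableMap b = TableMap (HashMap (TableName b) (NodeInfo b))
--
-- i.e. the outer map is keyed by source name, each per-source 'TableMap' is
-- keyed by table name, and every 'NodeInfo' carries the source configuration,
-- select permissions, primary key columns, and parsed selection set used to
-- build the final query.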
nodeInterface :: SourceCache -> NodeInterfaceParserBuilder
nodeInterface sourceCache = NodeInterfaceParserBuilder $ memoizeOn 'nodeInterface () do
  let idDescription = G.Description "A globally unique identifier"
      idField = P.selection_ Name._id (Just idDescription) P.identifier
      nodeInterfaceDescription = G.Description "An object with globally unique ID"
  roleName <- retrieve scRole
  tables :: [Parser 'Output n (SourceName, AB.AnyBackend TableMap)] <-
    catMaybes . concat <$> for (Map.toList sourceCache) \(sourceName, anySourceInfo) ->
      AB.dispatchAnyBackendWithTwoConstraints @BackendSchema @BackendTableSelectSchema
        anySourceInfo
        \(sourceInfo :: SourceInfo b) ->
          withSourceCustomization (_siCustomization sourceInfo) do
            for (Map.toList $ takeValidTables $ _siTables sourceInfo) \(tableName, tableInfo) -> runMaybeT do
              tablePkeyColumns <- hoistMaybe $ tableInfo ^? tiCoreInfo . tciPrimaryKey . _Just . pkColumns
              selectPermissions <- hoistMaybe $ tableSelectPermissions roleName tableInfo
              annotatedFieldsParser <- MaybeT $ tableSelectionSet sourceInfo tableInfo
              pure $
                annotatedFieldsParser <&> \fields ->
                  ( sourceName,
                    AB.mkAnyBackend $
                      TableMap $
                        Map.singleton tableName $
                          NodeInfo (_siConfiguration sourceInfo) selectPermissions tablePkeyColumns fields
                  )
  pure $
    Map.fromListWith fuseAnyMaps
      <$> P.selectionSetInterface
        Name._Node
        (Just nodeInterfaceDescription)
        [idField]
        tables
  where
    -- this can only ever fail if somehow, within the same source, we ran into
    -- two tables of a different type b; in other words, it is impossible.
    fuseAnyMaps :: AB.AnyBackend TableMap -> AB.AnyBackend TableMap -> AB.AnyBackend TableMap
    fuseAnyMaps m1 m2 =
      AB.composeAnyBackend @Backend fuseMaps m1 m2 $
        error "panic: two tables of a different backend type within the same source"

    fuseMaps :: forall b. Backend b => TableMap b -> TableMap b -> AB.AnyBackend TableMap
    fuseMaps (TableMap m1) (TableMap m2) = AB.mkAnyBackend @b $ TableMap $ Map.union m1 m2

-- | Creates a field parser for the top-level "node" field in the QueryRoot.
--
-- It expects one argument, the node id. It looks for the targeted table in the
-- 'NodeMap' returned by 'nodeInterface', and, if successful, attempts to craft
-- a corresponding 'QueryRootField' that will extract the requested row.
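--
-- From the client's point of view, the field is used roughly as follows (the
-- table name and the opaque id below are purely illustrative):
--
-- > query {
-- >   node(id: "some-base64-encoded-node-id") {
-- >     ... on author {
-- >       name
-- >     }
-- >   }
-- > }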
nodeField ::
  forall m n r.
  SourceCache ->
  MonadBuildSourceSchema r m n =>
  SchemaT r m (P.FieldParser n (IR.QueryRootField IR.UnpreparedValue))
nodeField sourceCache = do
  let idDescription = G.Description "A globally unique id"
      idArgument = P.field Name._id (Just idDescription) P.identifier
  stringifyNumbers <- retrieve Options.soStringifyNumbers
  tCase <- asks getter
  nodeObject <-
    retrieve scSchemaKind >>= \case
      HasuraSchema -> throw500 "internal error: the node field should only be built for the Relay schema"
      RelaySchema nodeBuilder -> runNodeBuilder nodeBuilder
  pure $
    P.subselection Name._node Nothing idArgument nodeObject `P.bindField` \(ident, parseds) -> do
      nodeId <- parseNodeId ident
      case nodeId of
        NodeIdV1 (V1NodeId tableName pKeys) -> do
          -- Node id V1.
          --
          -- We don't have the source name in a V1 node id; we attempt all of
          -- them and pick the first one we find. There is a risk we might pick
          -- the wrong one if two tables with the same name exist in different
          -- sources! It is, however, unlikely: the engine only emits V2 IDs,
          -- meaning that if we ever encounter a V1 ID it has most likely been
          -- entered manually by a user, or saved from an older version of the
          -- engine.
          let matchingTables = flip mapMaybe (Map.keys sourceCache) \sourceName ->
                (sourceName,) <$> findNode @('Postgres 'Vanilla) sourceName tableName parseds
          case matchingTables of
            [(sourceName, nodeValue)] -> createRootField stringifyNumbers sourceName tableName nodeValue pKeys (Just tCase)
            [] -> throwInvalidNodeId $ "no such table found: " <> toErrorValue tableName
            l ->
              throwInvalidNodeId $
                "this V1 node id matches more than one table across different sources: "
                  <> toErrorValue tableName
                  <> " exists in sources "
                  <> toErrorValue (fst <$> l)
        NodeIdV2 nodev2 ->
          -- Node id V2.
          --
          -- We have the source name and table name, so we can extract the
          -- relevant info directly.
          AB.dispatchAnyBackend @Backend nodev2 \(V2NodeId sourceName tableName pKeys :: V2NodeId b) -> do
            nodeValue <-
              findNode @b sourceName tableName parseds
                `onNothing` throwInvalidNodeId ("no table " <> toErrorValue tableName <> " found in source " <> toErrorValue sourceName)
            createRootField stringifyNumbers sourceName tableName nodeValue pKeys (Just tCase)
  where
    throwInvalidNodeId :: ErrorMessage -> n a
    throwInvalidNodeId t = P.withKey (J.Key "args") $ P.withKey (J.Key "id") $ P.parseError $ "invalid node id: " <> t

    parseNodeId :: Text -> n NodeId
    parseNodeId = either (throwInvalidNodeId . toErrorMessage . T.pack) pure . J.eitherDecode . base64Decode

    -- Given all the node id information about a table, and the extracted
    -- 'NodeInfo', craft the top-level query. This relies on the assumption
    -- that all backends that support relay use the same IR for single row
    -- selection.
    createRootField ::
      Backend b =>
      Options.StringifyNumbers ->
      SourceName ->
      TableName b ->
      NodeInfo b ->
      NESeq.NESeq J.Value ->
      Maybe NamingCase ->
      n (IR.QueryRootField IR.UnpreparedValue)
    createRootField stringifyNumbers sourceName tableName (NodeInfo sourceConfig perms pKeys fields) columnValues tCase = do
      whereExp <- buildNodeIdBoolExp columnValues pKeys
      pure $
        IR.RFDB sourceName $
          AB.mkAnyBackend $
            IR.SourceConfigWith sourceConfig Nothing $
              IR.QDBR $
                IR.QDBSingleRow $
                  IR.AnnSelectG
                    { IR._asnFields = fields,
                      IR._asnFrom = IR.FromTable tableName,
                      IR._asnPerm = tablePermissionsInfo perms,
                      IR._asnArgs =
                        IR.SelectArgs
                          { IR._saWhere = Just whereExp,
                            IR._saOrderBy = Nothing,
                            IR._saLimit = Nothing,
                            IR._saOffset = Nothing,
                            IR._saDistinct = Nothing
                          },
                      IR._asnStrfyNum = stringifyNumbers,
                      IR._asnNamingConvention = tCase
                    }

    -- Craft the 'where' condition of the query by making an `AEQ` entry for
    -- each primary key column. This might fail if the given node id doesn't
    -- have exactly one valid entry for each primary key column.
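    --
    -- For instance, for a table whose primary key is (id, org_id) and a node
    -- id carrying the values [1, 2], this builds, morally, the boolean
    -- expression @id = 1 AND org_id = 2@ (the column names and values here are
    -- purely illustrative).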
    buildNodeIdBoolExp ::
      Backend b =>
      NESeq.NESeq J.Value ->
      NESeq.NESeq (ColumnInfo b) ->
      n (IR.AnnBoolExp b (IR.UnpreparedValue b))
    buildNodeIdBoolExp columnValues pkeyColumns = do
      let firstPkColumn NESeq.:<|| remainingPkColumns = pkeyColumns
          firstColumnValue NESeq.:<|| remainingColumns = columnValues
          (nonAlignedPkColumns, nonAlignedColumnValues, alignedTuples) =
            partitionThese $ toList $ align remainingPkColumns remainingColumns

      unless (null nonAlignedPkColumns) $
        throwInvalidNodeId $
          "primary key columns " <> toErrorValue (map ciColumn nonAlignedPkColumns) <> " are missing"

      unless (null nonAlignedColumnValues) $
        throwInvalidNodeId $
          "unexpected column values " <> toErrorValue nonAlignedColumnValues

      let allTuples = (firstPkColumn, firstColumnValue) : alignedTuples
      IR.BoolAnd <$> for allTuples \(columnInfo, columnValue) -> do
        let columnType = ciType columnInfo
        parsedValue <-
          parseScalarValueColumnType columnType columnValue `onLeft` \e ->
            P.parseErrorWith P.ParseFailed $ "value of column " <> toErrorValue (ciColumn columnInfo) <> " in node id: " <> toErrorMessage (qeError e)
        pure $
          IR.BoolField $
            IR.AVColumn
              columnInfo
              [IR.AEQ True $ IR.UVParameter Nothing $ ColumnValue columnType parsedValue]