graphql-engine/server/src-lib/Hasura/Backends/Postgres/Translate/Select.hs

1468 lines
60 KiB
Haskell
Raw Normal View History

module Hasura.Backends.Postgres.Translate.Select
( selectQuerySQL
, selectAggregateQuerySQL
, connectionSelectQuerySQL
, asSingleRowJsonResp
, mkSQLSelect
, mkAggregateSelect
, mkConnectionSelect
, PostgresAnnotatedFieldJSON
) where
2020-08-27 19:36:39 +03:00
import Hasura.Prelude
import qualified Data.HashMap.Strict as HM
import qualified Data.List.NonEmpty as NE
import qualified Data.Text as T
import qualified Database.PG.Query as Q
import Control.Lens hiding (op)
2020-08-27 19:36:39 +03:00
import Control.Monad.Writer.Strict
import Data.Text.Extended
2020-08-27 19:36:39 +03:00
import qualified Hasura.Backends.Postgres.SQL.DML as S
2020-08-27 19:36:39 +03:00
import Hasura.Backends.Postgres.SQL.Rewrite
import Hasura.Backends.Postgres.SQL.Types
import Hasura.Backends.Postgres.Translate.BoolExp
import Hasura.Backends.Postgres.Translate.Types
import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.GraphQL.Schema.Common (currentNodeIdVersion, nodeIdVersionInt)
import Hasura.RQL.DML.Internal
import Hasura.RQL.IR.OrderBy
import Hasura.RQL.IR.Select
import Hasura.RQL.Types hiding (Identifier)
import Hasura.SQL.Types
-- | Render a simple (non-aggregate) annotated select as a runnable SQL query.
selectQuerySQL
  :: forall pgKind
   . (Backend ('Postgres pgKind), PostgresAnnotatedFieldJSON pgKind)
  => JsonAggSelect
  -> AnnSimpleSelect ('Postgres pgKind)
  -> Q.Query
selectQuerySQL jsonAggSelect simpleSelect =
  let selectAST = mkSQLSelect jsonAggSelect simpleSelect
  in Q.fromBuilder (toSQL selectAST)
-- | Render an aggregate annotated select as a runnable SQL query.
selectAggregateQuerySQL
  :: forall pgKind
   . (Backend ('Postgres pgKind), PostgresAnnotatedFieldJSON pgKind)
  => AnnAggregateSelect ('Postgres pgKind)
  -> Q.Query
selectAggregateQuerySQL aggregateSelect =
  Q.fromBuilder $ toSQL $ mkAggregateSelect aggregateSelect
-- | Render a Relay connection select as a runnable SQL query.
connectionSelectQuerySQL
  :: forall pgKind
   . ( Backend ('Postgres pgKind)
     , PostgresAnnotatedFieldJSON pgKind
     )
  => ConnectionSelect ('Postgres pgKind) (Const Void) S.SQLExp
  -> Q.Query
connectionSelectQuerySQL connectionSelect =
  Q.fromBuilder $ toSQL $ mkConnectionSelect connectionSelect
-- | Run a query expected to return exactly one row with one json column and
-- wrap that column as the 'EncJSON' response.
asSingleRowJsonResp
  :: Q.Query
  -> [Q.PrepArg]
  -> Q.TxE QErr EncJSON
asSingleRowJsonResp query args = do
  -- 'True' here is the prepared-statement flag passed through to rawQE
  row <- Q.rawQE dmlTxErrorHandler query args True
  pure $ encJFromBS $ runIdentity $ Q.getRow row
-- Conversion of SelectQ happens in 2 Stages.
-- Stage 1 : Convert input query into an annotated AST
-- Stage 2 : Convert annotated AST to SQL Select
-- | Derive a SQL identifier from a qualified function's textual name.
functionToIdentifier :: QualifiedFunction -> Identifier
functionToIdentifier qualifiedFunction =
  Identifier (qualifiedObjectToText qualifiedFunction)
-- | Translate a 'SelectFrom' into a SQL @FROM@ item. Function selects are
-- given an alias derived from the function name, carrying any definition list.
selectFromToFromItem :: Identifier -> SelectFrom ('Postgres pgKind) -> S.FromItem
selectFromToFromItem sourcePrefix from = case from of
  FromTable tableName  -> S.FISimple tableName Nothing
  FromIdentifier ident -> S.FIIdentifier ident
  FromFunction function args definitionList ->
    let functionAlias = S.mkFunctionAlias (functionToIdentifier function) definitionList
        functionExp   = S.FunctionExp function (fromTableRowArgs sourcePrefix args)
                          (Just functionAlias)
    in S.FIFunc functionExp
allow custom mutations through actions (#3042) * basic doc for actions * custom_types, sync and async actions * switch to graphql-parser-hs on github * update docs * metadata import/export * webhook calls are now supported * relationships in sync actions * initialise.sql is now in sync with the migration file * fix metadata tests * allow specifying arguments of actions * fix blacklist check on check_build_worthiness job * track custom_types and actions related tables * handlers are now triggered on async actions * default to pgjson unless a field is involved in relationships, for generating definition list * use 'true' for action filter for non admin role * fix create_action_permission sql query * drop permissions when dropping an action * add a hdb_role view (and relationships) to fetch all roles in the system * rename 'webhook' key in action definition to 'handler' * allow templating actions wehook URLs with env vars * add 'update_action' /v1/query type * allow forwarding client headers by setting `forward_client_headers` in action definition * add 'headers' configuration in action definition * handle webhook error response based on status codes * support array relationships for custom types * implement single row mutation, see https://github.com/hasura/graphql-engine/issues/3731 * single row mutation: rename 'pk_columns' -> 'columns' and no-op refactor * use top level primary key inputs for delete_by_pk & account select permissions for single row mutations * use only REST semantics to resolve the webhook response * use 'pk_columns' instead of 'columns' for update_by_pk input * add python basic tests for single row mutations * add action context (name) in webhook payload * Async action response is accessible for non admin roles only if the request session vars equals to action's * clean nulls, empty arrays for actions, custom types in export metadata * async action mutation returns only the UUID of the action * unit tests for URL template parser * Basic sync 
actions python tests * fix output in async query & add async tests * add admin secret header in async actions python test * document async action architecture in Resolve/Action.hs file * support actions returning array of objects * tests for list type response actions * update docs with actions and custom types metadata API reference * update actions python tests as per #f8e1330 Co-authored-by: Tirumarai Selvan <tirumarai.selvan@gmail.com> Co-authored-by: Aravind Shankar <face11301@gmail.com> Co-authored-by: Rakesh Emmadi <12475069+rakeshkky@users.noreply.github.com>
2020-02-13 20:38:23 +03:00
-- Ideally this function would not exist: the qualifier should be recoverable
-- from the 'S.FromItem' produced by 'selectFromToFromItem'. Given how
-- 'S.FromItem' is currently modelled, that is not possible, so we rebuild it.
selectFromToQual :: SelectFrom ('Postgres pgKind) -> S.Qual
selectFromToQual from = case from of
  FromTable tableName    -> S.QualTable tableName
  FromIdentifier ident   -> S.QualifiedIdentifier ident Nothing
  FromFunction function _ _ ->
    S.QualifiedIdentifier (functionToIdentifier function) Nothing
-- | Render aggregate fields (counts, aggregate operations, literal
-- expressions) into one @json_build_object@ expression keyed by field name.
-- 'strfyNum' controls whether numeric aggregate results are stringified.
aggregateFieldToExp :: AggregateFields ('Postgres pgKind) -> Bool -> S.SQLExp
aggregateFieldToExp aggFlds strfyNum = jsonRow
  where
    jsonRow = S.applyJsonBuildObj (concatMap aggToFlds aggFlds)
    -- pair a field-name literal with its value expression (json_build_object
    -- takes alternating key/value arguments)
    withAls fldName sqlExp = [S.SELit fldName, sqlExp]
    aggToFlds (FieldName t, fld) = withAls t $ case fld of
      AFCount cty -> S.SECount cty
      AFOp aggOp -> aggOpToObj aggOp
      AFExp e -> S.SELit e
    -- an aggregate operation over several columns becomes a nested json object
    aggOpToObj (AggregateOp opText flds) =
      S.applyJsonBuildObj $ concatMap (colFldsToExtr opText) flds
    -- apply the aggregate function (e.g. sum, avg) to a column, converting
    -- the result to a json-compatible expression
    colFldsToExtr opText (FieldName t, CFCol col ty) =
      [ S.SELit t
      , toJSONableExp strfyNum ty False
          $ S.SEFnApp opText [S.SEIdentifier $ toIdentifier col] Nothing
      ]
    colFldsToExtr _ (FieldName t, CFExp e) =
      [ S.SELit t , S.SELit e]
-- | Extract a single row as a json object: aggregate all rows with
-- @json_agg@, take element 0, and fall back to the literal @null@ when the
-- result set is empty.
asSingleRowExtr :: S.Alias -> S.SQLExp
asSingleRowExtr col =
  S.SEFnApp "coalesce" [firstRowJson, S.SELit "null"] Nothing
  where
    -- (json_agg(col)) -> 0 : the first element of the aggregated json array
    firstRowJson =
      S.SEOpApp (S.SQLOp "->")
        [ S.SEFnApp "json_agg" [S.SEIdentifier (toIdentifier col)] Nothing
        , S.SEUnsafe "0"
        ]
-- | Build a @json_agg@ expression (coalesced to @[]@ for empty results) over
-- the given alias, optionally wrapping it in a subquery so that a permission
-- limit can be applied before aggregation. The subquery path re-materialises
-- the rows via @unnest(array_agg(..))@ so that the order-by columns survive
-- into the limited subselect.
withJsonAggExtr
  :: PermissionLimitSubQuery -> Maybe S.OrderByExp -> S.Alias -> S.SQLExp
withJsonAggExtr permLimitSubQuery ordBy alias =
  -- if select has aggregations then use subquery to apply permission limit
  case permLimitSubQuery of
    PLSQRequired permLimit -> withPermLimit permLimit
    PLSQNotRequired -> simpleJsonAgg
  where
    simpleJsonAgg = mkSimpleJsonAgg rowIdenExp ordBy
    rowIdenExp = S.SEIdentifier $ S.getAlias alias
    subSelAls = Identifier "sub_query"
    unnestTable = Identifier "unnest_table"
    -- json_agg(rowExp ORDER BY ob), coalesced to the literal '[]'
    mkSimpleJsonAgg rowExp ob =
      let jsonAggExp = S.SEFnApp "json_agg" [rowExp] ob
      in S.SEFnApp "coalesce" [jsonAggExp, S.SELit "[]"] Nothing
    -- outer select aggregating the limited subselect's rows
    withPermLimit limit =
      let subSelect = mkSubSelect limit
          rowIdentifier = S.mkQIdenExp subSelAls alias
          extr = S.Extractor (mkSimpleJsonAgg rowIdentifier newOrderBy) Nothing
          fromExp = S.FromExp $ pure $
            S.mkSelFromItem subSelect $ S.Alias subSelAls
      in S.SESelect $ S.mkSelect { S.selExtr = pure extr
                                 , S.selFrom = Just fromExp
                                 }
    -- inner select: unnest the aggregated arrays, re-apply ordering, and
    -- apply the permission LIMIT
    mkSubSelect limit =
      let jsonRowExtr = flip S.Extractor (Just alias) $
            S.mkQIdenExp unnestTable alias
          obExtrs = flip map newOBAliases $ \a ->
            S.Extractor (S.mkQIdenExp unnestTable a) $ Just $ S.Alias a
      in S.mkSelect { S.selExtr = jsonRowExtr : obExtrs
                    , S.selFrom = Just $ S.FromExp $ pure unnestFromItem
                    , S.selLimit = Just $ S.LimitExp $ S.intToSQLExp limit
                    , S.selOrderBy = newOrderBy
                    }
    -- unnest(array_agg(row), array_agg(ob_col_1), ...) AS unnest_table(..)
    unnestFromItem =
      let arrayAggItems = flip map (rowIdenExp : obCols) $
            \s -> S.SEFnApp "array_agg" [s] Nothing
      in S.FIUnnest arrayAggItems (S.Alias unnestTable) $
           rowIdenExp : map S.SEIdentifier newOBAliases
    newOrderBy = S.OrderByExp <$> NE.nonEmpty newOBItems
    (newOBItems, obCols, newOBAliases) = maybe ([], [], []) transformOrderBy ordBy
    -- rewrite each order-by item to refer to a fresh alias ("ob_col_<i>"),
    -- returning (rewritten item, original column expr, fresh alias)
    transformOrderBy (S.OrderByExp l) = unzip3 $
      flip map (zip (toList l) [1..]) $ \(obItem, i::Int) ->
        let iden = Identifier $ "ob_col_" <> tshow i
        in ( obItem{S.oColumn = S.SEIdentifier iden}
           , S.oColumn obItem
           , iden
           )
-- | Build the top-level json extractor for a selection: a json array for
-- multi-row selects, or a single json object (first row or @null@) for
-- single-object selects.
asJsonAggExtr
  :: JsonAggSelect -> S.Alias -> PermissionLimitSubQuery -> Maybe S.OrderByExp -> S.Extractor
asJsonAggExtr jsonAggSelect als permLimitSubQuery ordByExpM =
  S.Extractor aggregatedExp (Just als)
  where
    aggregatedExp = case jsonAggSelect of
      JASMultipleRows -> withJsonAggExtr permLimitSubQuery ordByExpM als
      JASSingleObject -> asSingleRowExtr als
-- | Array relationships are not grouped, so their alias must carry the
-- parent's alias as a prefix; fields that share the same relationship are
-- folded into one alias by joining their names in sorted order.
mkUniqArrayRelationAlias :: FieldName -> [FieldName] -> Identifier
mkUniqArrayRelationAlias parentAlias fields =
  let joinedFieldNames = T.intercalate "." (map getFieldNameTxt (sort fields))
  in Identifier $ getFieldNameTxt parentAlias <> "." <> joinedFieldNames
-- | Table alias for an array relationship: @<prefix>.ar.<unique-rel-alias>@.
mkArrayRelationTableAlias :: Identifier -> FieldName -> [FieldName] -> Identifier
mkArrayRelationTableAlias sourcePrefix parentAlias fields =
  sourcePrefix <> Identifier ".ar." <> mkUniqArrayRelationAlias parentAlias fields
-- | Table alias for an object relationship: @<prefix>.or.<relname>@.
mkObjectRelationTableAlias :: Identifier -> RelName -> Identifier
mkObjectRelationTableAlias sourcePrefix relationshipName =
  sourcePrefix <> Identifier ".or." <> toIdentifier relationshipName
-- | Table alias for a computed field: @<prefix>.cf.<field-alias>@.
mkComputedFieldTableAlias :: Identifier -> FieldName -> Identifier
mkComputedFieldTableAlias sourcePrefix fieldAlias =
  sourcePrefix <> Identifier ".cf." <> toIdentifier fieldAlias
-- | Alias of the base table of a selection: @<prefix>.base@.
mkBaseTableAlias :: Identifier -> Identifier
mkBaseTableAlias sourcePrefix =
  sourcePrefix <> Identifier ".base"
-- | Alias for a base-table column: @<prefix>.pg.<column>@.
mkBaseTableColumnAlias :: Identifier -> PGCol -> Identifier
mkBaseTableColumnAlias sourcePrefix column =
  sourcePrefix <> Identifier ".pg." <> toIdentifier column
-- | Synthetic field name used for order-by entries over a relationship:
-- @<relname>.order_by@.
mkOrderByFieldName :: RelName -> FieldName
mkOrderByFieldName relationshipName =
  FieldName $ relNameToTxt relationshipName <> "." <> "order_by"
-- | Alias for an aggregate order-by element: @count@ for counts, otherwise
-- @<op>.<column>@.
mkAggregateOrderByAlias :: AnnAggregateOrderBy ('Postgres pgKind) -> S.Alias
mkAggregateOrderByAlias aggregateOrderBy =
  S.Alias $ Identifier $ case aggregateOrderBy of
    AAOCount -> "count"
    AAOOp operatorText columnInfo ->
      operatorText <> "." <> getPGColTxt (pgiColumn columnInfo)
-- | Source prefix for an array relationship, collapsing fields that select
-- the same relationship (per the similar-fields map) into one shared prefix.
mkArrayRelationSourcePrefix
  :: Identifier
  -> FieldName
  -> HM.HashMap FieldName [FieldName]
  -> FieldName
  -> Identifier
mkArrayRelationSourcePrefix parentSourcePrefix parentFieldName similarFieldsMap fieldName =
  let groupedFields = HM.lookupDefault [fieldName] fieldName similarFieldsMap
  in mkArrayRelationTableAlias parentSourcePrefix parentFieldName groupedFields
-- | SQL alias for an array relationship, shared between fields that select
-- the same relationship (per the similar-fields map).
mkArrayRelationAlias
  :: FieldName
  -> HM.HashMap FieldName [FieldName]
  -> FieldName
  -> S.Alias
mkArrayRelationAlias parentFieldName similarFieldsMap fieldName =
  let groupedFields = HM.lookupDefault [fieldName] fieldName similarFieldsMap
  in S.Alias $ mkUniqArrayRelationAlias parentFieldName groupedFields
-- | Convert table-row function arguments into SQL function arguments.
-- Row references are resolved against the base-table alias of the given
-- source prefix; session and input arguments pass through unchanged.
fromTableRowArgs
  :: Identifier -> FunctionArgsExpTableRow ('Postgres pgKind) S.SQLExp -> S.FunctionArgs
fromTableRowArgs sourcePrefix args = toFunctionArgs (fmap argToSQLExp args)
  where
    toFunctionArgs (FunctionArgsExp positional named) =
      S.FunctionArgs positional named
    argToSQLExp = \case
      -- whole-row reference: the base table row itself
      AETableRow Nothing -> S.SERowIdentifier $ mkBaseTableAlias sourcePrefix
      -- row reference through a column accessor
      AETableRow (Just accessor) -> S.mkQIdenExp (mkBaseTableAlias sourcePrefix) accessor
      AESession sessionExp -> sessionExp
      AEInput inputExp -> inputExp
-- | Build a json object from extractors using @row_to_json@, aliased to the
-- parent field name.
withRowToJSON
  :: FieldName -> [S.Extractor] -> (S.Alias, S.SQLExp)
withRowToJSON parentAlias extractors =
  (S.toAlias parentAlias, S.applyRowToJson extractors)
-- | Build a json object from alternating key/value expressions using
-- @json_build_object@, aliased to the parent field name.
withJsonBuildObj
  :: FieldName -> [S.SQLExp] -> (S.Alias, S.SQLExp)
withJsonBuildObj parentAlias expressions =
  (S.toAlias parentAlias, S.applyJsonBuildObj expressions)
-- | Force the expression into an aggregate context by coalescing with a
-- typed @bool_or('true')@ fallback.
withForceAggregation :: S.TypeAnn -> S.SQLExp -> S.SQLExp
withForceAggregation tyAnn expression =
  S.SEFnApp "coalesce" [expression, forcedAggregate] Nothing
  where
    -- bool_or is an aggregate function, so its presence forces aggregation
    forcedAggregate = S.SETyAnn (S.SEUnsafe "bool_or('true')") tyAnn
-- | For an aggregate order-by element, produce both the SQL extractor that
-- computes the value (aliased via 'mkAggregateOrderByAlias') and the
-- equivalent aggregate-fields representation of the same computation.
mkAggregateOrderByExtractorAndFields
  :: forall pgKind
   . Backend ('Postgres pgKind)
  => AnnAggregateOrderBy ('Postgres pgKind)
  -> (S.Extractor, AggregateFields ('Postgres pgKind))
mkAggregateOrderByExtractorAndFields annAggOrderBy =
  case annAggOrderBy of
    AAOCount ->
      ( S.Extractor S.countStar alias
      , [(FieldName "count", AFCount S.CTStar)]
      )
    -- e.g. ordering by sum/avg/etc. of a column
    AAOOp opText pgColumnInfo ->
      let pgColumn = pgiColumn pgColumnInfo
          pgType = pgiType pgColumnInfo
      in ( S.Extractor (S.SEFnApp opText [S.SEIdentifier $ toIdentifier pgColumn] Nothing) alias
         , [ ( FieldName opText
             , AFOp $ AggregateOp opText
                 [ ( fromCol @('Postgres pgKind) pgColumn
                   , CFCol pgColumn pgType
                   )
                 ]
             )
           ]
         )
  where
    alias = Just $ mkAggregateOrderByAlias annAggOrderBy
-- | Compute the SQL alias under which an order-by element's value is exposed,
-- recursing through object relationships and delegating to the array-relation
-- prefix machinery for aggregation order-bys.
mkAnnOrderByAlias
  :: Identifier -> FieldName -> SimilarArrayFields -> AnnOrderByElementG ('Postgres pgKind) v -> S.Alias
mkAnnOrderByAlias pfx parAls similarFields = \case
  -- plain column: aliased as a base-table column of this prefix
  AOCColumn pgColumnInfo ->
    let pgColumn = pgiColumn pgColumnInfo
        obColAls = mkBaseTableColumnAlias pfx pgColumn
    in S.Alias obColAls
  -- "pfx.or.relname"."pfx.ob.or.relname.rest" AS "pfx.ob.or.relname.rest"
  AOCObjectRelation relInfo _ rest ->
    let rn = riName relInfo
        relPfx = mkObjectRelationTableAlias pfx rn
        ordByFldName = mkOrderByFieldName rn
        -- recurse with the relationship's prefix; similar-fields grouping
        -- does not apply below an object relation (hence 'mempty')
        nesAls = mkAnnOrderByAlias relPfx ordByFldName mempty rest
    in nesAls
  -- aggregate over an array relation: alias lives under the (possibly
  -- shared) array-relation prefix
  AOCArrayAggregation relInfo _ aggOrderBy ->
    let rn = riName relInfo
        arrPfx = mkArrayRelationSourcePrefix pfx parAls similarFields $
          mkOrderByFieldName rn
        obAls = arrPfx <> Identifier "." <> toIdentifier (mkAggregateOrderByAlias aggOrderBy)
    in S.Alias obAls
-- | Build a @DISTINCT ON@ expression over the given columns, together with
-- the extractors that expose those columns (qualified against the base
-- table) under their base-table-column aliases.
processDistinctOnColumns
  :: Identifier
  -> NE.NonEmpty PGCol
  -> ( S.DistinctExpr
     , [(S.Alias, S.SQLExp)] -- additional column extractors
     )
processDistinctOnColumns sourcePrefix distinctColumns = (distinctExpr, extractors)
  where
    columns = toList distinctColumns
    columnAlias = S.Alias . mkBaseTableColumnAlias sourcePrefix
    qualifiedColumn col =
      S.mkQIdenExp (mkBaseTableAlias sourcePrefix) (toIdentifier col)
    -- DISTINCT ON refers to the aliased columns, not the raw ones
    distinctExpr =
      S.DistinctOn
        [ S.SEIdentifier (toIdentifier (columnAlias col)) | col <- columns ]
    extractors =
      [ (columnAlias col, qualifiedColumn col) | col <- columns ]
type SimilarArrayFields = HM.HashMap FieldName [FieldName]
-- | Group the selection's array-relationship fields (and aggregate order-by
-- relations) by (relationship name, select arguments): fields with identical
-- keys are "similar" and can share a single join. The resulting map sends
-- each field name to the full list of fields similar to it (including itself).
mkSimilarArrayFields
  :: forall pgKind v
   . (Backend ('Postgres pgKind), Eq v)
  => AnnFieldsG ('Postgres pgKind) (Const Void) v
  -> Maybe (NE.NonEmpty (AnnOrderByItemG ('Postgres pgKind) v))
  -> SimilarArrayFields
mkSimilarArrayFields annFields maybeOrderBys =
  HM.fromList $ flip map allTuples $
    \(relNameAndArgs, fieldName) -> (fieldName, getSimilarFields relNameAndArgs)
  where
    -- all fields whose (relname, args) key equals the given one
    getSimilarFields relNameAndArgs = map snd $ filter ((== relNameAndArgs) . fst) allTuples
    allTuples = arrayRelationTuples <> aggOrderByRelationTuples
    -- (key, field) pairs from the selected array relationships
    arrayRelationTuples =
      let arrayFields = mapMaybe getAnnArr annFields
      in flip map arrayFields $
           \(f, relSel) -> (getArrayRelNameAndSelectArgs relSel, f)
    -- aggregate order-bys over a relation count as selecting that relation
    -- with no arguments, under the synthetic order-by field name
    aggOrderByRelationTuples =
      let mkItem (relName, fieldName) = ( (relName, noSelectArgs)
                                        , fieldName
                                        )
      in map mkItem $ maybe []
           (mapMaybe (fetchAggOrderByRels . obiColumn) . toList) maybeOrderBys
    fetchAggOrderByRels (AOCArrayAggregation ri _ _) =
      Just (riName ri, mkOrderByFieldName $ riName ri)
    fetchAggOrderByRels _ = Nothing
-- | Extract the relationship name and select arguments from any flavour of
-- array selection (simple, aggregate, or connection).
getArrayRelNameAndSelectArgs
  :: ArraySelectG ('Postgres pgKind) r v
  -> (RelName, SelectArgsG ('Postgres pgKind) v)
getArrayRelNameAndSelectArgs arraySelect = case arraySelect of
  ASSimple relSelect     -> (aarRelationshipName relSelect, _asnArgs $ aarAnnSelect relSelect)
  ASAggregate relSelect  -> (aarRelationshipName relSelect, _asnArgs $ aarAnnSelect relSelect)
  ASConnection relSelect -> (aarRelationshipName relSelect, _asnArgs $ _csSelect $ aarAnnSelect relSelect)
-- | Pick out non-connection array-relationship fields from an annotated
-- field; connections are handled by a separate code path.
getAnnArr
  :: (a, AnnFieldG ('Postgres pgKind) r v)
  -> Maybe (a, ArraySelectG ('Postgres pgKind) r v)
getAnnArr (fieldAlias, annField) = case annField of
  AFArrayRelation (ASConnection _) -> Nothing
  AFArrayRelation arraySelect      -> Just (fieldAlias, arraySelect)
  _                                -> Nothing
-- | Run an action that yields a result plus auxiliary data, and use the
-- given updater (applied to that data) to rewrite the 'JoinTree' the action
-- wrote, via 'pass'.
withWriteJoinTree
  :: (MonadWriter JoinTree m)
  => (JoinTree -> b -> JoinTree)
  -> m (a, b)
  -> m a
withWriteJoinTree updateTree action =
  pass $ do
    (result, auxiliary) <- action
    pure (result, \joinTree -> updateTree joinTree auxiliary)
-- | Capture the join tree written by the action and re-emit it as a single
-- object-relation entry keyed by the produced source.
withWriteObjectRelation
  :: (MonadWriter JoinTree m)
  => m ( ObjectRelationSource
       , HM.HashMap S.Alias S.SQLExp
       , a
       )
  -> m a
withWriteObjectRelation action =
  withWriteJoinTree insertObjectRelation $ do
    (source, nodeExtractors, result) <- action
    pure (result, (source, nodeExtractors))
  where
    -- nest the child join tree inside a select node under this source
    insertObjectRelation childJoinTree (source, nodeExtractors) =
      mempty { _jtObjectRelations =
                 HM.singleton source (SelectNode nodeExtractors childJoinTree) }
-- | Capture the join tree written by the action and re-emit it as a single
-- array-relation entry keyed by the produced source.
withWriteArrayRelation
  :: (MonadWriter JoinTree m)
  => m ( ArrayRelationSource
       , S.Extractor
       , HM.HashMap S.Alias S.SQLExp
       , a
       )
  -> m a
withWriteArrayRelation action =
  withWriteJoinTree insertArrayRelation $ do
    (source, topExtractor, nodeExtractors, result) <- action
    pure (result, (source, topExtractor, nodeExtractors))
  where
    -- nest the child join tree inside an array select node under this source
    insertArrayRelation childJoinTree (source, topExtractor, nodeExtractors) =
      let node = ArraySelectNode [topExtractor] $
                   SelectNode nodeExtractors childJoinTree
      in mempty { _jtArrayRelations = HM.singleton source node }
-- | Capture the join tree written by the action and re-emit it as a single
-- array-connection entry keyed by the produced source.
withWriteArrayConnection
  :: (MonadWriter JoinTree m)
  => m ( ArrayConnectionSource
       , S.Extractor
       , HM.HashMap S.Alias S.SQLExp
       , a
       )
  -> m a
withWriteArrayConnection action =
  withWriteJoinTree insertArrayConnection $ do
    (source, topExtractor, nodeExtractors, result) <- action
    pure (result, (source, topExtractor, nodeExtractors))
  where
    -- nest the child join tree inside an array select node under this source
    insertArrayConnection childJoinTree (source, topExtractor, nodeExtractors) =
      let node = ArraySelectNode [topExtractor] $
                   SelectNode nodeExtractors childJoinTree
      in mempty { _jtArrayConnections = HM.singleton source node }
-- | Capture the join tree written by the action and re-emit it as a single
-- computed-field table-set entry keyed by the produced source.
withWriteComputedFieldTableSet
  :: (MonadWriter JoinTree m)
  => m ( ComputedFieldTableSetSource
       , HM.HashMap S.Alias S.SQLExp
       , a
       )
  -> m a
withWriteComputedFieldTableSet action =
  withWriteJoinTree insertComputedField $ do
    (source, nodeExtractors, result) <- action
    pure (result, (source, nodeExtractors))
  where
    -- nest the child join tree inside a select node under this source
    insertComputedField childJoinTree (source, nodeExtractors) =
      mempty { _jtComputedFieldTableSets =
                 HM.singleton source (SelectNode nodeExtractors childJoinTree) }
-- | Process a simple (non-aggregate) annotated select: compute the select
-- source (from/where/order-by/limit machinery) and the full map of column
-- extractors (annotated fields plus order-by/distinct support columns).
-- Relationship joins are emitted into the 'JoinTree' writer as a side effect.
processAnnSimpleSelect
  :: forall pgKind m
   . ( MonadReader Bool m
     , MonadWriter JoinTree m
     , Backend ('Postgres pgKind)
     , PostgresAnnotatedFieldJSON pgKind
     )
  => SourcePrefixes
  -> FieldName
  -> PermissionLimitSubQuery
  -> AnnSimpleSelect ('Postgres pgKind)
  -> m ( SelectSource
       , HM.HashMap S.Alias S.SQLExp
       )
processAnnSimpleSelect sourcePrefixes fieldAlias permLimitSubQuery annSimpleSel = do
  (selectSource, orderByAndDistinctExtrs, _) <-
    processSelectParams sourcePrefixes fieldAlias similarArrayFields tableFrom
      permLimitSubQuery tablePermissions tableArgs
  annFieldsExtr <- processAnnFields (_pfThis sourcePrefixes) fieldAlias similarArrayFields annSelFields
  -- annotated-fields extractor first, then order-by/distinct columns
  let allExtractors = HM.fromList $ annFieldsExtr : orderByAndDistinctExtrs
  pure (selectSource, allExtractors)
  where
    AnnSelectG annSelFields tableFrom tablePermissions tableArgs _ = annSimpleSel
    -- group fields selecting the same array relation so they share a join
    similarArrayFields =
      mkSimilarArrayFields annSelFields $ _saOrderBy tableArgs
-- | Translate an aggregate select into a 'SelectSource', the extractor
-- expressions its node select must project, and a single top-level extractor
-- that assembles the per-field JSON values with @json_build_object@.
--
-- Each top-level aggregate field contributes a pair of
--   * node-level extractor expressions (columns the inner select projects), and
--   * the SQL expression computing that field's JSON value.
processAnnAggregateSelect
  :: forall pgKind m
   . ( MonadReader Bool m
     , MonadWriter JoinTree m
     , Backend ('Postgres pgKind)
     , PostgresAnnotatedFieldJSON pgKind
     )
  => SourcePrefixes
  -> FieldName
  -> AnnAggregateSelect ('Postgres pgKind)
  -> m ( SelectSource
       , HM.HashMap S.Alias S.SQLExp
       , S.Extractor
       )
processAnnAggregateSelect sourcePrefixes fieldAlias annAggSel = do
  (selectSource, orderByAndDistinctExtrs, _) <-
    processSelectParams sourcePrefixes fieldAlias similarArrayFields tableFrom
    permLimitSubQuery tablePermissions tableArgs
  let thisSourcePrefix = _pfThis sourcePrefixes
  processedFields <- forM aggSelFields $ \(fieldName, field) ->
    (fieldName,) <$>
    case field of
      -- Plain aggregates: no row sub-selection, only extractor expressions
      -- plus the aggregate SQL expression itself.
      TAFAgg aggFields ->
        pure ( aggregateFieldsToExtractorExps thisSourcePrefix aggFields
             , aggregateFieldToExp aggFields strfyNum
             )
      -- The "nodes" field: process the row fields, then json_agg them
      -- (applying the permission limit in a sub-query when required).
      TAFNodes _ annFields -> do
        annFieldExtr <- processAnnFields thisSourcePrefix fieldName similarArrayFields annFields
        pure ( [annFieldExtr]
             , withJsonAggExtr permLimitSubQuery (_ssOrderBy selectSource) $
               S.Alias $ toIdentifier fieldName
             )
      -- A literal (e.g. __typename): forced through an aggregate so it
      -- appears exactly once alongside the real aggregates.
      TAFExp e ->
        pure ( []
             , withForceAggregation S.textTypeAnn $ S.SELit e
             )
  let topLevelExtractor =
        flip S.Extractor (Just $ S.Alias $ toIdentifier fieldAlias) $
        S.applyJsonBuildObj $ flip concatMap (map (second snd) processedFields) $
        \(FieldName fieldText, fieldExp) -> [S.SELit fieldText, fieldExp]
      nodeExtractors = HM.fromList $
        concatMap (fst . snd) processedFields <> orderByAndDistinctExtrs
  pure (selectSource, nodeExtractors, topLevelExtractor)
  where
    AnnSelectG aggSelFields tableFrom tablePermissions tableArgs strfyNum = annAggSel
    permLimit = _tpLimit tablePermissions
    orderBy = _saOrderBy tableArgs
    -- A sub-query is needed when the permission limit must restrict rows
    -- *before* aggregation; see 'mkPermissionLimitSubQuery'.
    permLimitSubQuery = mkPermissionLimitSubQuery permLimit aggSelFields orderBy
    -- Array fields that appear in "nodes" selections and could be shared
    -- with order-by aggregations.
    similarArrayFields = HM.unions $
      flip map (map snd aggSelFields) $ \case
        TAFAgg _ -> mempty
        TAFNodes _ annFlds ->
          mkSimilarArrayFields annFlds orderBy
        TAFExp _ -> mempty
-- | Decide whether the select permission's row limit has to be applied in a
-- sub-query.  That is required whenever aggregation is involved — either an
-- aggregate field is selected or an aggregate appears in the order-by — since
-- the limit must restrict the rows *before* they are aggregated rather than
-- limit the aggregated output.
mkPermissionLimitSubQuery
  :: Maybe Int
  -> TableAggregateFields ('Postgres pgKind)
  -> Maybe (NE.NonEmpty (AnnOrderByItem ('Postgres pgKind)))
  -> PermissionLimitSubQuery
mkPermissionLimitSubQuery permLimit aggFields orderBys =
  case permLimit of
    Nothing -> PLSQNotRequired
    Just limit
      | hasAggregateField || hasAggOrderBy -> PLSQRequired limit
      | otherwise -> PLSQNotRequired
  where
    hasAggregateField = any (isAggField . snd) aggFields
    isAggField = \case
      TAFAgg _ -> True
      _ -> False
    hasAggOrderBy = case orderBys of
      Nothing -> False
      Just items -> any isAggOrderByElement $ concatMap toList $ toList items
    isAggOrderByElement = \case
      AOCArrayAggregation{} -> True
      _ -> False
-- | Translate one array relationship — simple, aggregate, or Relay
-- connection — and record it in the written 'JoinTree'.  The caller later
-- references the produced JSON value by alias, so nothing is returned here.
processArrayRelation
  :: forall pgKind m
   . ( MonadReader Bool m
     , MonadWriter JoinTree m
     , Backend ('Postgres pgKind)
     , PostgresAnnotatedFieldJSON pgKind
     )
  => SourcePrefixes
  -> FieldName
  -> S.Alias
  -> ArraySelect ('Postgres pgKind)
  -> m ()
processArrayRelation sourcePrefixes fieldAlias relAlias arrSel =
  case arrSel of
    ASSimple annArrRel -> withWriteArrayRelation $ do
      let AnnRelationSelectG _ columnMapping simpleSel = annArrRel
          -- An array relation always applies its permission limit, if any,
          -- in a sub-query.
          permLimitSubQuery =
            maybe PLSQNotRequired PLSQRequired $ _tpLimit $ _asnPerm simpleSel
      (selSource, nodeExtrs) <-
        processAnnSimpleSelect sourcePrefixes fieldAlias permLimitSubQuery simpleSel
      let jsonAggExtractor =
            asJsonAggExtr JASMultipleRows (S.toAlias fieldAlias)
              permLimitSubQuery (_ssOrderBy selSource)
      pure ( ArrayRelationSource relAlias columnMapping selSource
           , jsonAggExtractor
           , nodeExtrs
           , ()
           )
    ASAggregate aggSel -> withWriteArrayRelation $ do
      let AnnRelationSelectG _ columnMapping aggregateSel = aggSel
      (selSource, nodeExtrs, topLevelExtractor) <-
        processAnnAggregateSelect sourcePrefixes fieldAlias aggregateSel
      pure ( ArrayRelationSource relAlias columnMapping selSource
           , topLevelExtractor
           , nodeExtrs
           , ()
           )
    ASConnection connSel -> withWriteArrayConnection $ do
      let AnnRelationSelectG _ columnMapping connectionSel = connSel
      (connSource, topLevelExtractor, nodeExtrs) <-
        processConnectionSelect sourcePrefixes fieldAlias relAlias columnMapping connectionSel
      pure (connSource, topLevelExtractor, nodeExtrs, ())
-- | Compute a select's 'SelectSource': FROM item, WHERE clause (the
-- permission filter AND'd with the user's where argument), distinct-on,
-- order-by, limit and offset.  Also returns the extractor expressions
-- demanded by the order-by/distinct-on clauses and, when an order-by is
-- present, the cursor expression used for Relay connection pagination.
processSelectParams
  :: forall pgKind m
   . ( MonadReader Bool m
     , MonadWriter JoinTree m
     , Backend ('Postgres pgKind)
     )
  => SourcePrefixes
  -> FieldName
  -> SimilarArrayFields
  -> SelectFrom ('Postgres pgKind)
  -> PermissionLimitSubQuery
  -> TablePerm ('Postgres pgKind)
  -> SelectArgs ('Postgres pgKind)
  -> m ( SelectSource
       , [(S.Alias, S.SQLExp)]
       , Maybe S.SQLExp -- Order by cursor
       )
processSelectParams sourcePrefixes fieldAlias similarArrFields selectFrom
  permLimitSubQ tablePermissions tableArgs = do
  maybeOrderBy <- mapM
    (processOrderByItems thisSourcePrefix fieldAlias similarArrFields)
    orderByM
  let fromItem = selectFromToFromItem (_pfBase sourcePrefixes) selectFrom
      (maybeDistinct, distinctExtrs) =
        maybe (Nothing, []) (first Just) $ processDistinctOnColumns thisSourcePrefix <$> distM
      -- The permission filter is always applied; a user-supplied where clause
      -- is AND'd with it.
      finalWhere = toSQLBoolExp (selectFromToQual selectFrom) $
                   maybe permFilter (andAnnBoolExps permFilter) whereM
      selectSource = SelectSource thisSourcePrefix fromItem maybeDistinct finalWhere
                     ((^. _2) <$> maybeOrderBy) finalLimit offsetM
      orderByExtrs = maybe [] (^. _1) maybeOrderBy
  pure ( selectSource
       , orderByExtrs <> distinctExtrs
       , (^. _3) <$> maybeOrderBy
       )
  where
    thisSourcePrefix = _pfThis sourcePrefixes
    SelectArgs whereM orderByM inpLimitM offsetM distM = tableArgs
    TablePerm permFilter permLimit = tablePermissions
    finalLimit =
      -- if sub query is required, then only use input limit
      -- because permission limit is being applied in subquery
      -- else compare input and permission limits
      case permLimitSubQ of
        PLSQRequired _ -> inpLimitM
        PLSQNotRequired -> compareLimits
    -- The effective limit is the smaller of the user's limit and the
    -- permission limit (either may be absent).
    compareLimits =
      case (inpLimitM, permLimit) of
        (inpLim, Nothing) -> inpLim
        (Nothing, permLim) -> permLim
        (Just inp, Just perm) -> Just $ if inp < perm then inp else perm
-- | Translate a select's order-by items into
--   * the extractor expressions the node select must project (one per
--     order-by element),
--   * the SQL ORDER BY expression, and
--   * a cursor expression: a @json_build_object@ mirroring the order-by
--     values, used for Relay connection pagination.
--
-- Ordering by an object relationship's column or by an array relationship's
-- aggregate registers the required join in the written 'JoinTree'.
processOrderByItems
  :: forall pgKind m
   . ( MonadReader Bool m
     , MonadWriter JoinTree m
     , Backend ('Postgres pgKind)
     )
  => Identifier
  -> FieldName
  -> SimilarArrayFields
  -> NE.NonEmpty (AnnOrderByItem ('Postgres pgKind))
  -> m ( [(S.Alias, S.SQLExp)] -- Order by Extractors
       , S.OrderByExp
       , S.SQLExp -- The cursor expression
       )
processOrderByItems sourcePrefix' fieldAlias' similarArrayFields orderByItems = do
  orderByItemExps <- forM orderByItems processAnnOrderByItem
  let orderByExp = S.OrderByExp $ toOrderByExp <$> orderByItemExps
      orderByExtractors = concat $ toList $ map snd . toList <$> orderByItemExps
      cursor = mkCursorExp $ toList orderByItemExps
  pure (orderByExtractors, orderByExp, cursor)
  where
    -- Pair every order-by element with its (alias, SQL expression).
    processAnnOrderByItem :: AnnOrderByItem ('Postgres pgKind) -> m (OrderByItemExp ('Postgres pgKind))
    processAnnOrderByItem orderByItem =
      forM orderByItem $ \ordByCol -> (ordByCol,) <$>
      processAnnOrderByElement sourcePrefix' fieldAlias' ordByCol

    -- Compute the alias and SQL expression for a single order-by element,
    -- recursing through object relationships and recording any joins that
    -- the element requires.
    processAnnOrderByElement
      :: Identifier -> FieldName -> AnnOrderByElement ('Postgres pgKind) S.SQLExp -> m (S.Alias, S.SQLExp)
    processAnnOrderByElement sourcePrefix fieldAlias annObCol = do
      let ordByAlias = mkAnnOrderByAlias sourcePrefix fieldAlias similarArrayFields annObCol
      (ordByAlias, ) <$> case annObCol of
        -- A plain column of the current table, qualified by its base alias.
        AOCColumn pgColInfo -> pure $
          S.mkQIdenExp (mkBaseTableAlias sourcePrefix) $ toIdentifier $ pgiColumn pgColInfo
        -- A column reached through an object relationship: process the inner
        -- element under the relationship's prefix and register the join.
        AOCObjectRelation relInfo relFilter rest -> withWriteObjectRelation $ do
          let RelInfo relName _ colMapping relTable _ _ _ = relInfo
              relSourcePrefix = mkObjectRelationTableAlias sourcePrefix relName
              fieldName = mkOrderByFieldName relName
          (relOrderByAlias, relOrdByExp) <-
            processAnnOrderByElement relSourcePrefix fieldName rest
          let selectSource = ObjectSelectSource relSourcePrefix
                             (S.FISimple relTable Nothing)
                             (toSQLBoolExp (S.QualTable relTable) relFilter)
              relSource = ObjectRelationSource relName colMapping selectSource
          pure ( relSource
               , HM.singleton relOrderByAlias relOrdByExp
               , S.mkQIdenExp relSourcePrefix relOrderByAlias
               )
        -- An aggregate over an array relationship: register an array-relation
        -- join whose select computes the aggregate used for ordering.
        AOCArrayAggregation relInfo relFilter aggOrderBy -> withWriteArrayRelation $ do
          let RelInfo relName _ colMapping relTable _ _ _ = relInfo
              fieldName = mkOrderByFieldName relName
              relSourcePrefix = mkArrayRelationSourcePrefix sourcePrefix fieldAlias
                                similarArrayFields fieldName
              relAlias = mkArrayRelationAlias fieldAlias similarArrayFields fieldName
              (topExtractor, fields) = mkAggregateOrderByExtractorAndFields aggOrderBy
              selectSource = SelectSource relSourcePrefix
                             (S.FISimple relTable Nothing) Nothing
                             (toSQLBoolExp (S.QualTable relTable) relFilter)
                             Nothing Nothing Nothing
              relSource = ArrayRelationSource relAlias colMapping selectSource
          pure ( relSource
               , topExtractor
               , HM.fromList $ aggregateFieldsToExtractorExps relSourcePrefix fields
               , S.mkQIdenExp relSourcePrefix (mkAggregateOrderByAlias aggOrderBy)
               )

    -- Turn an annotated order-by item into a plain SQL ORDER BY item that
    -- refers to the computed extractor by its alias.
    toOrderByExp :: OrderByItemExp ('Postgres pgKind) -> S.OrderByItem
    toOrderByExp orderByItemExp =
      let OrderByItemG obTyM expAlias obNullsM = fst . snd <$> orderByItemExp
      in S.OrderByItem (S.SEIdentifier $ toIdentifier expAlias) obTyM obNullsM

    -- Build the pagination cursor: a JSON object whose keys mirror the
    -- order-by structure (nesting objects for relationships/aggregates) and
    -- whose values are the order-by expressions themselves.
    mkCursorExp :: [OrderByItemExp ('Postgres pgKind)] -> S.SQLExp
    mkCursorExp orderByItemExps =
      S.applyJsonBuildObj $ flip concatMap orderByItemExps $
      \orderByItemExp ->
        let OrderByItemG _ (annObCol, (_, valExp)) _ = orderByItemExp
        in annObColToJSONField valExp annObCol
      where
        annObColToJSONField valExp = \case
          AOCColumn pgCol -> [S.SELit $ getPGColTxt $ pgiColumn pgCol, valExp]
          AOCObjectRelation relInfo _ obCol ->
            [ S.SELit $ relNameToTxt $ riName relInfo
            , S.applyJsonBuildObj $ annObColToJSONField valExp obCol
            ]
          AOCArrayAggregation relInfo _ aggOrderBy ->
            [ S.SELit $ relNameToTxt (riName relInfo) <> "_aggregate"
            , S.applyJsonBuildObj $
              case aggOrderBy of
                AAOCount -> [S.SELit "count", valExp]
                AAOOp opText colInfo ->
                  [ S.SELit opText
                  , S.applyJsonBuildObj [S.SELit $ getPGColTxt $ pgiColumn colInfo, valExp]
                  ]
            ]
-- | Collect the column extractor expressions that a node select must project
-- so the outer query can compute the given aggregate fields.  @count(*)@ and
-- literal fields need no columns; @count(cols)@ and aggregate operations need
-- each referenced column, qualified with the base table alias.
aggregateFieldsToExtractorExps
  :: Identifier -> AggregateFields ('Postgres pgKind) -> [(S.Alias, S.SQLExp)]
aggregateFieldsToExtractorExps sourcePrefix aggregateFields =
  concatMap (fieldToExps . snd) aggregateFields
  where
    fieldToExps = \case
      AFCount countType -> case countType of
        S.CTStar -> []
        S.CTSimple cols -> map columnToExp cols
        S.CTDistinct cols -> map columnToExp cols
      AFOp aggOp -> mapMaybe (columnFieldToExp . snd) (_aoFields aggOp)
      AFExp _ -> []
    -- Only real column fields contribute extractors; literals do not.
    columnFieldToExp = \case
      CFCol col _ -> Just $ columnToExp col
      _ -> Nothing
    columnToExp col =
      ( S.Alias $ toIdentifier col
      , S.mkQIdenExp (mkBaseTableAlias sourcePrefix) (toIdentifier col)
      )
{- Note: [SQL generation for inherited roles]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When a query is executed by an inherited role, each column may contain a predicate
(AnnColumnCaseBoolExp ('Postgres pgKind) SQLExp) along with it. The predicate is then
converted to a BoolExp, which will be used to check if the said column should
be nullified. For example,
Suppose there are two roles, role1 gives access only to the `addr` column with
row filter P1 and role2 gives access to both addr and phone column with row
filter P2. The `OR`ing of the predicates will have already been done while
the schema has been generated. The SQL generated will look like this:
select
(case when (P1 or P2) then addr else null end) as addr,
(case when P2 then phone else null end) as phone
from employee
where (P1 or P2)
-}
-- | How to turn one translated row's (field name, SQL expression) pairs into
-- a single JSON-valued extractor.  Postgres flavours differ here: see the
-- 'Vanilla' and 'Citus' instances below.
class PostgresAnnotatedFieldJSON (pgKind :: PostgresKind) where
  annRowToJson :: FieldName -> [(FieldName, S.SQLExp)] -> (S.Alias, S.SQLExp)
instance PostgresAnnotatedFieldJSON 'Vanilla where
  -- Prefer row_to_json (faster), but Postgres truncates identifiers beyond 63
  -- characters, which would corrupt the JSON keys it produces.  Whenever any
  -- field name exceeds that limit, fall back to the slower but length-safe
  -- json_build_object.
  annRowToJson fieldAlias fieldExps
    | anyFieldNameTooLong =
        withJsonBuildObj fieldAlias $
          concatMap (\(fn, fieldExp) -> [S.SELit $ getFieldNameTxt fn, fieldExp]) fieldExps
    | otherwise =
        withRowToJSON fieldAlias $
          map (\(fn, fieldExp) -> S.Extractor fieldExp $ Just $ S.toAlias fn) fieldExps
    where
      anyFieldNameTooLong =
        any ((> 63) . T.length . getFieldNameTxt . fst) fieldExps
instance PostgresAnnotatedFieldJSON 'Citus where
  -- Citus restricts joins between tables of differing distribution types in a
  -- way that rules out row_to_json, so the row object is always built
  -- explicitly with json_build_object.
  annRowToJson fieldAlias fieldExps =
    withJsonBuildObj fieldAlias $ concatMap fieldToKeyValue fieldExps
    where
      fieldToKeyValue (fieldName, fieldExp) =
        [S.SELit $ getFieldNameTxt fieldName, fieldExp]
processAnnFields
:: forall pgKind m
. ( MonadReader Bool m
, MonadWriter JoinTree m
, Backend ('Postgres pgKind)
, PostgresAnnotatedFieldJSON pgKind
[Preview] Inherited roles for postgres read queries fixes #3868 docker image - `hasura/graphql-engine:inherited-roles-preview-48b73a2de` Note: To be able to use the inherited roles feature, the graphql-engine should be started with the env variable `HASURA_GRAPHQL_EXPERIMENTAL_FEATURES` set to `inherited_roles`. Introduction ------------ This PR implements the idea of multiple roles as presented in this [paper](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/FGALanguageICDE07.pdf). The multiple roles feature in this PR can be used via inherited roles. An inherited role is a role which can be created by combining multiple singular roles. For example, if there are two roles `author` and `editor` configured in the graphql-engine, then we can create a inherited role with the name of `combined_author_editor` role which will combine the select permissions of the `author` and `editor` roles and then make GraphQL queries using the `combined_author_editor`. How are select permissions of different roles are combined? ------------------------------------------------------------ A select permission includes 5 things: 1. Columns accessible to the role 2. Row selection filter 3. Limit 4. Allow aggregation 5. Scalar computed fields accessible to the role Suppose there are two roles, `role1` gives access to the `address` column with row filter `P1` and `role2` gives access to both the `address` and the `phone` column with row filter `P2` and we create a new role `combined_roles` which combines `role1` and `role2`. Let's say the following GraphQL query is queried with the `combined_roles` role. ```graphql query { employees { address phone } } ``` This will translate to the following SQL query: ```sql select (case when (P1 or P2) then address else null end) as address, (case when P2 then phone else null end) as phone from employee where (P1 or P2) ``` The other parameters of the select permission will be combined in the following manner: 1. 
Limit - Minimum of the limits will be the limit of the inherited role 2. Allow aggregations - If any of the role allows aggregation, then the inherited role will allow aggregation 3. Scalar computed fields - same as table column fields, as in the above example APIs for inherited roles: ---------------------- 1. `add_inherited_role` `add_inherited_role` is the [metadata API](https://hasura.io/docs/1.0/graphql/core/api-reference/index.html#schema-metadata-api) to create a new inherited role. It accepts two arguments `role_name`: the name of the inherited role to be added (String) `role_set`: list of roles that need to be combined (Array of Strings) Example: ```json { "type": "add_inherited_role", "args": { "role_name":"combined_user", "role_set":[ "user", "user1" ] } } ``` After adding the inherited role, the inherited role can be used like single roles like earlier Note: An inherited role can only be created with non-inherited/singular roles. 2. `drop_inherited_role` The `drop_inherited_role` API accepts the name of the inherited role and drops it from the metadata. It accepts a single argument: `role_name`: name of the inherited role to be dropped Example: ```json { "type": "drop_inherited_role", "args": { "role_name":"combined_user" } } ``` Metadata --------- The derived roles metadata will be included under the `experimental_features` key while exporting the metadata. ```json { "experimental_features": { "derived_roles": [ { "role_name": "manager_is_employee_too", "role_set": [ "employee", "manager" ] } ] } } ``` Scope ------ Only postgres queries and subscriptions are supported in this PR. Important points: ----------------- 1. All columns exposed to an inherited role will be marked as `nullable`, this is done so that cell value nullification can be done. 
TODOs ------- - [ ] Tests - [ ] Test a GraphQL query running with a inherited role without enabling inherited roles in experimental features - [] Tests for aggregate queries, limit, computed fields, functions, subscriptions (?) - [ ] Introspection test with a inherited role (nullability changes in a inherited role) - [ ] Docs - [ ] Changelog Co-authored-by: Vamshi Surabhi <6562944+0x777@users.noreply.github.com> GitOrigin-RevId: 3b8ee1e11f5ceca80fe294f8c074d42fbccfec63
2021-03-08 14:14:13 +03:00
)
=> Identifier
-> FieldName
-> SimilarArrayFields
-> AnnFields ('Postgres pgKind)
-> m (S.Alias, S.SQLExp)
processAnnFields sourcePrefix fieldAlias similarArrFields annFields = do
fieldExps <- forM annFields $ \(fieldName, field) ->
(fieldName,) <$>
case field of
AFExpression t -> pure $ S.SELit t
AFNodeId _ tn pKeys -> pure $ mkNodeId tn pKeys
AFColumn c -> toSQLCol c
-- this will be gone once the code which collects remote joins from the IR
-- emits a modified IR where remote relationships can't be reached
AFRemote _ -> pure $ S.SELit "null: remote field selected"
AFObjectRelation objSel -> withWriteObjectRelation $ do
let AnnRelationSelectG relName relMapping annObjSel = objSel
AnnObjectSelectG objAnnFields tableFrom tableFilter = annObjSel
objRelSourcePrefix = mkObjectRelationTableAlias sourcePrefix relName
sourcePrefixes = mkSourcePrefixes objRelSourcePrefix
annFieldsExtr <- processAnnFields (_pfThis sourcePrefixes) fieldName HM.empty objAnnFields
let selectSource = ObjectSelectSource (_pfThis sourcePrefixes)
(S.FISimple tableFrom Nothing)
(toSQLBoolExp (S.QualTable tableFrom) tableFilter)
objRelSource = ObjectRelationSource relName relMapping selectSource
pure ( objRelSource
, HM.fromList [annFieldsExtr]
, S.mkQIdenExp objRelSourcePrefix fieldName
)
AFArrayRelation arrSel -> do
let arrRelSourcePrefix = mkArrayRelationSourcePrefix sourcePrefix fieldAlias similarArrFields fieldName
arrRelAlias = mkArrayRelationAlias fieldAlias similarArrFields fieldName
processArrayRelation (mkSourcePrefixes arrRelSourcePrefix) fieldName arrRelAlias arrSel
pure $ S.mkQIdenExp arrRelSourcePrefix fieldName
[Preview] Inherited roles for postgres read queries fixes #3868 docker image - `hasura/graphql-engine:inherited-roles-preview-48b73a2de` Note: To be able to use the inherited roles feature, the graphql-engine should be started with the env variable `HASURA_GRAPHQL_EXPERIMENTAL_FEATURES` set to `inherited_roles`. Introduction ------------ This PR implements the idea of multiple roles as presented in this [paper](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/FGALanguageICDE07.pdf). The multiple roles feature in this PR can be used via inherited roles. An inherited role is a role which can be created by combining multiple singular roles. For example, if there are two roles `author` and `editor` configured in the graphql-engine, then we can create a inherited role with the name of `combined_author_editor` role which will combine the select permissions of the `author` and `editor` roles and then make GraphQL queries using the `combined_author_editor`. How are select permissions of different roles are combined? ------------------------------------------------------------ A select permission includes 5 things: 1. Columns accessible to the role 2. Row selection filter 3. Limit 4. Allow aggregation 5. Scalar computed fields accessible to the role Suppose there are two roles, `role1` gives access to the `address` column with row filter `P1` and `role2` gives access to both the `address` and the `phone` column with row filter `P2` and we create a new role `combined_roles` which combines `role1` and `role2`. Let's say the following GraphQL query is queried with the `combined_roles` role. ```graphql query { employees { address phone } } ``` This will translate to the following SQL query: ```sql select (case when (P1 or P2) then address else null end) as address, (case when P2 then phone else null end) as phone from employee where (P1 or P2) ``` The other parameters of the select permission will be combined in the following manner: 1. 
Limit - Minimum of the limits will be the limit of the inherited role 2. Allow aggregations - If any of the role allows aggregation, then the inherited role will allow aggregation 3. Scalar computed fields - same as table column fields, as in the above example APIs for inherited roles: ---------------------- 1. `add_inherited_role` `add_inherited_role` is the [metadata API](https://hasura.io/docs/1.0/graphql/core/api-reference/index.html#schema-metadata-api) to create a new inherited role. It accepts two arguments `role_name`: the name of the inherited role to be added (String) `role_set`: list of roles that need to be combined (Array of Strings) Example: ```json { "type": "add_inherited_role", "args": { "role_name":"combined_user", "role_set":[ "user", "user1" ] } } ``` After adding the inherited role, the inherited role can be used like single roles like earlier Note: An inherited role can only be created with non-inherited/singular roles. 2. `drop_inherited_role` The `drop_inherited_role` API accepts the name of the inherited role and drops it from the metadata. It accepts a single argument: `role_name`: name of the inherited role to be dropped Example: ```json { "type": "drop_inherited_role", "args": { "role_name":"combined_user" } } ``` Metadata --------- The derived roles metadata will be included under the `experimental_features` key while exporting the metadata. ```json { "experimental_features": { "derived_roles": [ { "role_name": "manager_is_employee_too", "role_set": [ "employee", "manager" ] } ] } } ``` Scope ------ Only postgres queries and subscriptions are supported in this PR. Important points: ----------------- 1. All columns exposed to an inherited role will be marked as `nullable`, this is done so that cell value nullification can be done. 
TODOs ------- - [ ] Tests - [ ] Test a GraphQL query running with a inherited role without enabling inherited roles in experimental features - [] Tests for aggregate queries, limit, computed fields, functions, subscriptions (?) - [ ] Introspection test with a inherited role (nullability changes in a inherited role) - [ ] Docs - [ ] Changelog Co-authored-by: Vamshi Surabhi <6562944+0x777@users.noreply.github.com> GitOrigin-RevId: 3b8ee1e11f5ceca80fe294f8c074d42fbccfec63
2021-03-08 14:14:13 +03:00
AFComputedField _ (CFSScalar scalar caseBoolExpMaybe) -> do
computedFieldSQLExp <- fromScalarComputedField scalar
-- The computed field is conditionally outputed depending
-- on the presence of `caseBoolExpMaybe` and the value it
-- evaluates to. `caseBoolExpMaybe` will be set only in the
-- case of an inherited role.
-- See [SQL generation for inherited role]
case caseBoolExpMaybe of
Nothing -> pure computedFieldSQLExp
Just caseBoolExp ->
let boolExp = S.simplifyBoolExp $ toSQLBoolExp (S.QualifiedIdentifier baseTableIdentifier Nothing)
$ _accColCaseBoolExpField <$> caseBoolExp
in pure $ S.SECond boolExp computedFieldSQLExp S.SENull
AFComputedField _ (CFSTable selectTy sel) -> withWriteComputedFieldTableSet $ do
let computedFieldSourcePrefix =
mkComputedFieldTableAlias sourcePrefix fieldName
(selectSource, nodeExtractors) <-
processAnnSimpleSelect (mkSourcePrefixes computedFieldSourcePrefix)
fieldName PLSQNotRequired sel
let computedFieldTableSetSource =
ComputedFieldTableSetSource fieldName selectTy selectSource
pure ( computedFieldTableSetSource
, nodeExtractors
, S.mkQIdenExp computedFieldSourcePrefix fieldName
)
-- TODO: implement this
AFDBRemote _ -> error "FIXME"
pure $ annRowToJson @pgKind fieldAlias fieldExps
where
mkSourcePrefixes newPrefix = SourcePrefixes newPrefix sourcePrefix
[Preview] Inherited roles for postgres read queries fixes #3868 docker image - `hasura/graphql-engine:inherited-roles-preview-48b73a2de` Note: To be able to use the inherited roles feature, the graphql-engine should be started with the env variable `HASURA_GRAPHQL_EXPERIMENTAL_FEATURES` set to `inherited_roles`. Introduction ------------ This PR implements the idea of multiple roles as presented in this [paper](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/FGALanguageICDE07.pdf). The multiple roles feature in this PR can be used via inherited roles. An inherited role is a role which can be created by combining multiple singular roles. For example, if there are two roles `author` and `editor` configured in the graphql-engine, then we can create a inherited role with the name of `combined_author_editor` role which will combine the select permissions of the `author` and `editor` roles and then make GraphQL queries using the `combined_author_editor`. How are select permissions of different roles are combined? ------------------------------------------------------------ A select permission includes 5 things: 1. Columns accessible to the role 2. Row selection filter 3. Limit 4. Allow aggregation 5. Scalar computed fields accessible to the role Suppose there are two roles, `role1` gives access to the `address` column with row filter `P1` and `role2` gives access to both the `address` and the `phone` column with row filter `P2` and we create a new role `combined_roles` which combines `role1` and `role2`. Let's say the following GraphQL query is queried with the `combined_roles` role. ```graphql query { employees { address phone } } ``` This will translate to the following SQL query: ```sql select (case when (P1 or P2) then address else null end) as address, (case when P2 then phone else null end) as phone from employee where (P1 or P2) ``` The other parameters of the select permission will be combined in the following manner: 1. 
Limit - Minimum of the limits will be the limit of the inherited role 2. Allow aggregations - If any of the role allows aggregation, then the inherited role will allow aggregation 3. Scalar computed fields - same as table column fields, as in the above example APIs for inherited roles: ---------------------- 1. `add_inherited_role` `add_inherited_role` is the [metadata API](https://hasura.io/docs/1.0/graphql/core/api-reference/index.html#schema-metadata-api) to create a new inherited role. It accepts two arguments `role_name`: the name of the inherited role to be added (String) `role_set`: list of roles that need to be combined (Array of Strings) Example: ```json { "type": "add_inherited_role", "args": { "role_name":"combined_user", "role_set":[ "user", "user1" ] } } ``` After adding the inherited role, the inherited role can be used like single roles like earlier Note: An inherited role can only be created with non-inherited/singular roles. 2. `drop_inherited_role` The `drop_inherited_role` API accepts the name of the inherited role and drops it from the metadata. It accepts a single argument: `role_name`: name of the inherited role to be dropped Example: ```json { "type": "drop_inherited_role", "args": { "role_name":"combined_user" } } ``` Metadata --------- The derived roles metadata will be included under the `experimental_features` key while exporting the metadata. ```json { "experimental_features": { "derived_roles": [ { "role_name": "manager_is_employee_too", "role_set": [ "employee", "manager" ] } ] } } ``` Scope ------ Only postgres queries and subscriptions are supported in this PR. Important points: ----------------- 1. All columns exposed to an inherited role will be marked as `nullable`, this is done so that cell value nullification can be done. 
TODOs ------- - [ ] Tests - [ ] Test a GraphQL query running with a inherited role without enabling inherited roles in experimental features - [] Tests for aggregate queries, limit, computed fields, functions, subscriptions (?) - [ ] Introspection test with a inherited role (nullability changes in a inherited role) - [ ] Docs - [ ] Changelog Co-authored-by: Vamshi Surabhi <6562944+0x777@users.noreply.github.com> GitOrigin-RevId: 3b8ee1e11f5ceca80fe294f8c074d42fbccfec63
2021-03-08 14:14:13 +03:00
baseTableIdentifier = mkBaseTableAlias sourcePrefix
toSQLCol :: AnnColumnField ('Postgres pgKind) S.SQLExp -> m S.SQLExp
[Preview] Inherited roles for postgres read queries fixes #3868 docker image - `hasura/graphql-engine:inherited-roles-preview-48b73a2de` Note: To be able to use the inherited roles feature, the graphql-engine should be started with the env variable `HASURA_GRAPHQL_EXPERIMENTAL_FEATURES` set to `inherited_roles`. Introduction ------------ This PR implements the idea of multiple roles as presented in this [paper](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/FGALanguageICDE07.pdf). The multiple roles feature in this PR can be used via inherited roles. An inherited role is a role which can be created by combining multiple singular roles. For example, if there are two roles `author` and `editor` configured in the graphql-engine, then we can create a inherited role with the name of `combined_author_editor` role which will combine the select permissions of the `author` and `editor` roles and then make GraphQL queries using the `combined_author_editor`. How are select permissions of different roles are combined? ------------------------------------------------------------ A select permission includes 5 things: 1. Columns accessible to the role 2. Row selection filter 3. Limit 4. Allow aggregation 5. Scalar computed fields accessible to the role Suppose there are two roles, `role1` gives access to the `address` column with row filter `P1` and `role2` gives access to both the `address` and the `phone` column with row filter `P2` and we create a new role `combined_roles` which combines `role1` and `role2`. Let's say the following GraphQL query is queried with the `combined_roles` role. ```graphql query { employees { address phone } } ``` This will translate to the following SQL query: ```sql select (case when (P1 or P2) then address else null end) as address, (case when P2 then phone else null end) as phone from employee where (P1 or P2) ``` The other parameters of the select permission will be combined in the following manner: 1. 
Limit - Minimum of the limits will be the limit of the inherited role 2. Allow aggregations - If any of the role allows aggregation, then the inherited role will allow aggregation 3. Scalar computed fields - same as table column fields, as in the above example APIs for inherited roles: ---------------------- 1. `add_inherited_role` `add_inherited_role` is the [metadata API](https://hasura.io/docs/1.0/graphql/core/api-reference/index.html#schema-metadata-api) to create a new inherited role. It accepts two arguments `role_name`: the name of the inherited role to be added (String) `role_set`: list of roles that need to be combined (Array of Strings) Example: ```json { "type": "add_inherited_role", "args": { "role_name":"combined_user", "role_set":[ "user", "user1" ] } } ``` After adding the inherited role, the inherited role can be used like single roles like earlier Note: An inherited role can only be created with non-inherited/singular roles. 2. `drop_inherited_role` The `drop_inherited_role` API accepts the name of the inherited role and drops it from the metadata. It accepts a single argument: `role_name`: name of the inherited role to be dropped Example: ```json { "type": "drop_inherited_role", "args": { "role_name":"combined_user" } } ``` Metadata --------- The derived roles metadata will be included under the `experimental_features` key while exporting the metadata. ```json { "experimental_features": { "derived_roles": [ { "role_name": "manager_is_employee_too", "role_set": [ "employee", "manager" ] } ] } } ``` Scope ------ Only postgres queries and subscriptions are supported in this PR. Important points: ----------------- 1. All columns exposed to an inherited role will be marked as `nullable`, this is done so that cell value nullification can be done. 
TODOs ------- - [ ] Tests - [ ] Test a GraphQL query running with a inherited role without enabling inherited roles in experimental features - [] Tests for aggregate queries, limit, computed fields, functions, subscriptions (?) - [ ] Introspection test with a inherited role (nullability changes in a inherited role) - [ ] Docs - [ ] Changelog Co-authored-by: Vamshi Surabhi <6562944+0x777@users.noreply.github.com> GitOrigin-RevId: 3b8ee1e11f5ceca80fe294f8c074d42fbccfec63
2021-03-08 14:14:13 +03:00
toSQLCol (AnnColumnField col asText colOpM caseBoolExpMaybe) = do
strfyNum <- ask
[Preview] Inherited roles for postgres read queries fixes #3868 docker image - `hasura/graphql-engine:inherited-roles-preview-48b73a2de` Note: To be able to use the inherited roles feature, the graphql-engine should be started with the env variable `HASURA_GRAPHQL_EXPERIMENTAL_FEATURES` set to `inherited_roles`. Introduction ------------ This PR implements the idea of multiple roles as presented in this [paper](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/FGALanguageICDE07.pdf). The multiple roles feature in this PR can be used via inherited roles. An inherited role is a role which can be created by combining multiple singular roles. For example, if there are two roles `author` and `editor` configured in the graphql-engine, then we can create a inherited role with the name of `combined_author_editor` role which will combine the select permissions of the `author` and `editor` roles and then make GraphQL queries using the `combined_author_editor`. How are select permissions of different roles are combined? ------------------------------------------------------------ A select permission includes 5 things: 1. Columns accessible to the role 2. Row selection filter 3. Limit 4. Allow aggregation 5. Scalar computed fields accessible to the role Suppose there are two roles, `role1` gives access to the `address` column with row filter `P1` and `role2` gives access to both the `address` and the `phone` column with row filter `P2` and we create a new role `combined_roles` which combines `role1` and `role2`. Let's say the following GraphQL query is queried with the `combined_roles` role. ```graphql query { employees { address phone } } ``` This will translate to the following SQL query: ```sql select (case when (P1 or P2) then address else null end) as address, (case when P2 then phone else null end) as phone from employee where (P1 or P2) ``` The other parameters of the select permission will be combined in the following manner: 1. 
Limit - Minimum of the limits will be the limit of the inherited role 2. Allow aggregations - If any of the role allows aggregation, then the inherited role will allow aggregation 3. Scalar computed fields - same as table column fields, as in the above example APIs for inherited roles: ---------------------- 1. `add_inherited_role` `add_inherited_role` is the [metadata API](https://hasura.io/docs/1.0/graphql/core/api-reference/index.html#schema-metadata-api) to create a new inherited role. It accepts two arguments `role_name`: the name of the inherited role to be added (String) `role_set`: list of roles that need to be combined (Array of Strings) Example: ```json { "type": "add_inherited_role", "args": { "role_name":"combined_user", "role_set":[ "user", "user1" ] } } ``` After adding the inherited role, the inherited role can be used like single roles like earlier Note: An inherited role can only be created with non-inherited/singular roles. 2. `drop_inherited_role` The `drop_inherited_role` API accepts the name of the inherited role and drops it from the metadata. It accepts a single argument: `role_name`: name of the inherited role to be dropped Example: ```json { "type": "drop_inherited_role", "args": { "role_name":"combined_user" } } ``` Metadata --------- The derived roles metadata will be included under the `experimental_features` key while exporting the metadata. ```json { "experimental_features": { "derived_roles": [ { "role_name": "manager_is_employee_too", "role_set": [ "employee", "manager" ] } ] } } ``` Scope ------ Only postgres queries and subscriptions are supported in this PR. Important points: ----------------- 1. All columns exposed to an inherited role will be marked as `nullable`, this is done so that cell value nullification can be done. 
TODOs ------- - [ ] Tests - [ ] Test a GraphQL query running with a inherited role without enabling inherited roles in experimental features - [] Tests for aggregate queries, limit, computed fields, functions, subscriptions (?) - [ ] Introspection test with a inherited role (nullability changes in a inherited role) - [ ] Docs - [ ] Changelog Co-authored-by: Vamshi Surabhi <6562944+0x777@users.noreply.github.com> GitOrigin-RevId: 3b8ee1e11f5ceca80fe294f8c074d42fbccfec63
2021-03-08 14:14:13 +03:00
let sqlExpression =
withColumnOp colOpM $
S.mkQIdenExp baseTableIdentifier $ pgiColumn col
finalSQLExpression =
-- Check out [SQL generation for inherited role]
case caseBoolExpMaybe of
Nothing -> sqlExpression
Just caseBoolExp ->
let boolExp =
S.simplifyBoolExp $ toSQLBoolExp (S.QualifiedIdentifier baseTableIdentifier Nothing) $
_accColCaseBoolExpField <$> caseBoolExp
in S.SECond boolExp sqlExpression S.SENull
pure $ toJSONableExp strfyNum (pgiType col) asText finalSQLExpression
fromScalarComputedField :: ComputedFieldScalarSelect ('Postgres pgKind) S.SQLExp -> m S.SQLExp
fromScalarComputedField computedFieldScalar = do
strfyNum <- ask
pure $ toJSONableExp strfyNum (ColumnScalar ty) False $ withColumnOp colOpM $
S.SEFunction $ S.FunctionExp fn (fromTableRowArgs sourcePrefix args) Nothing
where
ComputedFieldScalarSelect fn args ty colOpM = computedFieldScalar
withColumnOp :: Maybe (ColumnOp ('Postgres pgKind)) -> S.SQLExp -> S.SQLExp
withColumnOp colOpM sqlExp = case colOpM of
Nothing -> sqlExp
Just (ColumnOp opText cExp) -> S.mkSQLOpExp opText sqlExp cExp
mkNodeId :: QualifiedTable -> PrimaryKeyColumns ('Postgres pgKind) -> S.SQLExp
mkNodeId (QualifiedObject tableSchema tableName) pkeyColumns =
let columnInfoToSQLExp pgColumnInfo =
toJSONableExp False (pgiType pgColumnInfo) False $
S.mkQIdenExp (mkBaseTableAlias sourcePrefix) $ pgiColumn pgColumnInfo
-- See Note [Relay Node id].
in encodeBase64 $ flip S.SETyAnn S.textTypeAnn $ S.applyJsonBuildArray $
[ S.intToSQLExp $ nodeIdVersionInt currentNodeIdVersion
, S.SELit (getSchemaTxt tableSchema)
, S.SELit (toTxt tableName)
] <> map columnInfoToSQLExp (toList pkeyColumns)
-- | Build a WHERE fragment by AND-ing the relationship's join condition
-- onto the node's own filter, simplifying the combined boolean expression.
injectJoinCond :: S.BoolExp -- ^ Join condition
               -> S.BoolExp -- ^ Where condition
               -> S.WhereFrag -- ^ New where frag
injectJoinCond joinCond whereCond =
  let combinedCond = S.BEBin S.AndOp joinCond whereCond
  in S.WhereFrag (S.simplifyBoolExp combinedCond)
-- | Build the join predicate for a relationship: one equality per mapped
-- column pair, AND-ed together (degenerating to @TRUE@ for an empty
-- mapping). The left-hand column is qualified with the given base table
-- alias; the right-hand column is referenced unqualified.
mkJoinCond :: S.Alias -> HashMap PGCol PGCol -> S.BoolExp
mkJoinCond baseTableAlias columnMapping =
  foldl' (S.BEBin S.AndOp) (S.BELit True) columnEqualities
  where
    columnEqualities =
      [ S.BECompare S.SEQ (S.mkQIdenExp baseTableAlias leftCol) (S.mkSIdenExp rightCol)
      | (leftCol, rightCol) <- HM.toList columnMapping
      ]
-- | Generate the SQL select for a 'SelectNode': the base table (filtered
-- by both the incoming join condition and the node's own where
-- expression) is LEFT OUTER JOIN-ed with one lateral subselect per
-- relationship recorded in the node's join tree. Mutually recursive with
-- 'generateSQLSelectFromArrayNode' via the array-relation case.
generateSQLSelect
  :: S.BoolExp -- ^ Pre join condition
  -> SelectSource
  -> SelectNode
  -> S.Select
generateSQLSelect joinCondition selectSource selectNode =
  S.mkSelect
  { S.selExtr = [S.Extractor e $ Just a | (a, e) <- HM.toList extractors]
  , S.selFrom = Just $ S.FromExp [joinedFrom]
  , S.selOrderBy = maybeOrderby
  , S.selLimit = S.LimitExp . S.intToSQLExp <$> maybeLimit
  , S.selOffset = S.OffsetExp . S.int64ToSQLExp <$> maybeOffset
  , S.selDistinct = maybeDistinct
  }
  where
    SelectSource sourcePrefix fromItem maybeDistinct whereExp
      maybeOrderby maybeLimit maybeOffset = selectSource
    SelectNode extractors joinTree = selectNode
    JoinTree objectRelations arrayRelations arrayConnections computedFields = joinTree

    -- this is the table which is aliased as "sourcePrefix.base";
    -- the incoming join condition is folded into its WHERE clause
    baseSelect = S.mkSelect
      { S.selExtr  = [S.Extractor (S.SEStar Nothing) Nothing]
      , S.selFrom  = Just $ S.FromExp [fromItem]
      , S.selWhere = Just $ injectJoinCond joinCondition whereExp
      }
    baseSelectAlias = S.Alias $ mkBaseTableAlias sourcePrefix
    baseFromItem = S.mkSelFromItem baseSelect baseSelectAlias

    -- function to create a joined from item from two from items.
    -- The ON condition is always TRUE: the real join predicate is pushed
    -- into each lateral subselect (via 'mkJoinCond' / 'injectJoinCond')
    leftOuterJoin current new =
      S.FIJoin $ S.JoinExpr current S.LeftOuter new $
        S.JoinOn $ S.BELit True

    -- this is the from expression for the final select: the base table
    -- joined with a lateral item for every entry in the join tree
    joinedFrom :: S.FromItem
    joinedFrom = foldl' leftOuterJoin baseFromItem $
      map objectRelationToFromItem (HM.toList objectRelations) <>
      map arrayRelationToFromItem (HM.toList arrayRelations) <>
      map arrayConnectionToFromItem (HM.toList arrayConnections) <>
      map computedFieldToFromItem (HM.toList computedFields)

    -- lateral subselect for an object relationship
    objectRelationToFromItem
      :: (ObjectRelationSource, SelectNode) -> S.FromItem
    objectRelationToFromItem (objectRelationSource, node) =
      let ObjectRelationSource _ colMapping objectSelectSource = objectRelationSource
          alias = S.Alias $ _ossPrefix objectSelectSource
          source = objectSelectSourceToSelectSource objectSelectSource
          select = generateSQLSelect (mkJoinCond baseSelectAlias colMapping) source node
      in S.mkLateralFromItem select alias

    -- lateral subselect for an array relationship; recurses through
    -- 'generateSQLSelectFromArrayNode' to apply the top-level extractors
    arrayRelationToFromItem
      :: (ArrayRelationSource, ArraySelectNode) -> S.FromItem
    arrayRelationToFromItem (arrayRelationSource, arraySelectNode) =
      let ArrayRelationSource _ colMapping source = arrayRelationSource
          alias = S.Alias $ _ssPrefix source
          select = generateSQLSelectFromArrayNode source arraySelectNode $
                   mkJoinCond baseSelectAlias colMapping
      in S.mkLateralFromItem select alias

    -- lateral WITH query for a connection (Relay) selection
    arrayConnectionToFromItem
      :: (ArrayConnectionSource, ArraySelectNode) -> S.FromItem
    arrayConnectionToFromItem (arrayConnectionSource, arraySelectNode) =
      let selectWith = connectionToSelectWith baseSelectAlias arrayConnectionSource arraySelectNode
          alias = S.Alias $ _ssPrefix $ _acsSource arrayConnectionSource
      in S.FISelectWith (S.Lateral True) selectWith alias

    -- lateral subselect for a table-valued computed field: the inner
    -- select is wrapped in an outer select whose single extractor
    -- aggregates the rows into JSON
    computedFieldToFromItem
      :: (ComputedFieldTableSetSource, SelectNode) -> S.FromItem
    computedFieldToFromItem (computedFieldTableSource, node) =
      let ComputedFieldTableSetSource fieldName selectTy source = computedFieldTableSource
          internalSelect = generateSQLSelect (S.BELit True) source node
          extractor = asJsonAggExtr selectTy (S.toAlias fieldName) PLSQNotRequired $
                      _ssOrderBy source
          alias = S.Alias $ _ssPrefix source
          select = S.mkSelect
            { S.selExtr = [extractor]
            , S.selFrom = Just $ S.FromExp [S.mkSelFromItem internalSelect alias]
            }
      in S.mkLateralFromItem select alias
-- | Wrap the SQL generated for an array node in an outer select that
-- applies the node's top-level (aggregation) extractors to the inner
-- rows. Mutually recursive with 'generateSQLSelect'.
generateSQLSelectFromArrayNode
  :: SelectSource
  -> ArraySelectNode
  -> S.BoolExp
  -> S.Select
generateSQLSelectFromArrayNode source (ArraySelectNode topLevelExtractors innerNode) joinCond =
  let innerSelect = generateSQLSelect joinCond source innerNode
      innerAlias  = S.Alias (_ssPrefix source)
      fromItem    = S.mkSelFromItem innerSelect innerAlias
  in S.mkSelect
       { S.selExtr = topLevelExtractors
       , S.selFrom = Just (S.FromExp [fromItem])
       }
-- | Translate an aggregate selection AST into a single SQL select.
-- The IR is processed under a Reader carrying the stringify-numbers flag
-- and a Writer collecting the join tree; the resulting node is wrapped
-- with its top-level aggregate extractor, and the numeric prefixes of the
-- generated aliases are compacted at the end.
mkAggregateSelect
  :: forall pgKind
   . ( Backend ('Postgres pgKind)
     , PostgresAnnotatedFieldJSON pgKind
     )
  => AnnAggregateSelect ('Postgres pgKind)
  -> S.Select
mkAggregateSelect annAggSel =
  prefixNumToAliases $
    generateSQLSelectFromArrayNode selectSource arrayNode (S.BELit True)
  where
    rootFieldName  = FieldName "root"
    rootIdentifier = toIdentifier rootFieldName
    sourcePrefixes = SourcePrefixes rootIdentifier rootIdentifier
    strfyNum       = _asnStrfyNum annAggSel
    ((selectSource, nodeExtractors, topExtractor), joinTree) =
      runWriter $
        runReaderT (processAnnAggregateSelect sourcePrefixes rootFieldName annAggSel) strfyNum
    selectNode = SelectNode nodeExtractors joinTree
    arrayNode  = ArraySelectNode [topExtractor] selectNode
-- | Translate a simple (non-aggregate) selection AST into a single SQL
-- select. The 'JsonAggSelect' argument decides whether the result rows
-- are aggregated into a JSON array or emitted as a single JSON object.
mkSQLSelect
  :: forall pgKind
   . ( Backend ('Postgres pgKind)
     , PostgresAnnotatedFieldJSON pgKind
     )
  => JsonAggSelect
  -> AnnSimpleSelect ('Postgres pgKind)
  -> S.Select
mkSQLSelect jsonAggSelect annSel =
  prefixNumToAliases $
    generateSQLSelectFromArrayNode selectSource arrayNode (S.BELit True)
  where
    rootFldName       = FieldName "root"
    rootFldIdentifier = toIdentifier rootFldName
    rootFldAls        = S.Alias $ toIdentifier rootFldName
    sourcePrefixes    = SourcePrefixes rootFldIdentifier rootFldIdentifier
    strfyNum          = _asnStrfyNum annSel
    -- no permission-limit subquery is needed for a plain select
    permLimitSubQuery = PLSQNotRequired
    ((selectSource, nodeExtractors), joinTree) =
      runWriter $
        runReaderT (processAnnSimpleSelect sourcePrefixes rootFldName permLimitSubQuery annSel) strfyNum
    selectNode   = SelectNode nodeExtractors joinTree
    topExtractor =
      asJsonAggExtr jsonAggSelect rootFldAls permLimitSubQuery (_ssOrderBy selectSource)
    arrayNode    = ArraySelectNode [topExtractor] selectNode
-- | Translate a connection (Relay) selection AST into a SQL @WITH@
-- query, compacting the numeric alias prefixes of the generated
-- statement at the end.
mkConnectionSelect
  :: forall pgKind
   . ( Backend ('Postgres pgKind)
     , PostgresAnnotatedFieldJSON pgKind
     )
  => ConnectionSelect ('Postgres pgKind) (Const Void) S.SQLExp
  -> S.SelectWithG S.Select
mkConnectionSelect connectionSelect =
  prefixNumToAliasesSelectWith $
    connectionToSelectWith (S.Alias rootIdentifier) connectionSource selectNode
  where
    rootFieldName  = FieldName "root"
    rootIdentifier = toIdentifier rootFieldName
    sourcePrefixes = SourcePrefixes rootIdentifier rootIdentifier
    strfyNum       = _asnStrfyNum $ _csSelect connectionSelect
    ((connectionSource, topExtractor, nodeExtractors), joinTree) =
      runWriter $
        runReaderT
          (processConnectionSelect sourcePrefixes rootFieldName
            (S.Alias rootIdentifier) mempty connectionSelect)
          strfyNum
    selectNode =
      ArraySelectNode [topExtractor] $ SelectNode nodeExtractors joinTree
-- | First element extractor expression from a given record set.
-- For example, to get the first "id" column value from a given row set,
-- the function generates the SQL expression @(array_agg("id"))[1]@.
mkFirstElementExp :: S.SQLExp -> S.SQLExp
mkFirstElementExp columnExp =
  let aggregatedArray = S.SEFnApp "array_agg" [columnExp] Nothing
  in S.SEArrayIndex aggregatedArray (S.intToSQLExp 1)
-- | Last element extractor expression from given record set.
-- For example:- To get the last "id" column value from given row set,
-- the function generates the SQL expression AS `(array_agg("id"))[array_length(array_agg("id"), 1)]`
mkLastElementExp :: S.SQLExp -> S.SQLExp
mkLastElementExp expIdentifier =
  let arrayExp = S.SEFnApp "array_agg" [expIdentifier] Nothing
  in S.SEArrayIndex arrayExp $
     -- index with the array's own length to pick the final element
     S.SEFnApp "array_length" [arrayExp, S.intToSQLExp 1] Nothing
-- Identifiers for the auxiliary columns and CTE aliases used while
-- assembling a relay connection select.  The double-underscore prefix
-- keeps them from colliding with user-defined column names.

-- | Alias under which each row's cursor expression is extracted.
cursorIdentifier :: Identifier
cursorIdentifier = Identifier "__cursor"

-- | Column holding the first row's cursor (built via 'mkFirstElementExp').
startCursorIdentifier :: Identifier
startCursorIdentifier = Identifier "__start_cursor"

-- | Column holding the last row's cursor (built via 'mkLastElementExp').
endCursorIdentifier :: Identifier
endCursorIdentifier = Identifier "__end_cursor"

-- | Column for the page-info @hasPreviousPage@ boolean.
hasPreviousPageIdentifier :: Identifier
hasPreviousPageIdentifier = Identifier "__has_previous_page"

-- | Column for the page-info @hasNextPage@ boolean.
hasNextPageIdentifier :: Identifier
hasNextPageIdentifier = Identifier "__has_next_page"

-- | Alias of the CTE computing the page-info booleans.
pageInfoSelectAliasIdentifier :: Identifier
pageInfoSelectAliasIdentifier = Identifier "__page_info"

-- | Alias of the CTE computing start/end cursors and row numbers.
cursorsSelectAliasIdentifier :: Identifier
cursorsSelectAliasIdentifier = Identifier "__cursors_select"
-- | Produce a SQL expression that base64-encodes the given text expression,
-- stripping the newlines that Postgres' @encode(..., 'base64')@ inserts
-- every 76 characters.
encodeBase64 :: S.SQLExp -> S.SQLExp
encodeBase64 expression =
  stripNewlines (base64Encode (toUtf8Bytes expression))
  where
    -- Text must first be converted to bytea before encoding.
    toUtf8Bytes e =
      S.SEFnApp "convert_to" [e, S.SELit "UTF8"] Nothing
    base64Encode e =
      S.SEFnApp "encode" [e, S.SELit "base64"] Nothing
    stripNewlines e =
      S.SEFnApp "regexp_replace" [e, S.SELit "\\n", S.SELit "", S.SELit "g"] Nothing
-- | Translate a relay 'ConnectionSelect' into the components needed to
-- assemble its SQL:
--
--   * an 'ArrayConnectionSource' carrying the relation alias, the column
--     mapping, the optional cursor-split predicate and page slice;
--   * the top-level 'S.Extractor' producing the connection's JSON object;
--   * the map of node-level extractors (cursor, primary-key/order-by
--     columns and edge-node fields).
--
-- Joins discovered while processing nested fields are emitted through the
-- 'MonadWriter' 'JoinTree'; the 'MonadReader' 'Bool' carries the
-- stringify-numbers flag supplied by the callers via @runReaderT@.
processConnectionSelect
  :: forall pgKind m
   . ( MonadReader Bool m
     , MonadWriter JoinTree m
     , Backend ('Postgres pgKind)
     , PostgresAnnotatedFieldJSON pgKind
     )
  => SourcePrefixes
  -> FieldName
  -> S.Alias
  -> HM.HashMap PGCol PGCol
  -> ConnectionSelect ('Postgres pgKind) (Const Void) S.SQLExp
  -> m ( ArrayConnectionSource
       , S.Extractor
       , HM.HashMap S.Alias S.SQLExp
       )
processConnectionSelect sourcePrefixes fieldAlias relAlias colMapping connectionSelect = do
  (selectSource, orderByAndDistinctExtrs, maybeOrderByCursor) <-
    processSelectParams sourcePrefixes fieldAlias similarArrayFields selectFrom
    permLimitSubQuery tablePermissions tableArgs
  -- Cursor values are extracted with an explicit ::text annotation.
  let mkCursorExtractor = (S.Alias cursorIdentifier,) . (`S.SETyAnn` S.textTypeAnn)
      cursorExtractors = case maybeOrderByCursor of
        Just orderByCursor -> [mkCursorExtractor orderByCursor]
        Nothing ->
          -- Extract primary key columns from base select along with cursor expression.
          -- Those columns are required to perform connection split via a WHERE clause.
          mkCursorExtractor primaryKeyColumnsObjectExp : primaryKeyColumnExtractors
      orderByExp = _ssOrderBy selectSource
  -- Node-field extractors are accumulated in the State while building the
  -- top-level JSON expression.
  (topExtractorExp, exps) <- flip runStateT [] $ processFields orderByExp
  let topExtractor = S.Extractor topExtractorExp $ Just $ S.Alias fieldIdentifier
      allExtractors = HM.fromList $ cursorExtractors <> exps <> orderByAndDistinctExtrs
      arrayConnectionSource = ArrayConnectionSource relAlias colMapping
                              (mkSplitBoolExp <$> maybeSplit) maybeSlice selectSource
  pure ( arrayConnectionSource
       , topExtractor
       , allExtractors
       )
  where
    ConnectionSelect _ primaryKeyColumns maybeSplit maybeSlice select = connectionSelect
    AnnSelectG fields selectFrom tablePermissions tableArgs _ = select
    fieldIdentifier = toIdentifier fieldAlias
    thisPrefix = _pfThis sourcePrefixes
    -- NOTE(review): connections never request a permission-limit subquery
    -- here — presumably because paging is applied later via the CTE chain
    -- in 'connectionToSelectWith'; confirm.
    permLimitSubQuery = PLSQNotRequired

    -- JSON object of the primary-key values of a row, e.g.
    -- @json_build_object('id', "id", ...)@.  Used as the cursor when no
    -- order-by based cursor is available (the 'Nothing' branch above).
    primaryKeyColumnsObjectExp =
      S.applyJsonBuildObj $ flip concatMap (toList primaryKeyColumns) $
      \pgColumnInfo ->
        [ S.SELit $ getPGColTxt $ pgiColumn pgColumnInfo
        , toJSONableExp False (pgiType pgColumnInfo) False $
          S.mkQIdenExp (mkBaseTableAlias thisPrefix) $ pgiColumn pgColumnInfo
        ]

    -- One (alias, qualified column) extractor per primary-key column,
    -- taken from the base table of the select.
    primaryKeyColumnExtractors =
      flip map (toList primaryKeyColumns) $
      \pgColumnInfo ->
        let pgColumn = pgiColumn pgColumnInfo
        in ( S.Alias $ mkBaseTableColumnAlias thisPrefix pgColumn
           , S.mkQIdenExp (mkBaseTableAlias thisPrefix) pgColumn
           )

    -- Build the WHERE predicate implementing an 'after'/'before' cursor
    -- split as a lexicographic comparison over the order-by columns:
    --   (c1 > v1) OR (c1 = v1 AND ((c2 > v2) OR (c2 = v2 AND ...)))
    -- with the comparison direction flipped for 'before' and for
    -- descending order-by items.
    mkSplitBoolExp (firstSplit NE.:| rest) =
      S.BEBin S.OrOp (mkSplitCompareExp firstSplit) $ mkBoolExpFromRest firstSplit rest
      where
        -- Ties on the previous column recurse into the remaining columns;
        -- a tie on every column means "same row", hence FALSE.
        mkBoolExpFromRest previousSplit =
          S.BEBin S.AndOp (mkEqualityCompareExp previousSplit) . \case
            [] -> S.BELit False
            (thisSplit:remainingSplit) -> mkSplitBoolExp (thisSplit NE.:| remainingSplit)

        -- Strict comparison for one order-by column; direction depends on
        -- the split kind (after/before) and the column's sort order.
        mkSplitCompareExp (ConnectionSplit kind v (OrderByItemG obTyM obCol _)) =
          let obAlias = mkAnnOrderByAlias thisPrefix fieldAlias similarArrayFields obCol
              obTy = fromMaybe S.OTAsc obTyM
              compareOp = case (kind, obTy) of
                (CSKAfter, S.OTAsc) -> S.SGT
                (CSKAfter, S.OTDesc) -> S.SLT
                (CSKBefore, S.OTAsc) -> S.SLT
                (CSKBefore, S.OTDesc) -> S.SGT
          in S.BECompare compareOp (S.SEIdentifier $ toIdentifier obAlias) v

        -- Equality comparison for one order-by column (tie detection).
        mkEqualityCompareExp (ConnectionSplit _ v orderByItem) =
          let obAlias = mkAnnOrderByAlias thisPrefix fieldAlias similarArrayFields $
                        obiColumn orderByItem
          in S.BECompare S.SEQ (S.SEIdentifier $ toIdentifier obAlias) v

    -- Similar-array-field info gathered from every edge node's fields,
    -- fed to order-by alias generation (see 'mkAnnOrderByAlias' uses).
    similarArrayFields = HM.unions $
      flip map (map snd fields) $ \case
        ConnectionTypename{} -> mempty
        ConnectionPageInfo{} -> mempty
        ConnectionEdges edges -> HM.unions $
          flip map (map snd edges) $ \case
            EdgeTypename{} -> mempty
            EdgeCursor{} -> mempty
            EdgeNode annFields ->
              mkSimilarArrayFields annFields $ _saOrderBy tableArgs

    -- @coalesce(json_agg(row), '[]')@ so an empty edge list yields [].
    mkSimpleJsonAgg rowExp ob =
      let jsonAggExp = S.SEFnApp "json_agg" [rowExp] ob
      in S.SEFnApp "coalesce" [jsonAggExp, S.SELit "[]"] Nothing

    -- Build the @json_build_object@ for the connection's own fields
    -- (__typename, pageInfo, edges), recording node-field extractors in
    -- the State and join info in the Writer as a side effect.
    processFields
      :: forall n
       . ( MonadReader Bool n
         , MonadWriter JoinTree n
         , MonadState [(S.Alias, S.SQLExp)] n
         )
      => Maybe S.OrderByExp -> n S.SQLExp
    processFields orderByExp =
      fmap (S.applyJsonBuildObj . concat) $
      forM fields $
        \(FieldName fieldText, field) -> (S.SELit fieldText:) . pure <$>
        case field of
          ConnectionTypename t -> pure $ withForceAggregation S.textTypeAnn $ S.SELit t
          ConnectionPageInfo pageInfoFields -> pure $ processPageInfoFields pageInfoFields
          ConnectionEdges edges ->
            -- Edges are aggregated into a JSON array, preserving the
            -- select's ORDER BY inside the aggregate.
            fmap (flip mkSimpleJsonAgg orderByExp . S.applyJsonBuildObj . concat) $ forM edges $
            \(FieldName edgeText, edge) -> (S.SELit edgeText:) . pure <$>
            case edge of
              EdgeTypename t -> pure $ S.SELit t
              EdgeCursor -> pure $ encodeBase64 $ S.SEIdentifier (toIdentifier cursorIdentifier)
              EdgeNode annFields -> do
                -- A dotted field name (e.g. "root.edges.node") keeps the
                -- node extractor's alias unique per edge field.
                let edgeFieldName = FieldName $
                      getFieldNameTxt fieldAlias <> "." <> fieldText <> "." <> edgeText
                    edgeFieldIdentifier = toIdentifier edgeFieldName
                annFieldsExtrExp <- processAnnFields thisPrefix edgeFieldName similarArrayFields annFields
                modify' (<> [annFieldsExtrExp])
                pure $ S.SEIdentifier edgeFieldIdentifier

    -- Page-info fields are scalar subselects against the __page_info and
    -- __cursors_select CTEs produced by 'connectionToSelectWith'.
    processPageInfoFields infoFields =
      S.applyJsonBuildObj $ flip concatMap infoFields $
      \(FieldName fieldText, field) -> (:) (S.SELit fieldText) $ pure case field of
        PageInfoTypename t -> withForceAggregation S.textTypeAnn $ S.SELit t
        PageInfoHasNextPage -> withForceAggregation S.boolTypeAnn $
          mkSingleFieldSelect (S.SEIdentifier hasNextPageIdentifier) pageInfoSelectAliasIdentifier
        PageInfoHasPreviousPage -> withForceAggregation S.boolTypeAnn $
          mkSingleFieldSelect (S.SEIdentifier hasPreviousPageIdentifier) pageInfoSelectAliasIdentifier
        PageInfoStartCursor -> withForceAggregation S.textTypeAnn $
          encodeBase64 $ mkSingleFieldSelect (S.SEIdentifier startCursorIdentifier) cursorsSelectAliasIdentifier
        PageInfoEndCursor -> withForceAggregation S.textTypeAnn $
          encodeBase64 $ mkSingleFieldSelect (S.SEIdentifier endCursorIdentifier) cursorsSelectAliasIdentifier
      where
        -- @SELECT <field> FROM <cte>@ as a scalar subquery expression.
        mkSingleFieldSelect field fromIdentifier = S.SESelect
          S.mkSelect { S.selExtr = [S.Extractor field Nothing]
                     , S.selFrom = Just $ S.FromExp [S.FIIdentifier fromIdentifier]
                     }
-- | Assemble the final @WITH@ query for a connection select.  The CTE
-- chain (optional stages skipped when absent) is:
--
-- > WITH __base_select    AS (<base select> plus a __row_number column)
-- >    , __split_select   AS (__base_select filtered by the cursor split)  -- optional
-- >    , __slice_select   AS (first/last slice of the previous CTE)        -- optional
-- >    , __final_select   AS (SELECT * FROM <previous CTE>)
-- >    , __cursors_select AS (start/end cursor and start/end row number)
-- >    , __page_info      AS (hasPreviousPage / hasNextPage booleans)
-- > SELECT <top extractors> FROM __final_select
--
-- Each @from*@ helper below contributes its own (alias, select) pair and
-- then delegates to the next stage, so the list order is the CTE order.
connectionToSelectWith
  :: S.Alias
  -> ArrayConnectionSource
  -> ArraySelectNode
  -> S.SelectWithG S.Select
connectionToSelectWith baseSelectAlias arrayConnectionSource arraySelectNode =
  let extractionSelect = S.mkSelect
                         { S.selExtr = topExtractors
                         , S.selFrom = Just $ S.FromExp [S.FIIdentifier finalSelectIdentifier]
                         }
  in S.SelectWith fromBaseSelections extractionSelect
  where
    ArrayConnectionSource _ columnMapping maybeSplit maybeSlice selectSource =
      arrayConnectionSource
    ArraySelectNode topExtractors selectNode = arraySelectNode
    -- Aliases for each CTE in the chain.
    baseSelectIdentifier = Identifier "__base_select"
    splitSelectIdentifier = Identifier "__split_select"
    sliceSelectIdentifier = Identifier "__slice_select"
    finalSelectIdentifier = Identifier "__final_select"
    rowNumberIdentifier = Identifier "__row_number"
    -- Raw SQL: a single partition so every base row is numbered 1..n.
    rowNumberExp = S.SEUnsafe "(row_number() over (partition by 1))"
    startRowNumberIdentifier = Identifier "__start_row_number"
    endRowNumberIdentifier = Identifier "__end_row_number"
    -- Start/end cursors and row numbers are the first/last aggregated
    -- values of the corresponding columns.
    startCursorExp = mkFirstElementExp $ S.SEIdentifier cursorIdentifier
    endCursorExp = mkLastElementExp $ S.SEIdentifier cursorIdentifier
    startRowNumberExp = mkFirstElementExp $ S.SEIdentifier rowNumberIdentifier
    endRowNumberExp = mkLastElementExp $ S.SEIdentifier rowNumberIdentifier

    -- __base_select: the generated inner select (joined to the parent via
    -- the column mapping) extended with the __row_number column.
    fromBaseSelections =
      let joinCond = mkJoinCond baseSelectAlias columnMapping
          baseSelectFrom = S.mkSelFromItem
                           (generateSQLSelect joinCond selectSource selectNode)
                           $ S.Alias $ _ssPrefix selectSource
          select =
            S.mkSelect { S.selExtr = [ S.selectStar
                                     , S.Extractor rowNumberExp $ Just $ S.Alias rowNumberIdentifier
                                     ]
                       , S.selFrom = Just $ S.FromExp [baseSelectFrom]
                       }
      in (S.Alias baseSelectIdentifier, select):fromSplitSelection

    -- @SELECT * FROM <cte>@ helper used by the passthrough stages.
    mkStarSelect fromIdentifier =
      S.mkSelect { S.selExtr = [S.selectStar]
                 , S.selFrom = Just $ S.FromExp [S.FIIdentifier fromIdentifier]
                 }

    -- __split_select: apply the after/before cursor predicate, if any.
    fromSplitSelection = case maybeSplit of
      Nothing          -> fromSliceSelection baseSelectIdentifier
      Just splitBool ->
        let select =
              (mkStarSelect baseSelectIdentifier){S.selWhere = Just $ S.WhereFrag splitBool}
        in (S.Alias splitSelectIdentifier, select):fromSliceSelection splitSelectIdentifier

    -- __slice_select: apply first/last N.  "last N" is implemented by
    -- ordering descending on row number, limiting, then re-sorting
    -- ascending to restore the original order.
    fromSliceSelection prevSelect = case maybeSlice of
      Nothing    -> fromFinalSelect prevSelect
      Just slice ->
        let select = case slice of
              SliceFirst limit ->
                (mkStarSelect prevSelect)
                {S.selLimit = (Just . S.LimitExp . S.intToSQLExp) limit}
              SliceLast limit ->
                let mkRowNumberOrderBy obType =
                      let orderByItem =
                            S.OrderByItem (S.SEIdentifier rowNumberIdentifier) (Just obType) Nothing
                      in S.OrderByExp $ orderByItem NE.:| []
                    sliceLastSelect = (mkStarSelect prevSelect)
                                      { S.selLimit = (Just . S.LimitExp . S.intToSQLExp) limit
                                      , S.selOrderBy = Just $ mkRowNumberOrderBy S.OTDesc
                                      }
                    sliceLastSelectFrom =
                      S.mkSelFromItem sliceLastSelect $ S.Alias sliceSelectIdentifier
                in S.mkSelect { S.selExtr = [S.selectStar]
                              , S.selFrom = Just $ S.FromExp [sliceLastSelectFrom]
                              , S.selOrderBy = Just $ mkRowNumberOrderBy S.OTAsc
                              }
        in (S.Alias sliceSelectIdentifier, select):fromFinalSelect sliceSelectIdentifier

    -- __final_select: the page actually returned; the outer extraction
    -- select reads from this CTE.
    fromFinalSelect prevSelect =
      let select = mkStarSelect prevSelect
      in (S.Alias finalSelectIdentifier, select):fromCursorSelection

    -- __cursors_select: aggregate the final page into its start/end
    -- cursors and start/end row numbers.
    fromCursorSelection =
      let extrs = [ S.Extractor startCursorExp $ Just $ S.Alias startCursorIdentifier
                  , S.Extractor endCursorExp $ Just $ S.Alias endCursorIdentifier
                  , S.Extractor startRowNumberExp $ Just $ S.Alias startRowNumberIdentifier
                  , S.Extractor endRowNumberExp $ Just $ S.Alias endRowNumberIdentifier
                  ]
          select =
            S.mkSelect { S.selExtr = extrs
                       , S.selFrom = Just $ S.FromExp [S.FIIdentifier finalSelectIdentifier]
                       }
      in (S.Alias cursorsSelectAliasIdentifier, select):fromPageInfoSelection

    -- __page_info: a previous page exists iff some base row's row number
    -- is below the page's start row number; a next page exists iff some
    -- base row's row number is above the page's end row number.
    fromPageInfoSelection =
      let hasPrevPage = S.SEBool $
            S.mkExists (S.FIIdentifier baseSelectIdentifier) $
            S.BECompare S.SLT (S.SEIdentifier rowNumberIdentifier) $
            S.SESelect $ S.mkSelect { S.selFrom = Just $ S.FromExp [S.FIIdentifier cursorsSelectAliasIdentifier]
                                    , S.selExtr = [S.Extractor (S.SEIdentifier startRowNumberIdentifier) Nothing]
                                    }
          hasNextPage = S.SEBool $
            S.mkExists (S.FIIdentifier baseSelectIdentifier) $
            S.BECompare S.SGT (S.SEIdentifier rowNumberIdentifier) $
            S.SESelect $ S.mkSelect { S.selFrom = Just $ S.FromExp [S.FIIdentifier cursorsSelectAliasIdentifier]
                                    , S.selExtr = [S.Extractor (S.SEIdentifier endRowNumberIdentifier) Nothing]
                                    }
          select =
            S.mkSelect { S.selExtr = [ S.Extractor hasPrevPage $ Just $ S.Alias hasPreviousPageIdentifier
                                     , S.Extractor hasNextPage $ Just $ S.Alias hasNextPageIdentifier
                                     ]
                       }
      in pure (S.Alias pageInfoSelectAliasIdentifier, select)