-- | Translate from the DML to the BigQuery dialect.
module Hasura.Backends.BigQuery.FromIr
  ( fromSelectRows,
    mkSQLSelect,
    fromRootField,
    fromSelectAggregate,
    Error (..),
    runFromIr,
    FromIr,
    FromIrConfig (..),
    defaultFromIrConfig,
    bigQuerySourceConfigToFromIrConfig,
    Top (..), -- Re-export for FromIrConfig.
  )
where

import Control.Monad.Validate
import Data.HashMap.Strict qualified as HM
import Data.List.NonEmpty qualified as NE
import Data.Map.Strict (Map)
import Data.Map.Strict qualified as M
import Data.Proxy
import Data.Text qualified as T
import Hasura.Backends.BigQuery.Instances.Types ()
import Hasura.Backends.BigQuery.Source (BigQuerySourceConfig (..))
import Hasura.Backends.BigQuery.Types as BigQuery
import Hasura.Prelude
import Hasura.RQL.IR qualified as Ir
import Hasura.RQL.Types.Column qualified as Rql
import Hasura.RQL.Types.Common qualified as Rql
import Hasura.RQL.Types.Relationships.Local qualified as Rql
import Hasura.SQL.Backend

--------------------------------------------------------------------------------
-- Types

-- | Most of these errors should be checked for legitimacy.
data Error
  = FromTypeUnsupported (Ir.SelectFromG 'BigQuery Expression)
  | NoOrderSpecifiedInOrderBy
  | MalformedAgg
  | FieldTypeUnsupportedForNow (Ir.AnnFieldG 'BigQuery Void Expression)
  | AggTypeUnsupportedForNow (Ir.TableAggregateFieldG 'BigQuery Void Expression)
  | NodesUnsupportedForNow (Ir.TableAggregateFieldG 'BigQuery Void Expression)
  | NoProjectionFields
  | NoAggregatesMustBeABug
  | UnsupportedArraySelect (Ir.ArraySelectG 'BigQuery Void Expression)
  | UnsupportedOpExpG (Ir.OpExpG 'BigQuery Expression)
  | UnsupportedSQLExp Expression
  | UnsupportedDistinctOn
  | InvalidIntegerishSql Expression
  | DistinctIsn'tSupported
  | ConnectionsNotSupported
  | ActionsNotSupported

instance Show Error where
  show =
    \case
      FromTypeUnsupported {} -> "FromTypeUnsupported"
      NoOrderSpecifiedInOrderBy {} -> "NoOrderSpecifiedInOrderBy"
      MalformedAgg {} -> "MalformedAgg"
      FieldTypeUnsupportedForNow {} -> "FieldTypeUnsupportedForNow"
      AggTypeUnsupportedForNow {} -> "AggTypeUnsupportedForNow"
      NodesUnsupportedForNow {} -> "NodesUnsupportedForNow"
      NoProjectionFields {} -> "NoProjectionFields"
      NoAggregatesMustBeABug {} -> "NoAggregatesMustBeABug"
      UnsupportedArraySelect {} -> "UnsupportedArraySelect"
      UnsupportedOpExpG {} -> "UnsupportedOpExpG"
      UnsupportedSQLExp {} -> "UnsupportedSQLExp"
      UnsupportedDistinctOn {} -> "UnsupportedDistinctOn"
      InvalidIntegerishSql {} -> "InvalidIntegerishSql"
      DistinctIsn'tSupported {} -> "DistinctIsn'tSupported"
      ConnectionsNotSupported {} -> "ConnectionsNotSupported"
      ActionsNotSupported {} -> "ActionsNotSupported"

-- | The base monad used throughout this module for all conversion
-- functions.
--
-- It's a Validate, so it'll continue going when it encounters errors
-- to accumulate as many as possible.
--
-- It also contains a mapping from entity prefixes to counters. So if
-- my prefix is "table" then there'll be a counter that lets me
-- generate table1, table2, etc. Same for any other prefix needed
-- (e.g. names for joins).
--
-- A ReaderT is used around this in most of the module too, for
-- setting the current entity that a given field name refers to. See
-- @fromPGCol@.
newtype FromIr a = FromIr
  { unFromIr :: ReaderT FromIrReader (StateT FromIrState (Validate (NonEmpty Error))) a
  }
  deriving (Functor, Applicative, Monad, MonadValidate (NonEmpty Error))
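
-- For intuition, the per-prefix counters described above hand out a fresh
-- alias on each use. An illustrative sketch only; the rendered alias text is
-- an assumption about the naming scheme, not quoted output:
--
-- > do a1 <- generateEntityAlias (TableTemplate "Artist") -- e.g. "t_Artist1"
-- >    a2 <- generateEntityAlias (TableTemplate "Artist") -- e.g. "t_Artist2"
-- >    pure (a1, a2) -- two distinct aliases for the same prefix
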
data FromIrState = FromIrState
  { indices :: !(Map Text Int)
  }

data FromIrReader = FromIrReader
  { config :: !FromIrConfig
  }

-- | Config values for the from-IR translator.
data FromIrConfig = FromIrConfig
  { -- | Applies globally to all selects, and may be reduced to
    -- something even smaller by permission/user args.
    globalSelectLimit :: !Top
  }

-- | A default config.
defaultFromIrConfig :: FromIrConfig
defaultFromIrConfig = FromIrConfig {globalSelectLimit = NoTop}

data StringifyNumbers
  = StringifyNumbers
  | LeaveNumbersAlone
  deriving (Eq)

--------------------------------------------------------------------------------
-- Runners

runFromIr :: FromIrConfig -> FromIr a -> Validate (NonEmpty Error) a
runFromIr config fromIr =
  evalStateT
    (runReaderT (unFromIr fromIr) (FromIrReader {config}))
    (FromIrState {indices = mempty})

bigQuerySourceConfigToFromIrConfig :: BigQuerySourceConfig -> FromIrConfig
bigQuerySourceConfigToFromIrConfig BigQuerySourceConfig {_scGlobalSelectLimit} =
  FromIrConfig {globalSelectLimit = Top _scGlobalSelectLimit}
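
-- A minimal usage sketch for the runner (assumed caller code, not a quote of
-- the real call sites; 'runValidate' comes from Control.Monad.Validate and
-- 'queryDb' stands in for an IR value):
--
-- > case runValidate (runFromIr defaultFromIrConfig (fromRootField queryDb)) of
-- >   Left errs -> error (show errs) -- all accumulated translation errors
-- >   Right select -> pure select
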
--------------------------------------------------------------------------------
-- Similar rendition of old API

-- | Here is where we apply a top-level annotation to the select to
-- indicate to the data loader that this select ought to produce a
-- single object or an array.
mkSQLSelect ::
  Rql.JsonAggSelect ->
  Ir.AnnSelectG 'BigQuery Void (Ir.AnnFieldG 'BigQuery Void) Expression ->
  FromIr BigQuery.Select
mkSQLSelect jsonAggSelect annSimpleSel = do
  select <- fromSelectRows annSimpleSel
  pure
    ( select
        { selectCardinality =
            case jsonAggSelect of
              Rql.JASMultipleRows -> Many
              Rql.JASSingleObject -> One
        }
    )

-- | Convert from the IR database query into a select.
fromRootField :: Ir.QueryDB 'BigQuery Void Expression -> FromIr Select
fromRootField =
  \case
    (Ir.QDBSingleRow s) -> mkSQLSelect Rql.JASSingleObject s
    (Ir.QDBMultipleRows s) -> mkSQLSelect Rql.JASMultipleRows s
    (Ir.QDBAggregation s) -> fromSelectAggregate Nothing s

--------------------------------------------------------------------------------
-- Top-level exported functions

fromUnnestedJSON :: Expression -> [(ColumnName, ScalarType)] -> [Rql.FieldName] -> FromIr From
fromUnnestedJSON json columns _fields = do
  alias <- generateEntityAlias UnnestTemplate
  pure
    ( FromSelectJson
        ( Aliased
            { aliasedThing =
                SelectJson
                  { selectJsonBody = json,
                    selectJsonFields = columns
                  },
              aliasedAlias = entityAliasText alias
            }
        )
    )

fromSelectRows :: Ir.AnnSelectG 'BigQuery Void (Ir.AnnFieldG 'BigQuery Void) Expression -> FromIr BigQuery.Select
fromSelectRows annSelectG = do
  selectFrom <-
    case from of
      Ir.FromTable qualifiedObject -> fromQualifiedTable qualifiedObject
      Ir.FromFunction nm (Ir.FunctionArgsExp [Ir.AEInput json] _) (Just columns)
        | nm == FunctionName "unnest" -> fromUnnestedJSON json columns (map fst fields)
      _ -> refute (pure (FromTypeUnsupported from))
  Args
    { argsOrderBy,
      argsWhere,
      argsJoins,
      argsTop,
      argsDistinct = Proxy,
      argsOffset,
      argsExistingJoins
    } <-
    runReaderT (fromSelectArgsG args) (fromAlias selectFrom)
  fieldSources <-
    runReaderT
      (traverse (fromAnnFieldsG argsExistingJoins stringifyNumbers) fields)
      (fromAlias selectFrom)
  filterExpression <-
    runReaderT (fromAnnBoolExp permFilter) (fromAlias selectFrom)
  selectProjections <-
    NE.nonEmpty (concatMap (toList . fieldSourceProjections) fieldSources)
      `onNothing` refute (pure NoProjectionFields)
  globalTop <- getGlobalTop
  pure
    Select
      { selectCardinality = Many,
        selectFinalWantedFields = pure (fieldTextNames fields),
        selectGroupBy = mempty,
        selectOrderBy = argsOrderBy,
        -- We DO APPLY the global top here, because this pulls down all rows.
        selectTop = globalTop <> permissionBasedTop <> argsTop,
        selectProjections,
        selectFrom,
        selectJoins = argsJoins <> mapMaybe fieldSourceJoin fieldSources,
        selectWhere = argsWhere <> Where [filterExpression],
        selectOffset = argsOffset
      }
  where
    Ir.AnnSelectG
      { _asnFields = fields,
        _asnFrom = from,
        _asnPerm = perm,
        _asnArgs = args,
        _asnStrfyNum = num
      } = annSelectG
    Ir.TablePerm {_tpLimit = mPermLimit, _tpFilter = permFilter} = perm
    permissionBasedTop =
      maybe NoTop Top mPermLimit
    stringifyNumbers =
      if num
        then StringifyNumbers
        else LeaveNumbersAlone

fromSelectAggregate ::
  Maybe (EntityAlias, HashMap ColumnName ColumnName) ->
  Ir.AnnSelectG 'BigQuery Void (Ir.TableAggregateFieldG 'BigQuery Void) Expression ->
  FromIr BigQuery.Select
fromSelectAggregate minnerJoinFields annSelectG = do
  selectFrom <-
    case from of
      Ir.FromTable qualifiedObject -> fromQualifiedTable qualifiedObject
      _ -> refute (pure (FromTypeUnsupported from))
  args'@Args {argsWhere, argsOrderBy, argsJoins, argsTop, argsOffset, argsDistinct = Proxy} <-
    runReaderT (fromSelectArgsG args) (fromAlias selectFrom)
  filterExpression <-
    runReaderT (fromAnnBoolExp permFilter) (fromAlias selectFrom)
  mforeignKeyConditions <-
    for minnerJoinFields $ \(entityAlias, mapping) ->
      runReaderT
        (fromMappingFieldNames (fromAlias selectFrom) mapping)
        entityAlias
  fieldSources <-
    runReaderT
      ( traverse
          ( fromTableAggregateFieldG
              args'
              permissionBasedTop
              stringifyNumbers
          )
          fields
      )
      (fromAlias selectFrom)
  selectProjections <-
    onNothing
      ( NE.nonEmpty
          (concatMap (toList . fieldSourceProjections) fieldSources)
      )
      (refute (pure NoProjectionFields))
  indexAlias <- generateEntityAlias IndexTemplate
  pure
    Select
      { selectCardinality = One,
        selectFinalWantedFields = Nothing,
        selectGroupBy = mempty,
        selectProjections,
        selectTop = NoTop,
        selectFrom =
          FromSelect
            ( Aliased
                { aliasedThing =
                    Select
                      { selectProjections =
                          case mforeignKeyConditions of
                            Nothing -> pure StarProjection
                            Just innerJoinFields ->
                              pure StarProjection
                                <>
                                -- We setup an index over every row in
                                -- the sub select. Then if you look at
                                -- the outer Select, you can see we apply
                                -- a WHERE that uses this index for
                                -- LIMIT/OFFSET.
                                pure
                                  ( WindowProjection
                                      ( Aliased
                                          { aliasedAlias = unEntityAlias indexAlias,
                                            aliasedThing =
                                              RowNumberOverPartitionBy
                                                -- The row numbers start from 1.
                                                (NE.fromList (map fst innerJoinFields))
                                                argsOrderBy
                                                -- Above: Having the order by
                                                -- in here ensures that the
                                                -- row numbers are ordered by
                                                -- this ordering. Below, we
                                                -- order again for the
                                                -- general row order. Both
                                                -- are needed!
                                          }
                                      )
                                  ),
                        selectFrom,
                        selectJoins =
                          argsJoins <> mapMaybe fieldSourceJoin fieldSources,
                        selectWhere = argsWhere <> Where [filterExpression],
                        selectOrderBy = argsOrderBy,
                        -- Above: This is important to have here, because
                        -- offset/top apply AFTER ordering is applied, so
                        -- you can't put an order by in afterwards in a
                        -- parent query. Therefore be careful about
                        -- putting this elsewhere.
                        selectFinalWantedFields = Nothing,
                        selectCardinality = Many,
                        selectTop = maybe argsTop (const NoTop) mforeignKeyConditions,
                        selectOffset = maybe argsOffset (const Nothing) mforeignKeyConditions,
                        selectGroupBy = mempty
                      },
                  aliasedAlias = entityAliasText (fromAlias selectFrom)
                }
            ),
        selectJoins = mempty,
        selectWhere =
          case mforeignKeyConditions of
            Nothing -> mempty
            Just {} ->
              let offset =
                    case argsOffset of
                      Nothing -> mempty
                      Just offset' ->
                        Where
                          -- Apply an offset using the row_number from above.
                          [ OpExpression
                              MoreOp
                              ( ColumnExpression
                                  FieldName
                                    { fieldNameEntity =
                                        coerce (fromAlias selectFrom),
                                      fieldName = unEntityAlias indexAlias
                                    }
                              )
                              offset'
                          ]
                  limit =
                    case argsTop of
                      NoTop -> mempty
                      Top limit' ->
                        Where
                          -- Apply a limit using the row_number from above.
                          [ OpExpression
                              LessOp
                              ( ColumnExpression
                                  FieldName
                                    { fieldNameEntity =
                                        coerce (fromAlias selectFrom),
                                      fieldName = unEntityAlias indexAlias
                                    }
                              )
                              ( ValueExpression . IntegerValue . Int64 . tshow $
                                  limit' + 1 -- Because the row_number() indexing starts at 1.
                                  -- So idx<l+1 means idx<2 where l = 1 i.e. "limit to 1 row".
                              )
                          ]
               in offset <> limit,
        selectOrderBy = Nothing,
        selectOffset = Nothing
      }
  where
    Ir.AnnSelectG
      { _asnFields = fields,
        _asnFrom = from,
        _asnPerm = perm,
        _asnArgs = args,
        _asnStrfyNum = num -- TODO: Do we ignore this for aggregates?
      } = annSelectG
    Ir.TablePerm {_tpLimit = mPermLimit, _tpFilter = permFilter} = perm
    permissionBasedTop =
      maybe NoTop Top mPermLimit
    stringifyNumbers =
      if num
        then StringifyNumbers
        else LeaveNumbersAlone
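
-- To make the shape above concrete: for an array aggregate under a parent
-- join, the inner select built here comes out roughly like this (an abridged
-- illustration; identifiers follow the aliasing scheme used in this module):
--
-- > SELECT *, ROW_NUMBER() OVER(PARTITION BY artist_other_id) AS artist_album_index
-- > FROM `hasura`.`Album` AS `t_Album1`
-- > ORDER BY `t_Album1`.`album_self_id` ASC NULLS FIRST
--
-- and the outer select then filters on that generated index, e.g.
--
-- > WHERE artist_album_index <= @param
--
-- instead of emitting a plain LIMIT/OFFSET, so the limit applies per parent
-- row rather than globally.
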
--------------------------------------------------------------------------------
-- GraphQL Args

data Args = Args
  { argsWhere :: Where,
    argsOrderBy :: Maybe (NonEmpty OrderBy),
    argsJoins :: [Join],
    argsTop :: Top,
    argsOffset :: Maybe Expression,
    argsDistinct :: Proxy (Maybe (NonEmpty FieldName)),
    argsExistingJoins :: Map TableName EntityAlias
  }
  deriving (Show)

data UnfurledJoin = UnfurledJoin
  { unfurledJoin :: Join,
    -- | Recorded if we joined onto an object relation.
    unfurledObjectTableAlias :: Maybe (TableName, EntityAlias)
  }
  deriving (Show)

fromSelectArgsG :: Ir.SelectArgsG 'BigQuery Expression -> ReaderT EntityAlias FromIr Args
fromSelectArgsG selectArgsG = do
  let argsOffset = ValueExpression . IntegerValue . Int64 . tshow <$> moffset
  argsWhere <-
    maybe (pure mempty) (fmap (Where . pure) . fromAnnBoolExp) mannBoolExp
  argsTop <- maybe (pure mempty) (pure . Top) mlimit
  -- Not supported presently, per Vamshi:
  --
  -- > It is hardly used and we don't have to go to great lengths to support it.
  --
  -- But placeholdering the code so that when it's ready to be used,
  -- you can just drop the Proxy wrapper.
  argsDistinct <-
    case mdistinct of
      Nothing -> pure Proxy
      Just {} -> refute (pure DistinctIsn'tSupported)
  (argsOrderBy, joins) <-
    runWriterT (traverse fromAnnotatedOrderByItemG (maybe [] toList orders))
  -- Any object-relation joins that we generated, we record their
  -- generated names into a mapping.
  let argsExistingJoins =
        M.fromList (mapMaybe unfurledObjectTableAlias (toList joins))
  pure
    Args
      { argsJoins = toList (fmap unfurledJoin joins),
        argsOrderBy = NE.nonEmpty argsOrderBy,
        ..
      }
  where
    Ir.SelectArgs
      { _saWhere = mannBoolExp,
        _saLimit = mlimit,
        _saOffset = moffset,
        _saDistinct = mdistinct,
        _saOrderBy = orders
      } = selectArgsG
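
-- For example (sketch): GraphQL arguments @limit: 2, offset: 3@ end up as
-- roughly
--
-- > Args { argsTop = Top 2
-- >      , argsOffset = Just (ValueExpression (IntegerValue (Int64 "3")))
-- >      , ... }
--
-- with the remaining fields filled in from the where/order-by/distinct
-- translations above.
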
-- | Produce a valid ORDER BY construct, telling about any joins
-- needed on the side.
fromAnnotatedOrderByItemG ::
  Ir.AnnotatedOrderByItemG 'BigQuery Expression -> WriterT (Seq UnfurledJoin) (ReaderT EntityAlias FromIr) OrderBy
fromAnnotatedOrderByItemG Ir.OrderByItemG {obiType, obiColumn, obiNulls} = do
  orderByFieldName <- unfurlAnnotatedOrderByElement obiColumn
  let morderByOrder =
        obiType
  let orderByNullsOrder =
        fromMaybe NullsAnyOrder obiNulls
  case morderByOrder of
    Just orderByOrder -> pure OrderBy {..}
    Nothing -> refute (pure NoOrderSpecifiedInOrderBy)

-- | Unfurl the nested set of object relations (tell'd in the writer)
-- that are terminated by field name (Ir.AOCColumn and
-- Ir.AOCArrayAggregation).
unfurlAnnotatedOrderByElement ::
  Ir.AnnotatedOrderByElement 'BigQuery Expression -> WriterT (Seq UnfurledJoin) (ReaderT EntityAlias FromIr) FieldName
unfurlAnnotatedOrderByElement =
  \case
    Ir.AOCColumn pgColumnInfo -> lift (fromPGColumnInfo pgColumnInfo)
    Ir.AOCObjectRelation Rql.RelInfo {riMapping = mapping, riRTable = tableName} annBoolExp annOrderByElementG -> do
      selectFrom <- lift (lift (fromQualifiedTable tableName))
      joinAliasEntity <-
        lift (lift (generateEntityAlias (ForOrderAlias (tableNameText tableName))))
      joinOn <- lift (fromMappingFieldNames joinAliasEntity mapping)
      whereExpression <-
        lift (local (const (fromAlias selectFrom)) (fromAnnBoolExp annBoolExp))
      tell
        ( pure
            UnfurledJoin
              { unfurledJoin =
                  Join
                    { joinSource =
                        JoinSelect
                          Select
                            { selectCardinality = One,
                              selectFinalWantedFields = Nothing,
                              selectGroupBy = mempty,
                              selectTop = NoTop,
                              selectProjections = NE.fromList [StarProjection],
                              selectFrom,
                              selectJoins = [],
                              selectWhere = Where [whereExpression],
                              selectOrderBy = Nothing,
                              selectOffset = Nothing
                            },
                      joinRightTable = fromAlias selectFrom,
                      joinAlias = joinAliasEntity,
                      joinOn,
                      joinProvenance = OrderByJoinProvenance,
                      joinFieldName = tableNameText tableName, -- TODO: not needed.
                      joinExtractPath = Nothing
                    },
                unfurledObjectTableAlias = Just (tableName, joinAliasEntity)
              }
        )
      local (const joinAliasEntity) (unfurlAnnotatedOrderByElement annOrderByElementG)
    Ir.AOCArrayAggregation Rql.RelInfo {riMapping = mapping, riRTable = tableName} annBoolExp annAggregateOrderBy -> do
      selectFrom <- lift (lift (fromQualifiedTable tableName))
      let alias = aggFieldName
      joinAlias <-
        lift (lift (generateEntityAlias (ForOrderAlias (tableNameText tableName))))
      joinOn <- lift (fromMappingFieldNames joinAlias mapping)
      innerJoinFields <-
        lift (fromMappingFieldNames (fromAlias selectFrom) mapping)
      whereExpression <-
        lift (local (const (fromAlias selectFrom)) (fromAnnBoolExp annBoolExp))
      aggregate <-
        lift
          ( local
              (const (fromAlias selectFrom))
              ( case annAggregateOrderBy of
                  Ir.AAOCount -> pure (CountAggregate StarCountable)
                  Ir.AAOOp text pgColumnInfo -> do
                    fieldName <- fromPGColumnInfo pgColumnInfo
                    pure (OpAggregate text (ColumnExpression fieldName))
              )
          )
      tell
        ( pure
            ( UnfurledJoin
                { unfurledJoin =
                    Join
                      { joinSource =
                          JoinSelect
                            Select
                              { selectCardinality = One,
                                selectFinalWantedFields = Nothing,
                                selectTop = NoTop,
                                selectProjections =
                                  AggregateProjection
                                    Aliased
                                      { aliasedThing = aggregate,
                                        aliasedAlias = alias
                                      }
                                    :|
                                    -- These are group by'd below in selectGroupBy.
                                    map
                                      ( \(fieldName', _) ->
                                          FieldNameProjection
                                            Aliased
                                              { aliasedThing = fieldName',
                                                aliasedAlias = fieldName fieldName'
                                              }
                                      )
                                      innerJoinFields,
                                selectFrom,
                                selectJoins = [],
                                selectWhere = Where [whereExpression],
                                selectOrderBy = Nothing,
                                selectOffset = Nothing,
                                -- This group by corresponds to the field name projections above.
                                selectGroupBy = map fst innerJoinFields
                              },
                        joinRightTable = fromAlias selectFrom,
                        joinProvenance = OrderByJoinProvenance,
                        joinAlias = joinAlias,
                        joinOn,
                        joinFieldName = tableNameText tableName, -- TODO: not needed.
                        joinExtractPath = Nothing
                      },
                  unfurledObjectTableAlias = Nothing
                }
            )
        )
      pure
        FieldName
          { fieldNameEntity = entityAliasText joinAlias,
            fieldName = alias
          }

--------------------------------------------------------------------------------
-- Conversion functions

tableNameText :: TableName -> Text
tableNameText (TableName {tableName = qname}) = qname

-- | This is really the start where you query the base table,
-- everything else is joins attached to it.
fromQualifiedTable :: TableName -> FromIr From
fromQualifiedTable (TableName {tableNameSchema = schemaName, tableName = qname}) = do
  alias <- generateEntityAlias (TableTemplate qname)
  pure
    ( FromQualifiedTable
        ( Aliased
            { aliasedThing =
                TableName {tableName = qname, tableNameSchema = schemaName},
              aliasedAlias = entityAliasText alias
            }
        )
    )
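
-- For example (sketch): translating the table @hasura.Artist@ gives back
-- something like
--
-- > FromQualifiedTable
-- >   (Aliased
-- >      { aliasedThing = TableName {tableNameSchema = "hasura", tableName = "Artist"},
-- >        aliasedAlias = "t_Artist1" -- suffix from the per-prefix counter; exact text is assumed
-- >      })
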
fromAnnBoolExp ::
  Ir.GBoolExp 'BigQuery (Ir.AnnBoolExpFld 'BigQuery Expression) ->
  ReaderT EntityAlias FromIr Expression
fromAnnBoolExp = traverse fromAnnBoolExpFld >=> fromGBoolExp

fromAnnBoolExpFld ::
  Ir.AnnBoolExpFld 'BigQuery Expression -> ReaderT EntityAlias FromIr Expression
fromAnnBoolExpFld =
  \case
    Ir.AVColumn pgColumnInfo opExpGs -> do
      expression <- fmap ColumnExpression (fromPGColumnInfo pgColumnInfo)
      expressions <- traverse (lift . fromOpExpG expression) opExpGs
      pure (AndExpression expressions)
    Ir.AVRelationship Rql.RelInfo {riMapping = mapping, riRTable = table} annBoolExp -> do
      selectFrom <- lift (fromQualifiedTable table)
      foreignKeyConditions <- fromMapping selectFrom mapping
      whereExpression <-
        local (const (fromAlias selectFrom)) (fromAnnBoolExp annBoolExp)
      pure
        ( ExistsExpression
            Select
              { selectCardinality = One,
                selectFinalWantedFields = Nothing,
                selectGroupBy = mempty,
                selectOrderBy = Nothing,
                selectProjections =
                  NE.fromList
                    [ ExpressionProjection
                        ( Aliased
                            { aliasedThing = trueExpression,
                              aliasedAlias = existsFieldName
                            }
                        )
                    ],
                selectFrom,
                selectJoins = mempty,
                selectWhere = Where (foreignKeyConditions <> [whereExpression]),
                selectTop = NoTop,
                selectOffset = Nothing
              }
        )

fromPGColumnInfo :: Rql.ColumnInfo 'BigQuery -> ReaderT EntityAlias FromIr FieldName
fromPGColumnInfo Rql.ColumnInfo {pgiColumn = ColumnName pgCol} = do
  EntityAlias {entityAliasText} <- ask
  pure
    ( FieldName
        { fieldName = pgCol,
          fieldNameEntity = entityAliasText
        }
    )

fromGExists :: Ir.GExists 'BigQuery Expression -> ReaderT EntityAlias FromIr Select
fromGExists Ir.GExists {_geTable, _geWhere} = do
  selectFrom <- lift (fromQualifiedTable _geTable)
  whereExpression <-
    local (const (fromAlias selectFrom)) (fromGBoolExp _geWhere)
  pure
    Select
      { selectCardinality = One,
        selectFinalWantedFields = Nothing,
        selectGroupBy = mempty,
        selectOrderBy = Nothing,
        selectProjections =
          NE.fromList
            [ ExpressionProjection
                ( Aliased
                    { aliasedThing = trueExpression,
                      aliasedAlias = existsFieldName
                    }
                )
            ],
        selectFrom,
        selectJoins = mempty,
        selectWhere = Where [whereExpression],
        selectTop = NoTop,
        selectOffset = Nothing
      }

--------------------------------------------------------------------------------
-- Sources of projected fields
--
-- Because in the IR, a field projected can be a foreign object, we
-- have to both generate a projection AND on the side generate a join.
--
-- So a @FieldSource@ couples the idea of the projected thing and the
-- source of it (via 'Aliased').

data FieldSource
  = ExpressionFieldSource (Aliased Expression)
  | JoinFieldSource (Aliased Join)
  | AggregateFieldSource Text (NonEmpty (Aliased Aggregate))
  | ArrayAggFieldSource (Aliased ArrayAgg)
  deriving (Eq, Show)

-- Example:
--
-- @
-- Track_aggregate {
--   aggregate {
--     count(columns: AlbumId)
--     foo: count(columns: AlbumId)
--     max {
--       AlbumId
--       TrackId
--     }
--   }
-- }
-- @
--
-- field =
-- @
-- TAFAgg
--   [ ( FieldName {getFieldNameTxt = "count"}
--     , AFCount (CTSimple [PGCol {getPGColTxt = "AlbumId"}]))
--   , ( FieldName {getFieldNameTxt = "foo"}
--     , AFCount (CTSimple [PGCol {getPGColTxt = "AlbumId"}]))
--   , ( FieldName {getFieldNameTxt = "max"}
--     , AFOp
--         (AggregateOp
--            { _aoOp = "max"
--            , _aoFields =
--                [ ( FieldName {getFieldNameTxt = "AlbumId"}
--                  , CFCol (PGCol {getPGColTxt = "AlbumId"}))
--                , ( FieldName {getFieldNameTxt = "TrackId"}
--                  , CFCol (PGCol {getPGColTxt = "TrackId"}))
--                ]
--            }))
--   ]
-- @
--
-- should produce:
--
-- SELECT COUNT(`t_Track1`.`AlbumId`) AS `count`,
--        COUNT(`t_Track1`.`AlbumId`) AS `foo`,
--        struct(max(`t_Track1`.`AlbumId`) AS `AlbumId`, max(`t_Track1`.`TrackId`) as TrackId) as `max`
-- FROM chinook.`Track` AS `t_Track1`
--
fromTableAggregateFieldG ::
  Args ->
  Top ->
  StringifyNumbers ->
  (Rql.FieldName, Ir.TableAggregateFieldG 'BigQuery Void Expression) ->
  ReaderT EntityAlias FromIr FieldSource
fromTableAggregateFieldG args permissionBasedTop stringifyNumbers (Rql.FieldName name, field) =
  case field of
    Ir.TAFAgg (aggregateFields :: [(Rql.FieldName, Ir.AggregateField 'BigQuery)]) ->
      case NE.nonEmpty aggregateFields of
        Nothing -> refute (pure NoAggregatesMustBeABug)
        Just fields -> do
          aggregates <-
            traverse
              ( \(fieldName, aggregateField) -> do
                  fmap
                    ( \aliasedThing ->
                        Aliased {aliasedAlias = Rql.getFieldNameTxt fieldName, ..}
                    )
                    (fromAggregateField aggregateField)
              )
              fields
          pure (AggregateFieldSource name aggregates)
    Ir.TAFExp text ->
      pure
        ( ExpressionFieldSource
            Aliased
              { aliasedThing = BigQuery.ValueExpression (StringValue text),
                aliasedAlias = name
              }
        )
    Ir.TAFNodes _ (fields :: [(Rql.FieldName, Ir.AnnFieldG 'BigQuery Void Expression)]) -> do
      fieldSources <-
        traverse
          (fromAnnFieldsG (argsExistingJoins args) stringifyNumbers)
          fields
      arrayAggProjections <-
        NE.nonEmpty (concatMap (toList . fieldSourceProjections) fieldSources)
          `onNothing` refute (pure NoProjectionFields)
      globalTop <- lift getGlobalTop
      pure
        ( ArrayAggFieldSource
            Aliased
              { aliasedThing =
                  ArrayAgg
                    { arrayAggProjections,
                      arrayAggOrderBy = argsOrderBy args,
                      arrayAggTop = globalTop <> argsTop args <> permissionBasedTop
                    },
                aliasedAlias = name
              }
        )

fromAggregateField :: Ir.AggregateField 'BigQuery -> ReaderT EntityAlias FromIr Aggregate
fromAggregateField aggregateField =
  case aggregateField of
    Ir.AFExp text -> pure (TextAggregate text)
    Ir.AFCount countType ->
      CountAggregate <$> case countType of
        StarCountable -> pure StarCountable
        NonNullFieldCountable names -> NonNullFieldCountable <$> traverse fromPGCol names
        DistinctCountable names -> DistinctCountable <$> traverse fromPGCol names
    Ir.AFOp Ir.AggregateOp {_aoOp = op, _aoFields = fields} -> do
      fs <- NE.nonEmpty fields `onNothing` refute (pure MalformedAgg)
      args <-
        traverse
          ( \(Rql.FieldName fieldName, pgColFld) -> do
              expression' <-
                case pgColFld of
                  Ir.CFCol pgCol _columnType -> fmap ColumnExpression (fromPGCol pgCol)
                  Ir.CFExp text -> pure (ValueExpression (StringValue text))
              pure (fieldName, expression')
          )
          fs
      pure (OpAggregates op args)

-- | The main sources of fields, either constants, fields or via joins.
fromAnnFieldsG ::
  Map TableName EntityAlias ->
  StringifyNumbers ->
  (Rql.FieldName, Ir.AnnFieldG 'BigQuery Void Expression) ->
  ReaderT EntityAlias FromIr FieldSource
fromAnnFieldsG existingJoins stringifyNumbers (Rql.FieldName name, field) =
  case field of
    Ir.AFColumn annColumnField -> do
      expression <- fromAnnColumnField stringifyNumbers annColumnField
      pure
        ( ExpressionFieldSource
            Aliased {aliasedThing = expression, aliasedAlias = name}
        )
    Ir.AFExpression text ->
      pure
        ( ExpressionFieldSource
            Aliased
              { aliasedThing = BigQuery.ValueExpression (StringValue text),
                aliasedAlias = name
              }
        )
    Ir.AFObjectRelation objectRelationSelectG ->
      fmap
        ( \aliasedThing ->
            JoinFieldSource (Aliased {aliasedThing, aliasedAlias = name})
        )
        (fromObjectRelationSelectG existingJoins objectRelationSelectG)
    Ir.AFArrayRelation arraySelectG ->
      fmap
        ( \aliasedThing ->
            JoinFieldSource (Aliased {aliasedThing, aliasedAlias = name})
        )
        (fromArraySelectG arraySelectG)

-- | Here is where we project a field as a column expression. If
-- number stringification is on, then we wrap it in a
-- 'ToStringExpression' so that it's casted when being projected.
fromAnnColumnField ::
  StringifyNumbers ->
  Ir.AnnColumnField 'BigQuery Expression ->
  ReaderT EntityAlias FromIr Expression
fromAnnColumnField _stringifyNumbers annColumnField = do
  fieldName <- fromPGCol pgCol
  if asText || False -- TODO: (Rql.isScalarColumnWhere Psql.isBigNum typ && stringifyNumbers == StringifyNumbers)
    then pure (ToStringExpression (ColumnExpression fieldName))
    else case caseBoolExpMaybe of
      Nothing -> pure (ColumnExpression fieldName)
      Just ex -> do
        ex' <- (traverse fromAnnBoolExpFld >=> fromGBoolExp) (coerce ex)
        pure (ConditionalProjection ex' fieldName)
  where
    Ir.AnnColumnField
      { _acfColumn = pgCol,
        _acfAsText = asText :: Bool,
        _acfOp = _ :: Maybe (Ir.ColumnOp 'BigQuery), -- TODO: What's this?
        _acfCaseBoolExpression = caseBoolExpMaybe :: Maybe (Ir.AnnColumnCaseBoolExp 'BigQuery Expression)
      } = annColumnField
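
-- For example (sketch): with @_acfAsText = True@, a column @Name@ on the
-- current entity @t_Artist1@ is projected roughly as
--
-- > ToStringExpression (ColumnExpression (FieldName {fieldNameEntity = "t_Artist1", fieldName = "Name"}))
--
-- whereas without stringification (and no case expression) it stays a bare
-- 'ColumnExpression'.
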
-- | This is where a field name "foo" is resolved to a fully qualified
-- field name [table].[foo]. The table name comes from EntityAlias in
-- the ReaderT.
fromPGCol :: ColumnName -> ReaderT EntityAlias FromIr FieldName
fromPGCol (ColumnName txt) = do
  EntityAlias {entityAliasText} <- ask
  pure (FieldName {fieldName = txt, fieldNameEntity = entityAliasText})
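
-- For example (sketch): with the current entity alias @t_Artist1@ in the
-- reader, the unqualified column @"name"@ resolves as
--
-- > fromPGCol (ColumnName "name")
-- >   -- yields: FieldName {fieldNameEntity = "t_Artist1", fieldName = "name"}
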
fieldSourceProjections :: FieldSource -> NonEmpty Projection
fieldSourceProjections =
  \case
    ExpressionFieldSource aliasedExpression ->
      pure (ExpressionProjection aliasedExpression)
    JoinFieldSource aliasedJoin ->
      NE.fromList
        -- Here we're producing all join fields needed later for
        -- Haskell-native joining. They will be removed by upstream
        -- code.
        ( [ FieldNameProjection
              (Aliased {aliasedThing = right, aliasedAlias = fieldNameText right})
            | (_left, right) <- joinOn join'
          ]
            <>
            -- Below:
            -- When we're doing an array-aggregate, e.g.
            --
            -- query MyQuery {
            --   hasura_Artist {
            --     albums_aggregate {
            --       aggregate {
            --         count
            --       }
            --     }
            --   }
            -- }
            --
            -- we're going to do a join on the albums table, and that
            -- join query will produce a single-row result. Therefore we
            -- can grab the whole entity as a STRUCT-typed object. See
            -- also the docs for 'fromArrayRelationSelectG' and for
            -- 'fromArrayAggregateSelectG'.
            case joinProvenance join' of
              ArrayJoinProvenance fields ->
                pure
                  ( ArrayEntityProjection
                      (joinAlias join')
                      aliasedJoin
                        { aliasedThing =
                            fmap
                              ( \name ->
                                  FieldName
                                    { fieldName = name,
                                      fieldNameEntity =
                                        entityAliasText (joinAlias join')
                                    }
                              )
                              fields,
                          aliasedAlias = aliasedAlias aliasedJoin
                        }
                  )
              ObjectJoinProvenance fields ->
                pure
                  ( EntityProjection
                      aliasedJoin
                        { aliasedThing =
                            fmap
                              ( \name ->
                                  ( FieldName
                                      { fieldName = name,
                                        fieldNameEntity =
                                          entityAliasText (joinAlias join')
                                      },
                                    NoOrigin
                                  )
                              )
                              fields,
                          aliasedAlias = aliasedAlias aliasedJoin
                        }
                  )
              ArrayAggregateJoinProvenance fields ->
                pure
                  ( EntityProjection
                      aliasedJoin
                        { aliasedThing =
                            fmap
                              ( \(name, fieldOrigin) ->
                                  ( FieldName
                                      { fieldName = name,
                                        fieldNameEntity =
                                          entityAliasText (joinAlias join')
                                      },
                                    fieldOrigin
                                  )
                              )
                              fields,
                          aliasedAlias = aliasedAlias aliasedJoin
                        }
                  )
              _ -> []
        )
      where
        join' = aliasedThing aliasedJoin
    AggregateFieldSource name aggregates ->
      pure
        ( AggregateProjections
            (Aliased {aliasedThing = aggregates, aliasedAlias = name})
        )
    ArrayAggFieldSource arrayAgg -> pure (ArrayAggProjection arrayAgg)
  where
    fieldNameText FieldName {fieldName} = fieldName

fieldSourceJoin :: FieldSource -> Maybe Join
fieldSourceJoin =
  \case
    JoinFieldSource aliasedJoin -> pure (aliasedThing aliasedJoin)
    ExpressionFieldSource {} -> Nothing
    AggregateFieldSource {} -> Nothing
    ArrayAggFieldSource {} -> Nothing

--------------------------------------------------------------------------------
|
|
|
|
-- Joins
|
|
|
|
|
2021-06-15 11:58:21 +03:00
|
|
|
-- | Produce the join for an object relation. We produce a normal
|
|
|
|
-- select, but then include join fields. Then downstream, the
|
|
|
|
-- DataLoader will execute the lhs select and rhs join in separate
|
|
|
|
-- server queries, then do a Haskell-native join on the join fields.
|
|
|
|
--
|
|
|
|
-- See also 'fromArrayRelationSelectG' for a similar example.
|
2021-04-12 13:18:29 +03:00
|
|
|
fromObjectRelationSelectG ::
|
2021-09-24 01:56:37 +03:00
|
|
|
Map TableName EntityAlias ->
|
2021-12-07 16:12:02 +03:00
|
|
|
Ir.ObjectRelationSelectG 'BigQuery Void Expression ->
|
2021-09-24 01:56:37 +03:00
|
|
|
ReaderT EntityAlias FromIr Join
|
2021-04-12 13:18:29 +03:00
|
|
|
-- We're not using existingJoins at the moment, which was used to
|
|
|
|
-- avoid re-joining on the same table twice.
|
|
|
|
fromObjectRelationSelectG _existingJoins annRelationSelectG = do
|
|
|
|
selectFrom <- lift (fromQualifiedTable tableFrom)
|
|
|
|
let entityAlias :: EntityAlias = fromAlias selectFrom
|
|
|
|
fieldSources <-
|
|
|
|
local
|
|
|
|
(const entityAlias)
|
|
|
|
(traverse (fromAnnFieldsG mempty LeaveNumbersAlone) fields)
|
|
|
|
selectProjections <-
|
2021-05-18 16:06:42 +03:00
|
|
|
NE.nonEmpty (concatMap (toList . fieldSourceProjections) fieldSources)
|
|
|
|
`onNothing` refute (pure NoProjectionFields)
|
2021-04-12 13:18:29 +03:00
|
|
|
joinFieldName <- lift (fromRelName aarRelationshipName)
|
|
|
|
joinAlias <-
|
|
|
|
lift (generateEntityAlias (ObjectRelationTemplate joinFieldName))
|
|
|
|
filterExpression <- local (const entityAlias) (fromAnnBoolExp tableFilter)
|
|
|
|
innerJoinFields <- fromMappingFieldNames (fromAlias selectFrom) mapping
|
|
|
|
joinOn <-
|
|
|
|
fromMappingFieldNames joinAlias mapping
|
|
|
|
let joinFieldProjections =
|
|
|
|
map
|
2021-09-24 01:56:37 +03:00
|
|
|
( \(fieldName', _) ->
|
|
|
|
FieldNameProjection
|
|
|
|
Aliased
|
|
|
|
{ aliasedThing = fieldName',
|
|
|
|
aliasedAlias = fieldName fieldName'
|
|
|
|
}
|
|
|
|
)
|
2021-04-12 13:18:29 +03:00
|
|
|
innerJoinFields
|
2021-06-15 11:58:21 +03:00
|
|
|
let selectFinalWantedFields = pure (fieldTextNames fields)
|
2021-04-12 13:18:29 +03:00
|
|
|
pure
|
|
|
|
Join
|
2021-09-24 01:56:37 +03:00
|
|
|
{ joinAlias,
|
|
|
|
joinSource =
|
2021-04-12 13:18:29 +03:00
|
|
|
JoinSelect
|
|
|
|
Select
|
2021-09-24 01:56:37 +03:00
|
|
|
{ selectCardinality = One,
|
|
|
|
selectFinalWantedFields,
|
|
|
|
selectGroupBy = mempty,
|
|
|
|
selectOrderBy = Nothing,
|
|
|
|
selectTop = NoTop,
|
|
|
|
selectProjections =
|
|
|
|
NE.fromList joinFieldProjections <> selectProjections,
|
|
|
|
selectFrom,
|
|
|
|
selectJoins = mapMaybe fieldSourceJoin fieldSources,
|
|
|
|
selectWhere = Where [filterExpression],
|
|
|
|
selectOffset = Nothing
|
|
|
|
},
|
|
|
|
joinOn,
|
|
|
|
joinRightTable = fromAlias selectFrom,
|
|
|
|
joinProvenance =
|
2021-06-15 11:58:21 +03:00
|
|
|
ObjectJoinProvenance
|
2021-09-24 01:56:37 +03:00
|
|
|
(fromMaybe [] selectFinalWantedFields), -- TODO: OK?
|
|
|
|
-- Above: Needed by DataLoader to determine the type of
|
|
|
|
-- Haskell-native join to perform.
|
|
|
|
joinFieldName,
|
|
|
|
joinExtractPath = Nothing
|
2021-04-12 13:18:29 +03:00
|
|
|
}
|
|
|
|
where
|
2021-09-24 01:56:37 +03:00
|
|
|
Ir.AnnObjectSelectG
|
2021-12-07 16:12:02 +03:00
|
|
|
{ _aosFields = fields :: Ir.AnnFieldsG 'BigQuery Void Expression,
|
2021-09-24 01:56:37 +03:00
|
|
|
_aosTableFrom = tableFrom :: TableName,
|
|
|
|
_aosTableFilter = tableFilter :: Ir.AnnBoolExp 'BigQuery Expression
|
|
|
|
} = annObjectSelectG
|
|
|
|
Ir.AnnRelationSelectG
|
|
|
|
{ aarRelationshipName,
|
|
|
|
aarColumnMapping = mapping :: HashMap ColumnName ColumnName,
|
2021-12-07 16:12:02 +03:00
|
|
|
aarAnnSelect = annObjectSelectG :: Ir.AnnObjectSelectG 'BigQuery Void Expression
|
2021-09-24 01:56:37 +03:00
|
|
|
} = annRelationSelectG
|
2021-04-12 13:18:29 +03:00
|
|
|
|
|
|
|
-- We're not using existingJoins at the moment, which was used to
|
|
|
|
-- avoid re-joining on the same table twice.
|
|
|
|
_lookupTableFrom ::
|
2021-09-24 01:56:37 +03:00
|
|
|
Map TableName EntityAlias ->
|
|
|
|
TableName ->
|
|
|
|
FromIr (Either EntityAlias From)
|
2021-04-12 13:18:29 +03:00
|
|
|
_lookupTableFrom existingJoins tableFrom = do
|
|
|
|
case M.lookup tableFrom existingJoins of
|
|
|
|
Just entityAlias -> pure (Left entityAlias)
|
2021-09-24 01:56:37 +03:00
|
|
|
Nothing -> fmap Right (fromQualifiedTable tableFrom)
|
2021-04-12 13:18:29 +03:00
|
|
|
|
2021-12-07 16:12:02 +03:00
|
|
|
fromArraySelectG :: Ir.ArraySelectG 'BigQuery Void Expression -> ReaderT EntityAlias FromIr Join
|
2021-04-12 13:18:29 +03:00
|
|
|
fromArraySelectG =
|
|
|
|
\case
|
|
|
|
Ir.ASSimple arrayRelationSelectG ->
|
|
|
|
fromArrayRelationSelectG arrayRelationSelectG
|
|
|
|
Ir.ASAggregate arrayAggregateSelectG ->
|
|
|
|
fromArrayAggregateSelectG arrayAggregateSelectG
|
|
|
|
|
2021-06-15 11:58:21 +03:00
|
|
|
-- | Produce the join for an array aggregate relation. We produce a
|
|
|
|
-- normal select, but then include join fields. Then downstream, the
|
|
|
|
-- DataLoader will execute the lhs select and rhs join in separate
|
|
|
|
-- server queries, then do a Haskell-native join on the join fields.
|
|
|
|
--
|
|
|
|
-- See also 'fromArrayRelationSelectG' for a similar example.
|
2021-04-12 13:18:29 +03:00
|
|
|
fromArrayAggregateSelectG ::
|
2021-12-07 16:12:02 +03:00
|
|
|
Ir.AnnRelationSelectG 'BigQuery (Ir.AnnAggregateSelectG 'BigQuery Void Expression) ->
|
2021-09-24 01:56:37 +03:00
|
|
|
ReaderT EntityAlias FromIr Join
|
2021-04-12 13:18:29 +03:00
|
|
|
fromArrayAggregateSelectG annRelationSelectG = do
|
|
|
|
joinFieldName <- lift (fromRelName aarRelationshipName)
|
Bigquery/fix limit offset for array aggregates
Blocked on https://github.com/hasura/graphql-engine-mono/pull/1640.
While fiddling with BigQuery I noticed a severe issue with offset/limit for array-aggregates. I've fixed it now.
The basic problem was that I was using a query like this:
```graphql
query MyQuery {
hasura_Artist(order_by: {artist_self_id: asc}) {
artist_self_id
albums_aggregate(order_by: {album_self_id: asc}, limit: 2) {
nodes {
album_self_id
}
aggregate {
count
}
}
}
}
```
Producing this SQL:
```sql
SELECT `t_Artist1`.`artist_self_id` AS `artist_self_id`,
STRUCT(IFNULL(`aa_albums1`.`nodes`, NULL) AS `nodes`, IFNULL(`aa_albums1`.`aggregate`, STRUCT(0 AS `count`)) AS `aggregate`) AS `albums_aggregate`
FROM `hasura`.`Artist` AS `t_Artist1`
LEFT OUTER JOIN (SELECT ARRAY_AGG(STRUCT(`t_Album1`.`album_self_id` AS `album_self_id`) ORDER BY (`t_Album1`.`album_self_id`) ASC) AS `nodes`,
STRUCT(COUNT(*) AS `count`) AS `aggregate`,
`t_Album1`.`artist_other_id` AS `artist_other_id`
FROM (SELECT *
FROM `hasura`.`Album` AS `t_Album1`
ORDER BY (`t_Album1`.`album_self_id`) ASC NULLS FIRST
-- PROBLEM HERE
LIMIT @param0) AS `t_Album1`
GROUP BY `t_Album1`.`artist_other_id`)
AS `aa_albums1`
ON (`aa_albums1`.`artist_other_id` = `t_Artist1`.`artist_self_id`)
ORDER BY (`t_Artist1`.`artist_self_id`) ASC NULLS FIRST
```
Note the `LIMIT @param0` -- that is incorrect because we want to limit
per artist. Instead, we want:
```sql
SELECT `t_Artist1`.`artist_self_id` AS `artist_self_id`,
STRUCT(IFNULL(`aa_albums1`.`nodes`, NULL) AS `nodes`, IFNULL(`aa_albums1`.`aggregate`, STRUCT(0 AS `count`)) AS `aggregate`) AS `albums_aggregate`
FROM `hasura`.`Artist` AS `t_Artist1`
LEFT OUTER JOIN (SELECT ARRAY_AGG(STRUCT(`t_Album1`.`album_self_id` AS `album_self_id`) ORDER BY (`t_Album1`.`album_self_id`) ASC) AS `nodes`,
STRUCT(COUNT(*) AS `count`) AS `aggregate`,
`t_Album1`.`artist_other_id` AS `artist_other_id`
FROM (SELECT *,
-- ADDED
ROW_NUMBER() OVER(PARTITION BY artist_other_id) artist_album_index
FROM `hasura`.`Album` AS `t_Album1`
ORDER BY (`t_Album1`.`album_self_id`) ASC NULLS FIRST
) AS `t_Album1`
-- CHANGED
WHERE artist_album_index <= @param
GROUP BY `t_Album1`.`artist_other_id`)
AS `aa_albums1`
ON (`aa_albums1`.`artist_other_id` = `t_Artist1`.`artist_self_id`)
ORDER BY (`t_Artist1`.`artist_self_id`) ASC NULLS FIRST
```
That WHERE clause on the row index serves the purpose of both LIMIT and OFFSET. Then
both the ARRAY_AGG and the COUNT are correct per artist.
I've updated my Haskell test suite to add regression tests for this. I'll push a commit for Python tests shortly. The tests still pass there.
This just fixes a case that we hadn't noticed.
https://github.com/hasura/graphql-engine-mono/pull/1641
GitOrigin-RevId: 49933fa5e09a9306c89565743ecccf2cb54eaa80
2021-07-06 11:28:42 +03:00
|
|
|
select <- do
|
|
|
|
lhsEntityAlias <- ask
|
|
|
|
lift (fromSelectAggregate (pure (lhsEntityAlias, mapping)) annSelectG)
|
2021-04-12 13:18:29 +03:00
|
|
|
alias <- lift (generateEntityAlias (ArrayAggregateTemplate joinFieldName))
|
|
|
|
joinOn <- fromMappingFieldNames alias mapping
|
|
|
|
innerJoinFields <-
|
|
|
|
fromMappingFieldNames (fromAlias (selectFrom select)) mapping
|
|
|
|
let joinFieldProjections =
|
|
|
|
map
|
2021-09-24 01:56:37 +03:00
|
|
|
( \(fieldName', _) ->
|
|
|
|
FieldNameProjection
|
|
|
|
Aliased
|
|
|
|
{ aliasedThing = fieldName',
|
|
|
|
aliasedAlias = fieldName fieldName'
|
|
|
|
}
|
|
|
|
)
|
2021-04-12 13:18:29 +03:00
|
|
|
innerJoinFields
|
2021-06-15 11:58:21 +03:00
|
|
|
let projections =
|
2021-09-24 01:56:37 +03:00
|
|
|
(selectProjections select <> NE.fromList joinFieldProjections)
|
2021-07-07 14:58:37 +03:00
|
|
|
joinSelect =
|
2021-09-24 01:56:37 +03:00
|
|
|
select
|
|
|
|
{ selectWhere = selectWhere select,
|
|
|
|
selectGroupBy = map fst innerJoinFields,
|
|
|
|
selectProjections = projections
|
|
|
|
}
|
2021-04-12 13:18:29 +03:00
|
|
|
pure
|
|
|
|
Join
|
2021-09-24 01:56:37 +03:00
|
|
|
{ joinAlias = alias,
|
|
|
|
joinSource = JoinSelect joinSelect,
|
|
|
|
joinRightTable = fromAlias (selectFrom select),
|
|
|
|
joinOn,
|
|
|
|
joinProvenance =
|
2021-06-15 11:58:21 +03:00
|
|
|
ArrayAggregateJoinProvenance $
|
2021-09-24 01:56:37 +03:00
|
|
|
mapMaybe (\p -> (,aggregateProjectionsFieldOrigin p) <$> projectionAlias p) . toList . selectProjections $ select,
|
|
|
|
-- Above: Needed by DataLoader to determine the type of
|
|
|
|
-- Haskell-native join to perform.
|
|
|
|
joinFieldName,
|
|
|
|
joinExtractPath = Nothing
|
2021-04-12 13:18:29 +03:00
|
|
|
}
|
|
|
|
where
|
2021-09-24 01:56:37 +03:00
|
|
|
Ir.AnnRelationSelectG
|
|
|
|
{ aarRelationshipName,
|
|
|
|
aarColumnMapping = mapping :: HashMap ColumnName ColumnName,
|
|
|
|
aarAnnSelect = annSelectG
|
|
|
|
} = annRelationSelectG
|
2021-04-12 13:18:29 +03:00
|
|
|
|
2021-06-15 11:58:21 +03:00
|
|
|
-- | Produce a join for an array relation.
|
|
|
|
--
|
|
|
|
-- Array relations in PG/MSSQL are expressed using LEFT OUTER JOIN
|
|
|
|
-- LATERAL or OUTER APPLY, which are essentially producing for each
|
|
|
|
-- row on the left an array of the result from the right. Which is
|
|
|
|
-- absolutely what you want for the array relationship.
|
|
|
|
--
|
|
|
|
-- BigQuery doesn't support that. Therefore we are instead performing
|
|
|
|
-- one big array aggregation over ALL rows in the table, grouped by the
|
|
|
|
-- join fields - there is no join against the left-hand-side table at
|
|
|
|
-- this point. The data-loader will perform the LHS query and the RHS query
|
|
|
|
-- separately.
|
|
|
|
--
|
|
|
|
-- What we do instead is a GROUP BY, making sure that the join fields
|
|
|
|
-- are included in the output. Finally, in the
|
|
|
|
-- DataLoader.Plan/DataLoader.Execute, we implement a Haskell-native
|
|
|
|
-- join of the left-hand-side table and the right-hand-side table.
|
|
|
|
--
|
|
|
|
-- Data looks like:
|
|
|
|
--
|
|
|
|
-- join_field_a | join_field_b | aggFieldName (array type)
|
|
|
|
-- 1 | 1 | [ { x: 1, y: 2 }, ... ]
|
|
|
|
-- 1 | 2 | [ { x: 1, y: 2 }, ... ]
|
|
|
|
--
|
|
|
|
-- etc.
|
|
|
|
--
|
|
|
|
-- We want to produce a query that looks like:
|
|
|
|
--
|
|
|
|
-- SELECT artist_other_id, -- For joining.
|
|
|
|
--
|
|
|
|
-- array_agg(struct(album_self_id, title)) as aggFieldName
|
|
|
|
--
|
|
|
|
-- -- ^ Aggregating the actual data.
|
|
|
|
--
|
|
|
|
-- FROM (SELECT *, -- Get everything, plus the row number:
|
|
|
|
--
|
|
|
|
-- ROW_NUMBER() OVER(PARTITION BY artist_other_id) artist_album_index
|
|
|
|
--
|
|
|
|
-- FROM hasura.Album
|
|
|
|
-- ORDER BY album_self_id ASC
|
|
|
|
--
|
|
|
|
-- -- ^ Order by here is important for stable results. Any
|
|
|
|
-- order by clauses for the album should appear here, NOT IN
|
|
|
|
-- THE ARRAY_AGG.
|
|
|
|
--
|
|
|
|
-- )
|
|
|
|
--
|
|
|
|
-- AS indexed_album
|
|
|
|
--
|
|
|
|
-- WHERE artist_album_index > 1
|
|
|
|
-- -- ^ Here is where offsetting occurs.
|
|
|
|
--
|
|
|
|
-- GROUP BY artist_other_id
|
|
|
|
-- -- ^ Group by for joining.
|
|
|
|
--
|
|
|
|
-- ORDER BY artist_other_id;
|
|
|
|
-- ^ Ordering for the artist table should appear here.
|
2021-04-12 13:18:29 +03:00
|
|
|
fromArrayRelationSelectG ::
|
2021-12-07 16:12:02 +03:00
|
|
|
Ir.ArrayRelationSelectG 'BigQuery Void Expression ->
|
2021-09-24 01:56:37 +03:00
|
|
|
ReaderT EntityAlias FromIr Join
|
2021-04-12 13:18:29 +03:00
|
|
|
fromArrayRelationSelectG annRelationSelectG = do
|
2021-06-15 11:58:21 +03:00
|
|
|
select <- lift (fromSelectRows annSelectG) -- Take the original select.
|
2021-04-12 13:18:29 +03:00
|
|
|
joinFieldName <- lift (fromRelName aarRelationshipName)
|
|
|
|
alias <- lift (generateEntityAlias (ArrayRelationTemplate joinFieldName))
|
2021-06-15 11:58:21 +03:00
|
|
|
indexAlias <- lift (generateEntityAlias IndexTemplate)
|
2021-04-12 13:18:29 +03:00
|
|
|
joinOn <- fromMappingFieldNames alias mapping
|
|
|
|
innerJoinFields <-
|
|
|
|
fromMappingFieldNames (fromAlias (selectFrom select)) mapping
|
|
|
|
let joinFieldProjections =
|
|
|
|
map
|
2021-09-24 01:56:37 +03:00
|
|
|
( \(fieldName', _) ->
|
|
|
|
FieldNameProjection
|
|
|
|
Aliased
|
|
|
|
{ aliasedThing = fieldName',
|
|
|
|
aliasedAlias = fieldName fieldName'
|
|
|
|
}
|
|
|
|
)
|
2021-04-12 13:18:29 +03:00
|
|
|
innerJoinFields
|
2021-07-08 18:41:59 +03:00
|
|
|
joinSelect =
|
2021-09-24 01:56:37 +03:00
|
|
|
Select
|
|
|
|
{ selectCardinality = One,
|
|
|
|
selectFinalWantedFields = selectFinalWantedFields select,
|
|
|
|
selectTop = NoTop,
|
|
|
|
selectProjections =
|
|
|
|
NE.fromList joinFieldProjections
|
|
|
|
<> pure
|
|
|
|
( ArrayAggProjection
|
|
|
|
Aliased
|
|
|
|
{ aliasedThing =
|
|
|
|
ArrayAgg
|
|
|
|
{ arrayAggProjections =
|
|
|
|
fmap
|
2021-12-15 18:18:56 +03:00
|
|
|
(aliasToFieldProjection (fromAlias (selectFrom select)))
|
2021-09-24 01:56:37 +03:00
|
|
|
(selectProjections select),
|
|
|
|
arrayAggOrderBy = Nothing,
|
|
|
|
arrayAggTop = selectTop select
|
|
|
|
-- The sub-select takes care of the global top.
|
|
|
|
--
|
|
|
|
-- This handles the LIMIT need.
|
|
|
|
},
|
|
|
|
aliasedAlias = aggFieldName
|
|
|
|
}
|
|
|
|
),
|
|
|
|
selectFrom =
|
|
|
|
FromSelect
|
|
|
|
( Aliased
|
|
|
|
{ aliasedAlias = coerce (fromAlias (selectFrom select)),
|
|
|
|
aliasedThing =
|
|
|
|
Select
|
|
|
|
{ selectProjections =
|
|
|
|
selectProjections select
|
|
|
|
<> NE.fromList joinFieldProjections
|
|
|
|
<> pure
|
|
|
|
( WindowProjection
|
|
|
|
( Aliased
|
|
|
|
{ aliasedAlias = unEntityAlias indexAlias,
|
|
|
|
aliasedThing =
|
|
|
|
RowNumberOverPartitionBy
|
|
|
|
-- The row numbers start from 1.
|
|
|
|
( NE.fromList
|
|
|
|
(map fst innerJoinFields)
|
|
|
|
)
|
|
|
|
(selectOrderBy select)
|
|
|
|
-- Above: Having the order by
|
|
|
|
-- in here ensures that the
|
|
|
|
-- row numbers are ordered by
|
|
|
|
-- this ordering. Below, we
|
|
|
|
-- order again for the
|
|
|
|
-- general row order. Both
|
|
|
|
-- are needed!
|
|
|
|
}
|
|
|
|
)
|
|
|
|
),
|
|
|
|
selectFrom = selectFrom select,
|
|
|
|
selectJoins = selectJoins select,
|
|
|
|
selectWhere = selectWhere select,
|
|
|
|
selectOrderBy = selectOrderBy select,
|
|
|
|
-- Above: This orders the rows themselves. In
|
|
|
|
-- the RowNumberOverPartitionBy, we also set
|
|
|
|
-- a row order for the calculation of the
|
|
|
|
-- indices. Both are needed!
|
|
|
|
selectOffset = Nothing,
|
|
|
|
selectFinalWantedFields =
|
|
|
|
selectFinalWantedFields select,
|
|
|
|
selectCardinality = Many,
|
|
|
|
selectTop = NoTop,
|
|
|
|
selectGroupBy = mempty
|
|
|
|
}
|
|
|
|
}
|
|
|
|
),
|
|
|
|
selectWhere =
|
|
|
|
case selectOffset select of
|
|
|
|
Nothing -> mempty
|
|
|
|
Just offset ->
|
|
|
|
Where
|
|
|
|
[ OpExpression
|
|
|
|
MoreOp
|
|
|
|
(ColumnExpression FieldName {fieldNameEntity = coerce (fromAlias (selectFrom select)), fieldName = unEntityAlias indexAlias})
|
|
|
|
offset
|
|
|
|
],
|
|
|
|
selectOrderBy = Nothing, -- Not needed.
|
|
|
|
selectJoins = mempty,
|
|
|
|
selectOffset = Nothing,
|
|
|
|
-- This group by corresponds to the field name projections above. E.g. artist_other_id
|
|
|
|
selectGroupBy = map fst innerJoinFields
|
|
|
|
}
|
2021-04-12 13:18:29 +03:00
|
|
|
pure
|
|
|
|
Join
|
2021-09-24 01:56:37 +03:00
|
|
|
{ joinAlias = alias,
|
|
|
|
joinSource = JoinSelect joinSelect,
|
|
|
|
joinRightTable = fromAlias (selectFrom select),
|
|
|
|
joinOn,
|
|
|
|
joinProvenance =
|
2021-06-15 11:58:21 +03:00
|
|
|
ArrayJoinProvenance
|
2021-09-24 01:56:37 +03:00
|
|
|
( if True
|
|
|
|
then (fromMaybe [] (selectFinalWantedFields select))
|
|
|
|
else
|
|
|
|
( mapMaybe
|
|
|
|
projectionAlias
|
|
|
|
(toList (selectProjections select))
|
|
|
|
)
|
|
|
|
),
|
|
|
|
-- Above: Needed by DataLoader to determine the type of
|
|
|
|
-- Haskell-native join to perform.
|
|
|
|
joinFieldName,
|
|
|
|
joinExtractPath = Just aggFieldName
|
2021-04-12 13:18:29 +03:00
|
|
|
}
|
|
|
|
where
|
2021-09-24 01:56:37 +03:00
|
|
|
Ir.AnnRelationSelectG
|
|
|
|
{ aarRelationshipName,
|
|
|
|
aarColumnMapping = mapping :: HashMap ColumnName ColumnName,
|
|
|
|
aarAnnSelect = annSelectG
|
|
|
|
} = annRelationSelectG
|
2021-04-12 13:18:29 +03:00
|
|
|
|
2021-06-15 11:58:21 +03:00
|
|
|
-- | For entity projections, convert any entity aliases to their field
|
2021-12-15 18:18:56 +03:00
|
|
|
-- names. ArrayEntityProjection and ExpressionProjection get converted
|
|
|
|
-- to aliased references to fields of the same names, because the
|
|
|
|
-- expressions already have aliases applied in the select from ArrayAgg
|
|
|
|
-- (created in Hasura.Backends.BigQuery.ToQuery.fromArrayAgg).
|
|
|
|
aliasToFieldProjection :: EntityAlias -> Projection -> Projection
|
|
|
|
aliasToFieldProjection (EntityAlias selectAlias) =
|
2021-06-15 11:58:21 +03:00
|
|
|
\case
|
|
|
|
EntityProjection Aliased {aliasedAlias = name, aliasedThing = fields} ->
|
|
|
|
EntityProjection
|
|
|
|
Aliased
|
2021-09-24 01:56:37 +03:00
|
|
|
{ aliasedAlias = name,
|
|
|
|
aliasedThing =
|
2021-06-15 11:58:21 +03:00
|
|
|
fmap
|
|
|
|
(\(FieldName {..}, origin) -> (FieldName {fieldNameEntity = name, ..}, origin))
|
|
|
|
fields
|
|
|
|
}
|
2021-12-15 18:18:56 +03:00
|
|
|
ArrayEntityProjection _ aliased ->
|
|
|
|
aliasColumn aliased
|
|
|
|
ExpressionProjection aliased ->
|
|
|
|
aliasColumn aliased
|
2021-06-15 11:58:21 +03:00
|
|
|
p -> p
|
2021-12-15 18:18:56 +03:00
|
|
|
where
|
|
|
|
aliasColumn :: Aliased a -> Projection
|
|
|
|
aliasColumn aliased =
|
|
|
|
ExpressionProjection
|
|
|
|
aliased
|
|
|
|
{ aliasedThing = ColumnExpression (FieldName {fieldName = aliasedAlias aliased, fieldNameEntity = selectAlias})
|
|
|
|
}
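-- An illustrative example (hypothetical names): with the outer alias
-- 'EntityAlias "t_Album1"', an 'ExpressionProjection' whose alias is
-- "album_self_id" is rewritten so that its expression becomes
-- 'ColumnExpression (FieldName "album_self_id" "t_Album1")', while the
-- projection keeps its original alias.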
|
2021-06-15 11:58:21 +03:00
|
|
|
|
2021-04-12 13:18:29 +03:00
|
|
|
fromRelName :: Rql.RelName -> FromIr Text
|
|
|
|
fromRelName relName =
|
|
|
|
pure (Rql.relNameToTxt relName)
|
|
|
|
|
|
|
|
-- | The context given by the reader is of the previous/parent
|
|
|
|
-- "remote" table. The WHERE that we're generating goes in the child,
|
|
|
|
-- "local" query. The @From@ passed in as argument is the local table.
|
|
|
|
--
|
|
|
|
-- We should hope to see e.g. "post.category = category.id" for a
|
|
|
|
-- local table of post and a remote table of category.
|
|
|
|
--
|
|
|
|
-- The left/right columns in @HashMap ColumnName ColumnName@ correspond
|
|
|
|
-- to the left/right of @select ... join ...@. Therefore left=remote,
|
|
|
|
-- right=local in this context.
|
|
|
|
fromMapping ::
|
2021-09-24 01:56:37 +03:00
|
|
|
From ->
|
|
|
|
HashMap ColumnName ColumnName ->
|
|
|
|
ReaderT EntityAlias FromIr [Expression]
|
2021-04-12 13:18:29 +03:00
|
|
|
fromMapping localFrom =
|
|
|
|
traverse
|
2021-09-24 01:56:37 +03:00
|
|
|
( \(remotePgCol, localPgCol) -> do
|
|
|
|
localFieldName <- local (const (fromAlias localFrom)) (fromPGCol localPgCol)
|
|
|
|
remoteFieldName <- fromPGCol remotePgCol
|
|
|
|
pure
|
|
|
|
( EqualExpression
|
|
|
|
(ColumnExpression localFieldName)
|
|
|
|
(ColumnExpression remoteFieldName)
|
|
|
|
)
|
|
|
|
)
|
|
|
|
. HM.toList
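-- For example (illustrative, matching the comment above): for a local
-- table "post" and a remote table "category", a mapping entry
-- (ColumnName "id", ColumnName "category") yields
-- 'EqualExpression (ColumnExpression post.category) (ColumnExpression category.id)',
-- i.e. "post.category = category.id".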
|
2021-04-12 13:18:29 +03:00
|
|
|
|
|
|
|
fromMappingFieldNames ::
|
2021-09-24 01:56:37 +03:00
|
|
|
EntityAlias ->
|
|
|
|
HashMap ColumnName ColumnName ->
|
|
|
|
ReaderT EntityAlias FromIr [(FieldName, FieldName)]
|
2021-04-12 13:18:29 +03:00
|
|
|
fromMappingFieldNames localFrom =
|
|
|
|
traverse
|
2021-09-24 01:56:37 +03:00
|
|
|
( \(remotePgCol, localPgCol) -> do
|
|
|
|
localFieldName <- local (const localFrom) (fromPGCol localPgCol)
|
|
|
|
remoteFieldName <- fromPGCol remotePgCol
|
|
|
|
pure
|
|
|
|
( (,)
|
|
|
|
(localFieldName)
|
|
|
|
(remoteFieldName)
|
|
|
|
)
|
|
|
|
)
|
|
|
|
. HM.toList
|
2021-04-12 13:18:29 +03:00
|
|
|
|
|
|
|
--------------------------------------------------------------------------------
|
|
|
|
-- Basic SQL expression types
|
|
|
|
|
|
|
|
fromOpExpG :: Expression -> Ir.OpExpG 'BigQuery Expression -> FromIr Expression
|
|
|
|
fromOpExpG expression op =
|
|
|
|
case op of
|
2021-09-24 01:56:37 +03:00
|
|
|
Ir.ANISNULL -> pure (IsNullExpression expression)
|
|
|
|
Ir.ANISNOTNULL -> pure (IsNotNullExpression expression)
|
2021-05-18 16:06:42 +03:00
|
|
|
Ir.AEQ False val -> pure (nullableBoolEquality expression val)
|
2021-09-24 01:56:37 +03:00
|
|
|
Ir.AEQ True val -> pure (EqualExpression expression val)
|
2021-05-18 16:06:42 +03:00
|
|
|
Ir.ANE False val -> pure (nullableBoolInequality expression val)
|
2021-09-24 01:56:37 +03:00
|
|
|
Ir.ANE True val -> pure (NotEqualExpression expression val)
|
|
|
|
Ir.AIN val -> pure (OpExpression InOp expression val)
|
|
|
|
Ir.ANIN val -> pure (OpExpression NotInOp expression val)
|
|
|
|
Ir.AGT val -> pure (OpExpression MoreOp expression val)
|
|
|
|
Ir.ALT val -> pure (OpExpression LessOp expression val)
|
|
|
|
Ir.AGTE val -> pure (OpExpression MoreOrEqualOp expression val)
|
|
|
|
Ir.ALTE val -> pure (OpExpression LessOrEqualOp expression val)
|
|
|
|
Ir.ACast _casts -> refute (pure (UnsupportedOpExpG op)) -- mkCastsExp casts
|
|
|
|
Ir.ALIKE _val -> refute (pure (UnsupportedOpExpG op)) -- S.BECompare S.SLIKE lhs val
|
|
|
|
Ir.ANLIKE _val -> refute (pure (UnsupportedOpExpG op)) -- S.BECompare S.SNLIKE lhs val
|
|
|
|
Ir.CEQ _rhsCol -> refute (pure (UnsupportedOpExpG op)) -- S.BECompare S.SEQ lhs $ mkQCol rhsCol
|
|
|
|
Ir.CNE _rhsCol -> refute (pure (UnsupportedOpExpG op)) -- S.BECompare S.SNE lhs $ mkQCol rhsCol
|
|
|
|
Ir.CGT _rhsCol -> refute (pure (UnsupportedOpExpG op)) -- S.BECompare S.SGT lhs $ mkQCol rhsCol
|
|
|
|
Ir.CLT _rhsCol -> refute (pure (UnsupportedOpExpG op)) -- S.BECompare S.SLT lhs $ mkQCol rhsCol
|
|
|
|
Ir.CGTE _rhsCol -> refute (pure (UnsupportedOpExpG op)) -- S.BECompare S.SGTE lhs $ mkQCol rhsCol
|
|
|
|
Ir.CLTE _rhsCol -> refute (pure (UnsupportedOpExpG op)) -- S.BECompare S.SLTE lhs $ mkQCol rhsCol
|
2021-04-12 13:18:29 +03:00
|
|
|
-- These operators are new to this API as of 2021-02-18. Not sure what to do with them at present, so they are marked as unsupported.
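-- A small illustration of the mapping above (hypothetical expressions):
-- 'fromOpExpG e (Ir.AGT v)' yields 'OpExpression MoreOp e v', rendering
-- roughly as the SQL 'e > v', and 'Ir.ANISNULL' yields
-- 'IsNullExpression e', i.e. 'e IS NULL'.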
|
|
|
|
|
|
|
|
nullableBoolEquality :: Expression -> Expression -> Expression
|
|
|
|
nullableBoolEquality x y =
|
|
|
|
OrExpression
|
2021-09-24 01:56:37 +03:00
|
|
|
[ EqualExpression x y,
|
|
|
|
AndExpression [IsNullExpression x, IsNullExpression y]
|
2021-04-12 13:18:29 +03:00
|
|
|
]
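-- Above: roughly the SQL '(x = y) OR (x IS NULL AND y IS NULL)', so
-- that two NULLs compare as equal (illustrative rendering).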
|
|
|
|
|
|
|
|
nullableBoolInequality :: Expression -> Expression -> Expression
|
|
|
|
nullableBoolInequality x y =
|
|
|
|
OrExpression
|
2021-09-24 01:56:37 +03:00
|
|
|
[ NotEqualExpression x y,
|
|
|
|
AndExpression [IsNotNullExpression x, IsNullExpression y]
|
2021-04-12 13:18:29 +03:00
|
|
|
]
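-- Above: roughly the SQL '(x <> y) OR (x IS NOT NULL AND y IS NULL)'
-- (illustrative rendering).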
|
|
|
|
|
|
|
|
fromGBoolExp :: Ir.GBoolExp 'BigQuery Expression -> ReaderT EntityAlias FromIr Expression
|
|
|
|
fromGBoolExp =
|
|
|
|
\case
|
|
|
|
Ir.BoolAnd expressions ->
|
|
|
|
fmap AndExpression (traverse fromGBoolExp expressions)
|
|
|
|
Ir.BoolOr expressions ->
|
|
|
|
fmap OrExpression (traverse fromGBoolExp expressions)
|
|
|
|
Ir.BoolNot expression -> fmap NotExpression (fromGBoolExp expression)
|
|
|
|
Ir.BoolExists gExists -> fmap ExistsExpression (fromGExists gExists)
|
|
|
|
Ir.BoolFld expression -> pure expression
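-- For example (illustrative): 'Ir.BoolAnd [a, b]' becomes
-- 'AndExpression [a', b']' where a' and b' are the recursively
-- translated sub-expressions, and 'Ir.BoolNot a' becomes
-- 'NotExpression a''.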
|
|
|
|
|
|
|
|
--------------------------------------------------------------------------------
|
|
|
|
-- Misc combinators
|
|
|
|
|
|
|
|
trueExpression :: Expression
|
|
|
|
trueExpression = ValueExpression (BoolValue True)
|
|
|
|
|
|
|
|
--------------------------------------------------------------------------------
|
|
|
|
-- Constants
|
|
|
|
|
|
|
|
aggFieldName :: Text
|
|
|
|
aggFieldName = "agg"
|
|
|
|
|
|
|
|
existsFieldName :: Text
|
|
|
|
existsFieldName = "exists_placeholder"
|
|
|
|
|
|
|
|
--------------------------------------------------------------------------------
|
|
|
|
-- Name generation
|
|
|
|
|
|
|
|
data NameTemplate
|
|
|
|
= ArrayRelationTemplate Text
|
|
|
|
| ArrayAggregateTemplate Text
|
|
|
|
| ObjectRelationTemplate Text
|
|
|
|
| TableTemplate Text
|
|
|
|
| ForOrderAlias Text
|
2021-06-15 11:58:21 +03:00
|
|
|
| IndexTemplate
|
2021-11-24 19:21:59 +03:00
|
|
|
| UnnestTemplate
|
2021-04-12 13:18:29 +03:00
|
|
|
|
|
|
|
generateEntityAlias :: NameTemplate -> FromIr EntityAlias
|
|
|
|
generateEntityAlias template = do
|
2021-06-25 16:35:39 +03:00
|
|
|
FromIr
|
2021-09-24 01:56:37 +03:00
|
|
|
( modify'
|
|
|
|
( \FromIrState {..} ->
|
|
|
|
FromIrState {indices = M.insertWith (+) prefix start indices, ..}
|
|
|
|
)
|
|
|
|
)
|
2021-06-25 16:35:39 +03:00
|
|
|
i <- FromIr (gets indices)
|
2021-04-12 13:18:29 +03:00
|
|
|
pure (EntityAlias (prefix <> tshow (fromMaybe start (M.lookup prefix i))))
|
|
|
|
where
|
|
|
|
start = 1
|
|
|
|
prefix = T.take 20 rendered
|
|
|
|
rendered =
|
|
|
|
case template of
|
2021-09-24 01:56:37 +03:00
|
|
|
ArrayRelationTemplate sample -> "ar_" <> sample
|
2021-04-12 13:18:29 +03:00
|
|
|
ArrayAggregateTemplate sample -> "aa_" <> sample
|
|
|
|
ObjectRelationTemplate sample -> "or_" <> sample
|
2021-09-24 01:56:37 +03:00
|
|
|
TableTemplate sample -> "t_" <> sample
|
|
|
|
ForOrderAlias sample -> "order_" <> sample
|
|
|
|
IndexTemplate -> "idx"
|
2021-11-24 19:21:59 +03:00
|
|
|
UnnestTemplate -> "unnest"
|
2021-04-12 13:18:29 +03:00
|
|
|
|
|
|
|
fromAlias :: From -> EntityAlias
|
|
|
|
fromAlias (FromQualifiedTable Aliased {aliasedAlias}) = EntityAlias aliasedAlias
|
2021-09-24 01:56:37 +03:00
|
|
|
fromAlias (FromSelect Aliased {aliasedAlias}) = EntityAlias aliasedAlias
|
2021-11-24 19:21:59 +03:00
|
|
|
fromAlias (FromSelectJson Aliased {aliasedAlias}) = EntityAlias aliasedAlias
|
2021-04-12 13:18:29 +03:00
|
|
|
|
2021-12-07 16:12:02 +03:00
|
|
|
fieldTextNames :: Ir.AnnFieldsG 'BigQuery Void Expression -> [Text]
|
2021-04-12 13:18:29 +03:00
|
|
|
fieldTextNames = fmap (\(Rql.FieldName name, _) -> name)
|
2021-06-15 11:58:21 +03:00
|
|
|
|
|
|
|
unEntityAlias :: EntityAlias -> Text
|
|
|
|
unEntityAlias (EntityAlias t) = t
|
2021-06-25 16:35:39 +03:00
|
|
|
|
|
|
|
--------------------------------------------------------------------------------
|
|
|
|
-- Global limit support
|
|
|
|
|
|
|
|
getGlobalTop :: FromIr Top
|
|
|
|
getGlobalTop =
|
|
|
|
FromIr
|
2021-09-24 01:56:37 +03:00
|
|
|
( asks
|
|
|
|
( \FromIrReader {config = FromIrConfig {globalSelectLimit}} ->
|
|
|
|
globalSelectLimit
|
|
|
|
)
|
|
|
|
)
|