2022-02-08 12:24:34 +03:00
|
|
|
|
-- | Postgres DDL Table
|
|
|
|
|
--
|
|
|
|
|
-- Used to fill up the enum values field of 'Hasura.RQL.Types.Table.TableCoreInfoG'.
|
|
|
|
|
--
|
2023-04-25 20:16:53 +03:00
|
|
|
|
-- See 'Hasura.Eventing.Backend'.
|
2021-02-14 09:07:52 +03:00
|
|
|
|
module Hasura.Backends.Postgres.DDL.Table
|
2021-09-20 10:34:59 +03:00
|
|
|
|
( fetchAndValidateEnumValues,
|
2021-02-14 09:07:52 +03:00
|
|
|
|
)
|
|
|
|
|
where
|
|
|
|
|
|
|
|
|
|
import Control.Monad.Trans.Control (MonadBaseControl)
|
|
|
|
|
import Control.Monad.Validate
|
2023-04-26 18:42:13 +03:00
|
|
|
|
import Data.HashMap.Strict qualified as HashMap
|
2021-02-14 09:07:52 +03:00
|
|
|
|
import Data.List (delete)
|
|
|
|
|
import Data.List.NonEmpty qualified as NE
|
|
|
|
|
import Data.Sequence qualified as Seq
|
2022-07-19 11:41:27 +03:00
|
|
|
|
import Data.Sequence.NonEmpty qualified as NESeq
|
2021-02-14 09:07:52 +03:00
|
|
|
|
import Data.Text.Extended
|
Import `pg-client-hs` as `PG`
Result of executing the following commands:
```shell
# replace "as Q" imports with "as PG" (in retrospect this didn't need a regex)
git grep -lE 'as Q($|[^a-zA-Z])' -- '*.hs' | xargs sed -i -E 's/as Q($|[^a-zA-Z])/as PG\1/'
# replace " Q." with " PG."
git grep -lE ' Q\.' -- '*.hs' | xargs sed -i 's/ Q\./ PG./g'
# replace "(Q." with "(PG."
git grep -lE '\(Q\.' -- '*.hs' | xargs sed -i 's/(Q\./(PG./g'
# ditto, but for [, |, { and !
git grep -lE '\[Q\.' -- '*.hs' | xargs sed -i 's/\[Q\./\[PG./g'
git grep -l '|Q\.' -- '*.hs' | xargs sed -i 's/|Q\./|PG./g'
git grep -l '{Q\.' -- '*.hs' | xargs sed -i 's/{Q\./{PG./g'
git grep -l '!Q\.' -- '*.hs' | xargs sed -i 's/!Q\./!PG./g'
```
(Doing the `grep -l` before the `sed`, instead of `sed` on the entire codebase, reduces the number of `mtime` updates, and so reduces how many times a file gets recompiled while checking intermediate results.)
Finally, I manually removed a broken and unused `Arbitrary` instance in `Hasura.RQL.Network`. (It used an `import Test.QuickCheck.Arbitrary as Q` statement, which was erroneously caught by the first find-replace command.)
After this PR, `Q` is no longer used as an import qualifier. That was not the goal of this PR, but perhaps it's a useful fact for future efforts.
PR-URL: https://github.com/hasura/graphql-engine-mono/pull/5933
GitOrigin-RevId: 8c84c59d57789111d40f5d3322c5a885dcfbf40e
2022-09-20 22:54:43 +03:00
|
|
|
|
import Database.PG.Query qualified as PG
|
2021-02-14 09:07:52 +03:00
|
|
|
|
import Hasura.Backends.Postgres.Connection
|
|
|
|
|
import Hasura.Backends.Postgres.SQL.DML
|
|
|
|
|
import Hasura.Backends.Postgres.SQL.Types
|
2021-05-11 18:18:31 +03:00
|
|
|
|
import Hasura.Base.Error
|
2021-03-15 16:02:58 +03:00
|
|
|
|
import Hasura.Prelude
|
2021-04-22 00:44:37 +03:00
|
|
|
|
import Hasura.RQL.Types.Backend
|
2023-04-24 21:35:48 +03:00
|
|
|
|
import Hasura.RQL.Types.BackendType
|
2021-02-14 09:07:52 +03:00
|
|
|
|
import Hasura.RQL.Types.Column
|
|
|
|
|
import Hasura.SQL.Types
|
|
|
|
|
import Hasura.Server.Utils
|
2023-05-17 11:53:31 +03:00
|
|
|
|
import Hasura.Table.Cache
|
2021-02-14 09:07:52 +03:00
|
|
|
|
import Language.GraphQL.Draft.Syntax qualified as G
|
|
|
|
|
|
2021-04-22 00:44:37 +03:00
|
|
|
|
-- | Everything that can disqualify a Postgres table from being used as an
-- enum table.
--
-- These are accumulated (rather than short-circuited) under 'MonadValidate'
-- so that 'fetchAndValidateEnumValues' can report every problem at once;
-- 'showErrors' renders each case into a user-facing message.
data EnumTableIntegrityError (b :: BackendType)
  = -- | Postgres reported an error while the table's rows were being fetched.
    EnumTablePostgresError Text
  | -- | An enum table must have a primary key.
    EnumTableMissingPrimaryKey
  | -- | The primary key spans more than one column (the offending columns).
    EnumTableMultiColumnPrimaryKey [PGCol]
  | -- | The single primary-key column is not of type @text@.
    EnumTableNonTextualPrimaryKey (RawColumnInfo b)
  | -- | The table has no rows, so there are no enum values to expose.
    EnumTableNoEnumValues
  | -- | Some row values are not valid GraphQL enum value names.
    EnumTableInvalidEnumValueNames (NE.NonEmpty Text)
  | -- | The optional comment column is not of type @text@.
    EnumTableNonTextualCommentColumn (RawColumnInfo b)
  | -- | The table has columns beyond the primary key and one optional
    -- comment column (the offending columns).
    EnumTableTooManyColumns [PGCol]
|
2021-02-14 09:07:52 +03:00
|
|
|
|
|
|
|
|
|
-- | Fetches the rows of the given table and validates that they can be used
-- as GraphQL enum values.  Structural validation (primary key shape, column
-- types) happens first, purely over the supplied metadata; only if a usable
-- primary-key column is found do we hit the database via
-- 'fetchEnumValuesFromDb'.
--
-- All integrity problems are accumulated with 'MonadValidate' and rendered
-- into a single 400 @ConstraintViolation@ error via 'showErrors', so the
-- caller sees every issue in one message rather than just the first.
fetchAndValidateEnumValues ::
  forall pgKind m.
  (Backend ('Postgres pgKind), MonadIO m, MonadBaseControl IO m) =>
  PGSourceConfig ->
  QualifiedTable ->
  Maybe (PrimaryKey ('Postgres pgKind) (RawColumnInfo ('Postgres pgKind))) ->
  [RawColumnInfo ('Postgres pgKind)] ->
  m (Either QErr EnumValues)
fetchAndValidateEnumValues pgSourceConfig tableName maybePrimaryKey columnInfos =
  runExceptT $
    either (throw400 ConstraintViolation . showErrors) pure =<< runValidateT fetchAndValidate
  where
    fetchAndValidate ::
      (MonadIO n, MonadBaseControl IO n, MonadValidate [EnumTableIntegrityError ('Postgres pgKind)] n) =>
      n EnumValues
    fetchAndValidate = do
      -- 'tolerate' records any primary-key error but keeps going, so column
      -- errors from 'validateColumns' can be reported in the same pass.
      maybePrimaryKeyColumn <- tolerate validatePrimaryKey
      maybeCommentColumn <- validateColumns maybePrimaryKeyColumn
      case maybePrimaryKeyColumn of
        -- The primary-key error was already recorded by 'tolerate' above;
        -- refuting with 'mempty' aborts without adding a duplicate error.
        Nothing -> refute mempty
        Just primaryKeyColumn -> do
          result <-
            runPgSourceReadTx pgSourceConfig $
              runValidateT $
                fetchEnumValuesFromDb tableName primaryKeyColumn maybeCommentColumn
          case result of
            -- Transaction-level failure: surface the Postgres error text.
            Left e -> (refute . pure . EnumTablePostgresError . qeError) e
            -- The inner 'runValidateT' collected integrity errors.
            Right (Left vErrs) -> refute vErrs
            Right (Right r) -> pure r
      where
        -- Checks that the table has exactly one primary-key column of type
        -- @text@, returning that column on success.
        validatePrimaryKey = case maybePrimaryKey of
          Nothing -> refute [EnumTableMissingPrimaryKey]
          Just primaryKey -> case _pkColumns primaryKey of
            -- Exactly one primary-key column.
            column NESeq.:<|| Seq.Empty -> case rciType column of
              RawColumnTypeScalar PGText -> pure column
              _ -> refute [EnumTableNonTextualPrimaryKey column]
            columns -> refute [EnumTableMultiColumnPrimaryKey $ map rciName (toList columns)]

        -- Checks the non-primary-key columns: at most one is allowed (the
        -- optional comment column), and it must be of type @text@.  Uses
        -- 'dispute' so validation continues after recording the error.
        validateColumns primaryKeyColumn = do
          let nonPrimaryKeyColumns = maybe columnInfos (`delete` columnInfos) primaryKeyColumn
          case nonPrimaryKeyColumns of
            [] -> pure Nothing
            [column] -> case rciType column of
              RawColumnTypeScalar PGText -> pure $ Just column
              _ -> dispute [EnumTableNonTextualCommentColumn column] $> Nothing
            columns -> dispute [EnumTableTooManyColumns $ map rciName columns] $> Nothing

    -- Renders every accumulated integrity error into one human-readable
    -- message for the 400 response.
    showErrors :: [EnumTableIntegrityError ('Postgres pgKind)] -> Text
    showErrors allErrors =
      "the table " <> tableName <<> " cannot be used as an enum " <> reasonsMessage
      where
        reasonsMessage = makeReasonMessage allErrors showOne

        showOne :: EnumTableIntegrityError ('Postgres pgKind) -> Text
        showOne = \case
          EnumTablePostgresError err -> "postgres error: " <> err
          EnumTableMissingPrimaryKey -> "the table must have a primary key"
          EnumTableMultiColumnPrimaryKey cols ->
            "the table’s primary key must not span multiple columns ("
              <> commaSeparated (sort cols)
              <> ")"
          EnumTableNonTextualPrimaryKey colInfo -> typeMismatch "primary key" colInfo PGText
          EnumTableNoEnumValues -> "the table must have at least one row"
          EnumTableInvalidEnumValueNames values ->
            -- Sort then reverse so the offending names are listed in
            -- ascending order with "and <last>" closing the sentence.
            let pluralString = " are not valid GraphQL enum value names"
                valuesString = case NE.reverse (NE.sort values) of
                  value NE.:| [] -> "value " <> value <<> " is not a valid GraphQL enum value name"
                  value2 NE.:| [value1] -> "values " <> value1 <<> " and " <> value2 <<> pluralString
                  lastValue NE.:| otherValues ->
                    "values "
                      <> commaSeparated (reverse otherValues)
                      <> ", and "
                      <> lastValue <<> pluralString
             in "the " <> valuesString
          EnumTableNonTextualCommentColumn colInfo -> typeMismatch "comment column" colInfo PGText
          EnumTableTooManyColumns cols ->
            "the table must have exactly one primary key and optionally one comment column, not "
              <> tshow (length cols)
              <> " columns ("
              <> commaSeparated (sort cols)
              <> ")"
          where
            -- NOTE(review): this let-pattern is partial — it assumes the
            -- column's type is always 'RawColumnTypeScalar' on the Postgres
            -- backend (presumably the nested object/array cases are
            -- uninhabited for Postgres; confirm against 'RawColumnType').
            typeMismatch description colInfo expected =
              let RawColumnTypeScalar scalarType = rciType @('Postgres pgKind) colInfo
               in "the table’s "
                    <> description
                    <> " ("
                    <> rciName colInfo <<> ") must have type "
                    <> expected <<> ", not type " <>> scalarType
|
2021-02-14 09:07:52 +03:00
|
|
|
|
|
|
|
|
|
-- | Reads the rows of a validated enum table and converts them into
-- 'EnumValues'.  Runs inside the read-only transaction opened by
-- 'fetchAndValidateEnumValues'.
--
-- Issues @SELECT <primary-key>, <comment-or-NULL> FROM <table>@ and checks
-- that every primary-key value is a legal GraphQL enum value name,
-- accumulating failures via 'MonadValidate'.
fetchEnumValuesFromDb ::
  forall pgKind m.
  (MonadTx m, MonadValidate [EnumTableIntegrityError ('Postgres pgKind)] m) =>
  QualifiedTable ->
  -- | the table's (single, textual) primary-key column
  RawColumnInfo ('Postgres pgKind) ->
  -- | the optional comment column
  Maybe (RawColumnInfo ('Postgres pgKind)) ->
  m EnumValues
fetchEnumValuesFromDb tableName primaryKeyColumn maybeCommentColumn = do
  -- Select NULL in the comment position when no comment column exists, so
  -- each result row is always a (value, maybe-comment) pair.
  let nullExtr = Extractor SENull Nothing
      commentExtr = maybe nullExtr (mkExtr . rciName) maybeCommentColumn
      query =
        PG.fromBuilder $
          toSQL
            mkSelect
              { selFrom = Just $ mkSimpleFromExp tableName,
                selExtr = [mkExtr (rciName primaryKeyColumn), commentExtr]
              }
  rawEnumValues <- liftTx $ PG.withQE defaultTxErrorHandler query () True
  -- 'dispute' (not 'refute'): record the error but continue, so any other
  -- accumulated problems are reported alongside it.
  when (null rawEnumValues) $ dispute [EnumTableNoEnumValues]
  -- Partition rows into invalid names (Left) and usable enum entries (Right).
  let enumValues = flip map rawEnumValues $
        \(enumValueText, comment) ->
          case mkValidEnumValueName enumValueText of
            Nothing -> Left enumValueText
            Just enumValue -> Right (EnumValue enumValue, EnumValueInfo comment)
      badNames = lefts enumValues
      validEnums = rights enumValues
  case NE.nonEmpty badNames of
    Just someBadNames -> refute [EnumTableInvalidEnumValueNames someBadNames]
    Nothing -> pure $ HashMap.fromList validEnums
  where
    -- A value is usable as a GraphQL enum value name iff it parses as a
    -- GraphQL Name and is not one of the reserved literals, which the spec
    -- forbids as enum values:
    -- https://graphql.github.io/graphql-spec/June2018/#EnumValue
    mkValidEnumValueName name =
      if name `elem` ["true", "false", "null"]
        then Nothing
        else G.mkName name
|