mirror of
https://github.com/hasura/graphql-engine.git
synced 2024-12-18 04:51:35 +03:00
67a9045328
A pull request for cleaning up small issues, bugs, redundancies and missing things in the BigQuery backend. Summary: 1. Remove duplicate projection fields - BigQuery rejects these. 2. Add order_by to the test suite cases, as it was returning inconsistent results. 3. Add lots of in FromIr about how the dataloader approach is given support. 4. Produce the correct output structure for aggregates: a. Should be a singleton object for a top-level aggregate query. b. Should have appropriate aggregate{} and nodes{} labels. c. **Support for nodes** (via array_agg). 5. Smooth over support of array aggregates by removing the fields used for joining with an explicit projection of each wanted field. https://github.com/hasura/graphql-engine-mono/pull/1317 Co-authored-by: Vamshi Surabhi <6562944+0x777@users.noreply.github.com> GitOrigin-RevId: cd3899f4667770a27055f94988ef2a6d5808f1f5
108 lines
3.8 KiB
Haskell
{- |

Working example:

$ curl -XPOST http://localhost:8080/v2/query -d @- <<EOF
{
  "type":"bigquery_run_sql",
  "args": {
    "sql":"select 3 * 4 as foo, \"Hello, World!\" as bar",
    "source":"chinook"
  }
}
EOF
{"result_type":"TuplesOk","result":[["foo","bar"],["12","Hello, World!"]]}

-}
|
module Hasura.Backends.BigQuery.DDL.RunSQL
  ( runSQL
  , runDatabaseInspection
  , BigQueryRunSQL
  )
where
|
import Hasura.Prelude

import qualified Data.Aeson as J
import qualified Data.HashMap.Strict.InsOrd as OMap
import qualified Data.Text as T
import qualified Data.Text.Lazy as LT
import qualified Data.Vector as V

import Data.Aeson.TH (deriveJSON)
import Data.Aeson.Text (encodeToLazyText)

import qualified Hasura.Backends.BigQuery.DataLoader.Execute as Execute
import qualified Hasura.Backends.BigQuery.DataLoader.Plan as Plan
import qualified Hasura.Backends.BigQuery.Types as BigQuery

import Hasura.Backends.BigQuery.Source (BigQuerySourceConfig (..))
import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.RQL.DDL.Schema (RunSQLRes (..))
import Hasura.RQL.Types (CacheRWM, MetadataM, SourceName,
                         askSourceConfig)
import Hasura.SQL.Backend
|
-- | Payload of the @bigquery_run_sql@ metadata API request: a raw SQL
-- string plus the name of the BigQuery source to run it against.
--
-- Both fields are strict: previously only '_mrsSource' carried a bang,
-- which was inconsistent and left '_mrsSql' lazily thunked after JSON
-- decoding for no benefit.
data BigQueryRunSQL
  = BigQueryRunSQL
  { _mrsSql :: !Text
    -- ^ The SQL text to execute verbatim.
  , _mrsSource :: !SourceName
    -- ^ Which configured BigQuery source to run against.
  } deriving (Show, Eq)

-- JSON (de)serialization derived from the field names via the project's
-- standard 'hasuraJSON' options (strips the @_mrs@ prefix).
$(deriveJSON hasuraJSON ''BigQueryRunSQL)
|
-- | Handler for the @bigquery_run_sql@ endpoint: execute the request's SQL
-- and render the result as a header row followed by data rows (the
-- \"TuplesOk\" shape shown in the module header example).
runSQL ::
  (MonadIO m, CacheRWM m, MonadError QErr m, MetadataM m) =>
  BigQueryRunSQL ->
  m EncJSON
runSQL = runSQL_ recordSetAsHeaderAndRows
|
-- | The SQL query in the request is ignored
--
-- Instead, one INFORMATION_SCHEMA query per configured dataset is
-- generated, each fetching every table together with an ARRAY of its
-- columns, and the results are combined with UNION ALL before being
-- rendered through 'recordSetAsSchema'.
runDatabaseInspection ::
  (MonadIO m, CacheRWM m, MonadError QErr m, MetadataM m) =>
  BigQueryRunSQL ->
  m EncJSON
runDatabaseInspection (BigQueryRunSQL _query source) = do
  BigQuerySourceConfig{_scDatasets = dataSets} <- askSourceConfig @'BigQuery source
  let perDataset dataSet =
        "SELECT *, ARRAY(SELECT as STRUCT * from "
          <> dataSet
          <> ".INFORMATION_SCHEMA.COLUMNS WHERE table_name = t.table_name) as columns from "
          <> dataSet
          <> ".INFORMATION_SCHEMA.TABLES as t"
      combinedSql = T.intercalate " UNION ALL " (map perDataset dataSets)
  runSQL_ recordSetAsSchema (BigQueryRunSQL combinedSql source)
|
-- | Shared driver for 'runSQL' and 'runDatabaseInspection': stream the
-- given SQL through the dataloader executor and render the resulting
-- record set with the supplied projection function.
runSQL_ ::
  (MonadIO m, CacheRWM m, MonadError QErr m, MetadataM m) =>
  (Execute.RecordSet -> J.Value) ->
  -- ^ How to turn the raw record set into the response's JSON payload.
  BigQueryRunSQL ->
  m EncJSON
runSQL_ render (BigQueryRunSQL query source) = do
  sourceConfig <- askSourceConfig @'BigQuery source
  let job =
        Execute.BigQuery
          { query = LT.fromStrict query
          , parameters = mempty
          , cardinality = BigQuery.Many
          }
  result <- Execute.streamBigQuery sourceConfig job
  case result of
    -- TODO: Pretty print the error type.
    Left queryError -> throw400 BigQueryError (tshow queryError)
    Right recordSet ->
      pure (encJFromJValue (RunSQLRes "TuplesOk" (render recordSet)))
|
-- | Render a record set as a JSON array whose first element is the list
-- of column names (taken from the first row, if any) and whose remaining
-- elements are the rows' values — the classic \"TuplesOk\" layout.
recordSetAsHeaderAndRows :: Execute.RecordSet -> J.Value
recordSetAsHeaderAndRows Execute.RecordSet {rows} = J.toJSON (header : body)
  where
    -- Column names come from the first row; an empty result set yields
    -- an empty header.
    header :: [J.Value]
    header =
      maybe
        []
        (map (J.toJSON . (coerce :: Plan.FieldName -> Text)) . OMap.keys)
        (rows V.!? 0)
    body :: [[J.Value]]
    body = [map J.toJSON (OMap.elems row) | row <- toList rows]
|
-- | Render an introspection record set: the nested @columns@ ARRAY of
-- each row is flattened to a JSON-encoded text value so the result fits
-- the flat header-and-rows layout of 'recordSetAsHeaderAndRows'.
recordSetAsSchema :: Execute.RecordSet -> J.Value
recordSetAsSchema rs@(Execute.RecordSet {rows}) =
  recordSetAsHeaderAndRows rs {Execute.rows = stringifyColumns <$> rows}
  where
    -- Replace the structured "columns" field (when present) with its
    -- JSON encoding as a single text value; other fields are untouched.
    stringifyColumns =
      OMap.adjust
        (Execute.TextOutputValue . LT.toStrict . encodeToLazyText . J.toJSON)
        (Plan.FieldName "columns")