BigQuery hspec tests

PR-URL: https://github.com/hasura/graphql-engine-mono/pull/3435
Co-authored-by: Vishnu Bharathi <4211715+scriptnull@users.noreply.github.com>
GitOrigin-RevId: 78f698becb83bd9732eddbe419361aff2799dd2c
Kirill Zaborsky 2022-02-09 18:26:14 +03:00 committed by hasura-bot
parent 4d2a5dba51
commit 773870f443
25 changed files with 423 additions and 112 deletions

View File

@@ -33,31 +33,38 @@ function generate_test_project() {
# NOTE: project_id_part may be shortened & altered to meet gcloud project ID requirements:
# https://cloud.google.com/resource-manager/docs/creating-managing-projects
local project_id_part=${1:-$(uuidgen)}
local tmp_id_file=${2}
HASURA_BIGQUERY_PROJECT_ID=$(echo bq-"$project_id_part" | cut -c1-30 | tr '[:upper:]' '[:lower:]')
echo ""
echo "--- create a short-lived bigquery project id: $HASURA_BIGQUERY_PROJECT_ID"
gcloud projects create "$HASURA_BIGQUERY_PROJECT_ID" --folder="$HASURA_BIGQUERY_TEST_DIR"
# projects require linking to a billing account to run any queries
# https://cloud.google.com/billing/docs
gcloud beta billing projects link "$HASURA_BIGQUERY_PROJECT_ID" --billing-account "$HASURA_BIGQUERY_BILLING_ACCT"
# checking project existence
gcloud projects describe "$HASURA_BIGQUERY_PROJECT_ID" --quiet
export HASURA_BIGQUERY_PROJECT_ID
echo "$HASURA_BIGQUERY_PROJECT_ID" > "$tmp_id_file"
}
function create_hasura_test_dataset() {
local name='hasura_test' # all bigquery tests expect a dataset to exist with this name
function create_bigquery_dataset() {
local dataset_name=${1}
echo ""
echo "--- create a test dataset id: $HASURA_BIGQUERY_PROJECT_ID:$name"
echo "--- create a test dataset id: $HASURA_BIGQUERY_PROJECT_ID:$dataset_name"
bq --location=US mk -d \
--project_id "$HASURA_BIGQUERY_PROJECT_ID" \
"$name"
"$dataset_name"
echo "ok"
}
function create_temp_bigquery_project() {
local project_id_part=${1:-$(uuidgen)}
local dataset_name=${2}
local tmp_id_file=${3}
verify_temp_project_env
generate_test_project "$project_id_part"
create_hasura_test_dataset
generate_test_project "$project_id_part" "${tmp_id_file}"
create_bigquery_dataset "$dataset_name"
}
function delete_temp_bigquery_project() {
@@ -66,3 +73,41 @@ function delete_temp_bigquery_project() {
echo "--- delete bigquery project id: $project_id"
gcloud projects delete "$project_id" --quiet
}
authenticate_bigquery() {
local tests_dir=${1}
echo "--- :unlock: authenticate bigquery service account"
export HASURA_BIGQUERY_SERVICE_ACCOUNT_FILE="gcloud-service-key.json"
pushd "$tests_dir" || { echo "Couldn't pushd to $tests_dir"; exit 1; }
echo "${HASURA_BIGQUERY_SERVICE_KEY}" > "$HASURA_BIGQUERY_SERVICE_ACCOUNT_FILE"
gcloud auth activate-service-account --key-file="$HASURA_BIGQUERY_SERVICE_ACCOUNT_FILE" || { echo "Couldn't authenticate on GCloud"; exit 1; }
popd || { echo "Couldn't popd"; exit 1; }
}
ensure_bigquery_dataset() {
local dataset_name=${1}
echo "--- :database: ensure the bigquery data source is accessible, i.e. we can access the $dataset_name dataset in bigquery project"
for _ in $(seq 1 60);
do
curl --fail --output /dev/null \
"https://content-bigquery.googleapis.com/bigquery/v2/projects/$HASURA_BIGQUERY_PROJECT_ID/datasets/$dataset_name/tables?alt=json&key=$HASURA_BIGQUERY_API_KEY" \
-H "Authorization: Bearer $(gcloud auth print-access-token "$HASURA_BIGQUERY_IAM_ACCOUNT" \
--project="$HASURA_BIGQUERY_PROJECT_ID")" \
&& echo "Success" && return 0
echo -n .
sleep 1
done
echo "Failed waiting for bigquery dataset"
exit 1
}
remove_temp_project_with_id_in_file() {
local tmp_id_file="$1"
if [ -f "$tmp_id_file" ]; then
# necessary as $HASURA_BIGQUERY_PROJECT_ID is changed in the subshell
delete_temp_bigquery_project "$(cat "$tmp_id_file")"
rm "$tmp_id_file"
fi
}

View File

@@ -974,7 +974,9 @@ test-suite tests-hspec
Harness.Backend.Sqlserver
Harness.Backend.Postgres
Harness.Backend.Citus
Harness.Backend.BigQuery
Harness.Env
Harness.GraphqlEngine
Harness.Http
Harness.State

View File

@@ -5,6 +5,7 @@ module Hasura.Backends.BigQuery.Connection
resolveConfigurationInput,
resolveConfigurationInputs,
resolveConfigurationJson,
initConnection,
runBigQuery,
)
where
@@ -94,6 +95,11 @@ resolveConfigurationInputs env = \case
FromYamls a -> pure a
FromEnvs v -> filter (not . T.null) . T.splitOn "," <$> MSSQLConn.getEnv env v
initConnection :: MonadIO m => ServiceAccount -> Text -> m BigQueryConnection
initConnection _bqServiceAccount _bqProjectId = do
_bqAccessTokenMVar <- liftIO $ newMVar Nothing -- `runBigQuery` initializes the token
pure BigQueryConnection {..}
getAccessToken :: MonadIO m => ServiceAccount -> m (Either TokenProblem TokenResp)
getAccessToken sa = do
eJwt <- encodeBearerJWT sa ["https://www.googleapis.com/auth/cloud-platform"]
@@ -163,13 +169,13 @@ getAccessToken sa = do
]
-- | Get a usable token. If the token has expired, refresh it.
getUsableToken :: MonadIO m => BigQuerySourceConfig -> m (Either TokenProblem TokenResp)
getUsableToken BigQuerySourceConfig {_scServiceAccount, _scAccessTokenMVar} =
getUsableToken :: MonadIO m => BigQueryConnection -> m (Either TokenProblem TokenResp)
getUsableToken BigQueryConnection {_bqServiceAccount, _bqAccessTokenMVar} =
liftIO $
modifyMVar _scAccessTokenMVar $ \mTokenResp -> do
modifyMVar _bqAccessTokenMVar $ \mTokenResp -> do
case mTokenResp of
Nothing -> do
refreshedToken <- getAccessToken _scServiceAccount
refreshedToken <- getAccessToken _bqServiceAccount
case refreshedToken of
Left e -> pure (Nothing, Left e)
Right t -> pure (Just t, Right t)
@@ -177,7 +183,7 @@ getUsableToken BigQuerySourceConfig {_scServiceAccount, _scAccessTokenMVar} =
pt <- liftIO $ getPOSIXTime
if (pt >= fromIntegral _trExpiresAt - (10 :: NominalDiffTime)) -- when posix-time is greater than expires-at-minus-threshold
then do
refreshedToken' <- getAccessToken _scServiceAccount
refreshedToken' <- getAccessToken _bqServiceAccount
case refreshedToken' of
Left e -> pure (Just t, Left e)
Right t' -> pure (Just t', Right t')
@@ -189,11 +195,11 @@ data BigQueryProblem
runBigQuery ::
(MonadIO m) =>
BigQuerySourceConfig ->
BigQueryConnection ->
Request ->
m (Either BigQueryProblem (Response BL.ByteString))
runBigQuery sc req = do
eToken <- getUsableToken sc
runBigQuery conn req = do
eToken <- getUsableToken conn
case eToken of
Left e -> pure . Left . TokenProblem $ e
Right TokenResp {_trAccessToken, _trExpiresAt} -> do
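This refactor threads a dedicated BigQueryConnection through every request instead of the whole BigQuerySourceConfig. A minimal usage sketch (demo and its wiring are hypothetical, not part of this diff; the Request would be built with parseRequest_ as in createQueryJob):

import Data.Text (Text)
import Hasura.Backends.BigQuery.Connection (initConnection, runBigQuery)
import Hasura.Backends.BigQuery.Source (ServiceAccount)
import Network.HTTP.Simple (Request)

-- One connection, many requests: the token MVar inside the connection
-- starts as Nothing, is filled by the first runBigQuery call, and is
-- refreshed by getUsableToken shortly before it expires.
demo :: ServiceAccount -> Text -> Request -> IO ()
demo serviceAccount projectId req = do
  conn <- initConnection serviceAccount projectId
  result <- runBigQuery conn req
  case result of
    Left _problem -> putStrLn "request failed (token or HTTP problem)"
    Right _response -> putStrLn "request succeeded"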

View File

@@ -27,7 +27,6 @@ import Data.Text.Lazy qualified as LT
import Data.Vector qualified as V
import Hasura.Backends.BigQuery.Execute qualified as Execute
import Hasura.Backends.BigQuery.Source (BigQuerySourceConfig (..))
import Hasura.Backends.BigQuery.Types qualified as BigQuery
import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.Prelude
@@ -76,8 +75,8 @@ runSQL_ f (BigQueryRunSQL query source) = do
sourceConfig <- askSourceConfig @'BigQuery source
result <-
Execute.streamBigQuery
sourceConfig
Execute.BigQuery {query = LT.fromStrict query, parameters = mempty, cardinality = BigQuery.Many}
(_scConnection sourceConfig)
Execute.BigQuery {query = LT.fromStrict query, parameters = mempty}
case result of
Left queryError -> throw400 BigQueryError (tshow queryError) -- TODO: Pretty print the error type.
Right recordSet ->
@@ -95,7 +94,7 @@ recordSetAsHeaderAndRows Execute.RecordSet {rows} = J.toJSON (thead : tbody)
Just row ->
map (J.toJSON . (coerce :: Execute.FieldNameText -> Text)) (OMap.keys row)
tbody :: [[J.Value]]
tbody = map (\row -> map J.toJSON (OMap.elems row)) (toList rows)
tbody = map (map J.toJSON . OMap.elems) (toList rows)
recordSetAsSchema :: Execute.RecordSet -> J.Value
recordSetAsSchema rs@(Execute.RecordSet {rows}) =

View File

@@ -7,7 +7,6 @@ module Hasura.Backends.BigQuery.DDL.Source
)
where
import Control.Concurrent.MVar (newMVar)
import Data.Aeson qualified as J
import Data.ByteString.Lazy qualified as L
import Data.Environment qualified as Env
@@ -42,9 +41,10 @@ resolveSourceConfig _name BigQueryConnSourceConfig {..} env = runExceptT $ do
eSA <- resolveConfigurationJson env _cscServiceAccount
case eSA of
Left e -> throw400 Unexpected $ T.pack e
Right _scServiceAccount -> do
Right serviceAccount -> do
projectId <- resolveConfigurationInput env _cscProjectId
_scConnection <- initConnection serviceAccount projectId
_scDatasets <- resolveConfigurationInputs env _cscDatasets
_scProjectId <- resolveConfigurationInput env _cscProjectId
_scGlobalSelectLimit <-
resolveConfigurationInput env `mapM` _cscGlobalSelectLimit >>= \case
Nothing -> pure defaultGlobalSelectLimit
@@ -57,12 +57,7 @@ resolveSourceConfig _name BigQueryConnSourceConfig {..} env = runExceptT $ do
Just i' -> do
when (i' < 0) $ throw400 Unexpected "Need the integer for the global select limit to be non-negative"
pure i'
trMVar <- liftIO $ newMVar Nothing -- `runBigQuery` initializes the token
pure
BigQuerySourceConfig
{ _scAccessTokenMVar = trMVar,
..
}
pure BigQuerySourceConfig {..}
resolveSource ::
(MonadIO m) =>

View File

@@ -7,6 +7,7 @@ module Hasura.Backends.BigQuery.Execute
( executeSelect,
runExecute,
streamBigQuery,
executeBigQuery,
BigQuery (..),
OutputValue (..),
RecordSet (..),
@@ -101,7 +102,7 @@ instance Aeson.ToJSON OutputValue where
RecordOutputValue !record -> Aeson.toJSON record
data ExecuteReader = ExecuteReader
{ credentials :: !BigQuerySourceConfig
{ sourceConfig :: !BigQuerySourceConfig
}
data ExecuteProblem
@@ -148,8 +149,7 @@ data BigQueryType
data BigQuery = BigQuery
{ query :: !LT.Text,
parameters :: !(InsOrdHashMap ParameterName Parameter),
cardinality :: BigQuery.Cardinality
parameters :: !(InsOrdHashMap ParameterName Parameter)
}
deriving (Show)
@@ -211,20 +211,20 @@ runExecute ::
BigQuerySourceConfig ->
Execute RecordSet ->
m (Either ExecuteProblem RecordSet)
runExecute credentials m =
runExecute sourceConfig m =
liftIO
( runExceptT
( runReaderT
(unExecute (m >>= getFinalRecordSet))
(ExecuteReader {credentials})
(ExecuteReader {sourceConfig})
)
)
executeSelect :: Select -> Execute RecordSet
executeSelect select = do
credentials <- asks credentials
conn <- asks (_scConnection . sourceConfig)
recordSet <-
streamBigQuery credentials (selectToBigQuery select) >>= liftEither
streamBigQuery conn (selectToBigQuery select) >>= liftEither
pure recordSet {wantedFields = selectFinalWantedFields select}
-- | This is needed to strip out unneeded fields (join keys) in the
@@ -263,8 +263,7 @@ selectToBigQuery select =
)
)
(OMap.toList params)
),
cardinality = selectCardinality select
)
}
where
(query, params) =
@@ -348,14 +347,14 @@ valueToBigQueryJson = go
-- response. Until that test has been done, we should consider this a
-- preliminary implementation.
streamBigQuery ::
MonadIO m => BigQuerySourceConfig -> BigQuery -> m (Either ExecuteProblem RecordSet)
streamBigQuery credentials bigquery = do
jobResult <- createQueryJob credentials bigquery
MonadIO m => BigQueryConnection -> BigQuery -> m (Either ExecuteProblem RecordSet)
streamBigQuery conn bigquery = do
jobResult <- createQueryJob conn bigquery
case jobResult of
Right job -> loop Nothing Nothing
where
loop pageToken mrecordSet = do
results <- getJobResults credentials job Fetch {pageToken}
results <- getJobResults conn job Fetch {pageToken}
case results of
Left problem -> pure (Left problem)
Right
@@ -379,6 +378,23 @@ streamBigQuery credentials bigquery = do
loop pageToken mrecordSet
Left e -> pure (Left e)
-- | Execute a query without expecting any output (e.g. CREATE TABLE or INSERT)
executeBigQuery :: MonadIO m => BigQueryConnection -> BigQuery -> m (Either ExecuteProblem ())
executeBigQuery conn bigquery = do
jobResult <- createQueryJob conn bigquery
case jobResult of
Right job -> loop Nothing
where
loop mrecordSet = do
results <- getJobResults conn job Fetch {pageToken = Nothing}
case results of
Left problem -> pure (Left problem)
Right (JobComplete _) -> pure (Right ())
Right JobIncomplete {} -> do
liftIO (threadDelay (1000 * 1000 * streamDelaySeconds))
loop mrecordSet
Left e -> pure (Left e)
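Where streamBigQuery pages through results and accumulates a RecordSet, the new executeBigQuery only polls until the job completes, which is all that DDL/DML statements need. A sketch of the intended use, assuming OverloadedStrings for the LT.Text query field (the harness helper run_ added below wraps exactly this call):

-- Fire-and-forget DDL; conn comes from initConnection and the table
-- name is purely illustrative.
createExampleTable :: BigQueryConnection -> IO (Either ExecuteProblem ())
createExampleTable conn =
  executeBigQuery
    conn
    BigQuery {query = "CREATE TABLE hasura.example (id INT64)", parameters = mempty}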
--------------------------------------------------------------------------------
-- Querying results from a job
@@ -434,17 +450,17 @@ data Fetch = Fetch
-- | Get results of a job.
getJobResults ::
MonadIO m =>
BigQuerySourceConfig ->
BigQueryConnection ->
Job ->
Fetch ->
m (Either ExecuteProblem JobResultsResponse)
getJobResults sc@BigQuerySourceConfig {..} Job {jobId, location} Fetch {pageToken} =
getJobResults conn Job {jobId, location} Fetch {pageToken} =
liftIO (catchAny run (pure . Left . GetJobResultsProblem))
where
-- https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs/get#query-parameters
url =
"GET https://bigquery.googleapis.com/bigquery/v2/projects/"
<> T.unpack _scProjectId
<> T.unpack (_bqProjectId conn)
<> "/queries/"
<> T.unpack jobId
<> "?alt=json&prettyPrint=false"
@@ -456,7 +472,7 @@ getJobResults sc@BigQuerySourceConfig {..} Job {jobId, location} Fetch {pageToke
let req =
setRequestHeader "Content-Type" ["application/json"] $
parseRequest_ url
eResp <- runBigQuery sc req
eResp <- runBigQuery conn req
case eResp of
Left e -> pure (Left (ExecuteRunBigQueryProblem e))
Right resp ->
@@ -505,8 +521,8 @@ instance Aeson.FromJSON Job where
)
-- | Create a job asynchronously.
createQueryJob :: MonadIO m => BigQuerySourceConfig -> BigQuery -> m (Either ExecuteProblem Job)
createQueryJob sc@BigQuerySourceConfig {..} BigQuery {..} =
createQueryJob :: MonadIO m => BigQueryConnection -> BigQuery -> m (Either ExecuteProblem Job)
createQueryJob conn BigQuery {..} =
liftIO
( do
-- putStrLn (LT.unpack query)
@@ -516,13 +532,13 @@ createQueryJob sc@BigQuerySourceConfig {..} BigQuery {..} =
run = do
let url =
"POST https://content-bigquery.googleapis.com/bigquery/v2/projects/"
<> T.unpack _scProjectId
<> T.unpack (_bqProjectId conn)
<> "/jobs?alt=json&prettyPrint=false"
let req =
setRequestHeader "Content-Type" ["application/json"] $
setRequestBodyLBS body $
parseRequest_ url
eResp <- runBigQuery sc req
eResp <- runBigQuery conn req
case eResp of
Left e -> pure (Left (ExecuteRunBigQueryProblem e))
Right resp ->

View File

@@ -181,17 +181,17 @@ getTables ::
MonadIO m =>
BigQuerySourceConfig ->
m (Either RestProblem [RestTable])
getTables sc@BigQuerySourceConfig {..} =
getTables BigQuerySourceConfig {..} =
runExceptT
(fmap concat (traverse (ExceptT . getTablesForDataSet sc) _scDatasets))
(fmap concat (traverse (ExceptT . getTablesForDataSet _scConnection) _scDatasets))
-- | Get tables in the dataset.
getTablesForDataSet ::
MonadIO m =>
BigQuerySourceConfig ->
BigQueryConnection ->
Text ->
m (Either RestProblem [RestTable])
getTablesForDataSet sc@BigQuerySourceConfig {..} dataSet = do
getTablesForDataSet conn dataSet = do
result <-
liftIO (catchAny (run Nothing mempty) (pure . Left . GetTablesProblem))
case result of
@@ -201,7 +201,7 @@ getTablesForDataSet sc@BigQuerySourceConfig {..} dataSet = do
sequence
( traverse
( \RestTableBrief {tableReference = RestTableReference {tableId}} ->
getTable sc dataSet tableId
getTable conn dataSet tableId
)
briefs
)
@@ -210,7 +210,7 @@ getTablesForDataSet sc@BigQuerySourceConfig {..} dataSet = do
let req =
setRequestHeader "Content-Type" ["application/json"] $
parseRequest_ url
eResp <- runBigQuery sc req
eResp <- runBigQuery conn req
case eResp of
Left e -> pure (Left (GetTablesBigQueryProblem e))
Right resp ->
@@ -226,7 +226,7 @@ getTablesForDataSet sc@BigQuerySourceConfig {..} dataSet = do
where
url =
"GET https://bigquery.googleapis.com/bigquery/v2/projects/"
<> T.unpack _scProjectId
<> T.unpack (_bqProjectId conn)
<> "/datasets/"
<> T.unpack dataSet
<> "/tables?alt=json&"
@@ -241,18 +241,18 @@ getTablesForDataSet sc@BigQuerySourceConfig {..} dataSet = do
-- | Get tables in the schema.
getTable ::
MonadIO m =>
BigQuerySourceConfig ->
BigQueryConnection ->
Text ->
Text ->
m (Either RestProblem RestTable)
getTable sc@BigQuerySourceConfig {..} dataSet tableId = do
getTable conn dataSet tableId = do
liftIO (catchAny run (pure . Left . GetTableProblem))
where
run = do
let req =
setRequestHeader "Content-Type" ["application/json"] $
parseRequest_ url
eResp <- runBigQuery sc req
eResp <- runBigQuery conn req
case eResp of
Left e -> pure (Left (GetTablesBigQueryProblem e))
Right resp ->
@@ -265,7 +265,7 @@ getTable sc@BigQuerySourceConfig {..} dataSet tableId = do
where
url =
"GET https://bigquery.googleapis.com/bigquery/v2/projects/"
<> T.unpack _scProjectId
<> T.unpack (_bqProjectId conn)
<> "/datasets/"
<> T.unpack dataSet
<> "/tables/"

View File

@@ -4,6 +4,7 @@
module Hasura.Backends.BigQuery.Source
( BigQueryConnSourceConfig (..),
BigQueryConnection (..),
BigQuerySourceConfig (..),
ConfigurationInput (..),
ConfigurationInputs (..),
@@ -156,11 +157,16 @@ deriving instance Hashable BigQueryConnSourceConfig
instance Cacheable BigQueryConnSourceConfig where
unchanged _ = (==)
data BigQueryConnection = BigQueryConnection
{ _bqServiceAccount :: !ServiceAccount,
_bqProjectId :: !Text, -- this is part of service-account.json, but we put it here on purpose
_bqAccessTokenMVar :: !(MVar (Maybe TokenResp))
}
deriving (Eq)
data BigQuerySourceConfig = BigQuerySourceConfig
{ _scServiceAccount :: !ServiceAccount,
{ _scConnection :: !BigQueryConnection,
_scDatasets :: ![Text],
_scProjectId :: !Text, -- this is part of service-account.json, but we put it here on purpose
_scAccessTokenMVar :: !(MVar (Maybe TokenResp)),
_scGlobalSelectLimit :: !Int.Int64
}
deriving (Eq)
@@ -171,8 +177,8 @@ instance Cacheable BigQuerySourceConfig where
instance J.ToJSON BigQuerySourceConfig where
toJSON BigQuerySourceConfig {..} =
J.object
[ "service_account" J..= _scServiceAccount,
[ "service_account" J..= _bqServiceAccount _scConnection,
"datasets" J..= _scDatasets,
"project_id" J..= _scProjectId,
"project_id" J..= _bqProjectId _scConnection,
"global_select_limit" J..= _scGlobalSelectLimit
]

View File

@@ -0,0 +1,47 @@
{-# OPTIONS -Wno-redundant-constraints #-}
-- | BigQuery helpers.
module Harness.Backend.BigQuery
( run_,
getServiceAccount,
getProjectId,
)
where
import Control.Exception
import Data.String
import GHC.Stack
import Harness.Constants as Constants
import Harness.Env
import Hasura.Backends.BigQuery.Connection (initConnection)
import Hasura.Backends.BigQuery.Execute (BigQuery (..), executeBigQuery)
import Hasura.Backends.BigQuery.Source (ServiceAccount)
import Hasura.Prelude
getServiceAccount :: (HasCallStack) => IO ServiceAccount
getServiceAccount = getEnvJSON Constants.bigqueryServiceAccountVar
getProjectId :: (HasCallStack) => IO Text
getProjectId = getEnvString Constants.bigqueryProjectIdVar
-- | Run a plain Standard SQL string against the server, ignoring the
-- result; this just checks for errors.
run_ :: (HasCallStack) => ServiceAccount -> Text -> String -> IO ()
run_ serviceAccount projectId query =
handle (\(e :: SomeException) -> bigQueryError e query) $ do
conn <- initConnection serviceAccount projectId
res <- executeBigQuery conn BigQuery {query = fromString query, parameters = mempty}
case res of
Left err -> bigQueryError err query
Right () -> pure ()
bigQueryError :: (Show e, HasCallStack) => e -> String -> IO ()
bigQueryError e query =
error
( unlines
[ "BigQuery query error:",
show e,
"SQL was:",
query
]
)
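Taken together: a spec's setup resolves credentials from the environment and issues raw SQL through run_. A sketch under the same assumptions as this module (the table layout is illustrative; bigquerySetup in Test.BasicFieldsSpec below does the real thing):

-- Reads HASURA_BIGQUERY_SERVICE_ACCOUNT and HASURA_BIGQUERY_PROJECT_ID,
-- then runs one DDL statement, calling `error` with the SQL on failure.
setupExampleTable :: IO ()
setupExampleTable = do
  serviceAccount <- getServiceAccount
  projectId <- getProjectId
  run_ serviceAccount projectId "CREATE TABLE hasura.example (id INT64, name STRING);"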

View File

@@ -24,6 +24,8 @@ module Harness.Constants
sqlserverLivenessCheckIntervalSeconds,
sqlserverLivenessCheckIntervalMicroseconds,
sqlserverConnectInfo,
bigqueryServiceAccountVar,
bigqueryProjectIdVar,
httpHealthCheckAttempts,
httpHealthCheckIntervalSeconds,
httpHealthCheckIntervalMicroseconds,
@@ -167,6 +169,12 @@ mysqlConnectInfo =
Mysql.connectPort = mysqlPort
}
bigqueryServiceAccountVar :: String
bigqueryServiceAccountVar = "HASURA_BIGQUERY_SERVICE_ACCOUNT"
bigqueryProjectIdVar :: String
bigqueryProjectIdVar = "HASURA_BIGQUERY_PROJECT_ID"
-- * HTTP health checks
httpHealthCheckAttempts :: Int

View File

@@ -0,0 +1,32 @@
{-# OPTIONS -Wno-redundant-constraints #-}
module Harness.Env (getEnvRead, getEnvJSON, getEnvString) where
import Data.Aeson qualified as Aeson
import Data.String
import GHC.Stack
import Hasura.Prelude
import System.Environment (getEnv)
getEnvRead :: (Read a, HasCallStack) => String -> IO a
getEnvRead var = do
str <- getEnv var
onNothing
(readMaybe str)
( error
( unlines
["Failure parsing " <> var, " containing value " <> show str]
)
)
getEnvString :: (IsString a, HasCallStack) => String -> IO a
getEnvString var = fromString <$> getEnv var
getEnvJSON :: (Aeson.FromJSON a, HasCallStack) => String -> IO a
getEnvJSON var = do
accountString <- getEnv var
onLeft
(Aeson.eitherDecode' (fromString accountString))
( \err ->
error (unlines ["Failure parsing " <> var <> ":", show err])
)
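A usage sketch mirroring how Harness.Backend.BigQuery above resolves its configuration (loadBigQueryEnv itself is hypothetical; the variable names are the ones added to Harness.Constants):

import Data.Text (Text)
import Hasura.Backends.BigQuery.Source (ServiceAccount)

loadBigQueryEnv :: IO (ServiceAccount, Text)
loadBigQueryEnv = do
  serviceAccount <- getEnvJSON "HASURA_BIGQUERY_SERVICE_ACCOUNT" -- Aeson-decoded
  projectId <- getEnvString "HASURA_BIGQUERY_PROJECT_ID" -- raw string
  pure (serviceAccount, projectId)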

View File

@@ -16,12 +16,17 @@ import Control.Monad.Reader
import Control.Monad.Trans.Resource
import Data.Aeson
import Data.Aeson qualified as Aeson
import Data.Aeson.Text (encodeToLazyText)
import Data.ByteString.Char8 qualified as BS8
import Data.Conduit
import Data.Conduit.List qualified as CL
import Data.HashMap.Strict qualified as Map
import Data.Text.Encoding.Error qualified as T
import Data.Text.Lazy qualified as LT
import Data.Vector qualified as V
import Data.Yaml qualified
import Data.Yaml.Internal qualified
import Harness.Test.Feature (BackendOptions (..))
import Instances.TH.Lift ()
import Language.Haskell.TH
import Language.Haskell.TH.Lift as TH
@@ -40,20 +45,35 @@ import Prelude
--
-- We use 'Visual' internally to easily display the 'Value' as YAML
-- when the test suite uses its 'Show' instance.
shouldReturnYaml :: IO Value -> Value -> IO ()
shouldReturnYaml actualIO expected = do
shouldReturnYaml :: BackendOptions -> IO Value -> Value -> IO ()
shouldReturnYaml BackendOptions {stringifyNumbers} actualIO expected = do
actual <- actualIO
shouldBe (Visual actual) (Visual expected)
let expected' =
if stringifyNumbers then stringifyExpectedToActual expected actual else expected
shouldBe (Visual actual) (Visual expected')
stringifyExpectedToActual :: Value -> Value -> Value
stringifyExpectedToActual (Number n) (String _) = String (LT.toStrict $ encodeToLazyText n)
stringifyExpectedToActual (Object hm) (Object hm') =
let stringifyKV k v =
case Map.lookup k hm' of
Just v' -> stringifyExpectedToActual v v'
Nothing -> v
in Object (Map.mapWithKey stringifyKV hm)
stringifyExpectedToActual (Array as) (Array bs) = Array (V.zipWith stringifyExpectedToActual as bs)
stringifyExpectedToActual expected _ = expected
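To make the traversal concrete: BigQuery returns numeric columns as strings, so with stringifyNumbers enabled each expected Number is rewritten to its textual rendering wherever the actual value is a String, and everything else is left alone. A GHCi-style sketch (assuming Data.Aeson's object and .= are in scope):

-- >>> let expected = object ["id" .= (1 :: Int), "name" .= ("Author 1" :: Text)]
-- >>> let actual   = object ["id" .= ("1" :: Text), "name" .= ("Author 1" :: Text)]
-- >>> stringifyExpectedToActual expected actual == actual
-- True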
-- | The action @actualIO@ should produce the @expected@ YAML,
-- represented (by the yaml package) as an aeson 'Value'.
--
-- We use 'Visual' internally to easily display the 'Value' as YAML
-- when the test suite uses its 'Show' instance.
shouldReturnOneOfYaml :: IO Value -> [Value] -> IO ()
shouldReturnOneOfYaml actualIO expected = do
shouldReturnOneOfYaml :: BackendOptions -> IO Value -> [Value] -> IO ()
shouldReturnOneOfYaml BackendOptions {stringifyNumbers} actualIO expecteds = do
actual <- actualIO
shouldContain (map Visual expected) [Visual actual]
let fixNumbers expected =
if stringifyNumbers then stringifyExpectedToActual expected actual else expected
shouldContain (map (Visual . fixNumbers) expecteds) [Visual actual]
-------------------------------------------------------------------

View File

@@ -3,6 +3,8 @@ module Harness.Test.Feature
( feature,
Feature (..),
Backend (..),
BackendOptions (..),
defaultBackendOptions,
)
where
@@ -15,7 +17,7 @@ import Prelude
-- | Use this record to put together a test against a set of backends.
data Feature = Feature
{ backends :: [Backend],
tests :: SpecWith State
tests :: BackendOptions -> SpecWith State
}
-- | A backend specification.
@@ -27,16 +29,26 @@ data Backend = Backend
-- tables calls.
setup :: State -> IO (),
-- | Clean up any resources you created in 'setup'.
teardown :: State -> IO ()
teardown :: State -> IO (),
-- | Backend-specific details which should be taken into account in tests
backendOptions :: BackendOptions
}
data BackendOptions = BackendOptions
{ -- | Whether numeric values are output as strings for this backend
stringifyNumbers :: Bool
}
defaultBackendOptions :: BackendOptions
defaultBackendOptions = BackendOptions {stringifyNumbers = False}
-- | Test the feature, running the setup before any tests are run, and
-- ensuring teardown happens after all tests are run.
feature :: Feature -> SpecWith State
feature Feature {backends, tests} =
for_
backends
( \Backend {name, setup, teardown} ->
( \Backend {name, setup, teardown, backendOptions} ->
describe
name
( aroundAllWith
@@ -48,7 +60,7 @@ feature Feature {backends, tests} =
)
(teardown state)
)
tests
(tests backendOptions)
)
)

View File

@@ -25,7 +25,8 @@ spec =
[ Feature.Backend
{ name = "MySQL",
setup = mysqlSetup,
teardown = mysqlTeardown
teardown = mysqlTeardown,
backendOptions = Feature.defaultBackendOptions
}
],
Feature.tests = tests
@@ -141,10 +142,11 @@ DROP TABLE author;
--------------------------------------------------------------------------------
-- Tests
tests :: SpecWith State
tests = do
tests :: Feature.BackendOptions -> SpecWith State
tests opts = do
it "Select an author and one of their articles" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|

View File

@@ -4,6 +4,7 @@
-- | Test querying an entity for a couple fields.
module Test.BasicFieldsSpec (spec) where
import Harness.Backend.BigQuery as BigQuery
import Harness.Backend.Citus as Citus
import Harness.Backend.Mysql as Mysql
import Harness.Backend.Postgres as Postgres
@@ -28,22 +29,35 @@ spec =
[ Feature.Backend
{ name = "MySQL",
setup = mysqlSetup,
teardown = mysqlTeardown
teardown = mysqlTeardown,
backendOptions = Feature.defaultBackendOptions
},
Feature.Backend
{ name = "PostgreSQL",
setup = postgresSetup,
teardown = postgresTeardown
teardown = postgresTeardown,
backendOptions = Feature.defaultBackendOptions
},
Feature.Backend
{ name = "Citus",
setup = citusSetup,
teardown = citusTeardown
teardown = citusTeardown,
backendOptions = Feature.defaultBackendOptions
},
Feature.Backend
{ name = "SQLServer",
setup = sqlserverSetup,
teardown = sqlserverTeardown
teardown = sqlserverTeardown,
backendOptions = Feature.defaultBackendOptions
},
Feature.Backend
{ name = "BigQuery",
setup = bigquerySetup,
teardown = bigqueryTeardown,
backendOptions =
Feature.BackendOptions
{ stringifyNumbers = True
}
}
],
Feature.tests = tests
@@ -233,18 +247,90 @@ sqlserverTeardown _ = do
DROP TABLE hasura.author;
|]
--------------------------------------------------------------------------------
-- BigQuery backend
bigquerySetup :: State -> IO ()
bigquerySetup state = do
-- Clear and reconfigure the metadata
serviceAccount <- BigQuery.getServiceAccount
projectId <- BigQuery.getProjectId
GraphqlEngine.post_
state
"/v1/metadata"
[yaml|
type: replace_metadata
args:
version: 3
sources:
- name: bigquery
kind: bigquery
tables: []
configuration:
service_account: *serviceAccount
project_id: *projectId
datasets: [hasura]
|]
-- Setup tables
BigQuery.run_
serviceAccount
projectId
[sql|
CREATE TABLE hasura.author
(
id INT64,
name STRING
);
|]
BigQuery.run_
serviceAccount
projectId
[sql|
INSERT INTO hasura.author
(id, name)
VALUES
(1, 'Author 1'),
(2, 'Author 2');
|]
-- Track the tables
GraphqlEngine.post_
state
"/v1/metadata"
[yaml|
type: bigquery_track_table
args:
source: bigquery
table:
dataset: hasura
name: author
|]
bigqueryTeardown :: State -> IO ()
bigqueryTeardown _ = do
serviceAccount <- BigQuery.getServiceAccount
projectId <- BigQuery.getProjectId
BigQuery.run_
serviceAccount
projectId
[sql|
DROP TABLE hasura.author;
|]
--------------------------------------------------------------------------------
-- Tests
tests :: SpecWith State
tests = do
tests :: Feature.BackendOptions -> SpecWith State
tests opts = do
it "Author fields" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|
query {
hasura_author {
hasura_author(order_by:[{id:asc}]) {
name
id
}
@@ -261,6 +347,7 @@ data:
|]
it "Use operationName" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphqlYaml
state
[yaml|
@@ -272,7 +359,7 @@ query: |
}
}
query chooseThisOne {
hasura_author {
hasura_author(order_by:[{id:asc}]) {
id
name
}
@@ -289,6 +376,7 @@ data:
|]
it "Missing field" $ \state -> do
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|
@@ -311,6 +399,7 @@ errors:
|]
it "Missing table" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|

View File

@@ -26,7 +26,8 @@ spec =
[ Feature.Backend
{ name = "MySQL",
setup = mysqlSetup,
teardown = mysqlTeardown
teardown = mysqlTeardown,
backendOptions = Feature.defaultBackendOptions
}
],
Feature.tests = tests
@@ -97,10 +98,11 @@ query QueryParams {includeId, skipId} =
}
|]
tests :: SpecWith State
tests = do
tests :: Feature.BackendOptions -> SpecWith State
tests opts = do
it "Skip id field conditionally" \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
(query QueryParams {includeId = False, skipId = False})
@@ -114,6 +116,7 @@ data:
it "Skip id field conditionally, includeId=true" \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
(query QueryParams {includeId = True, skipId = False})
@@ -129,6 +132,7 @@ data:
it "Skip id field conditionally, skipId=true" \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
(query QueryParams {includeId = False, skipId = True})
@@ -142,6 +146,7 @@ data:
it "Skip id field conditionally, skipId=true, includeId=true" \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
(query QueryParams {includeId = True, skipId = True})
@@ -157,6 +162,7 @@ data:
it "Author with skip id" \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphqlYaml
state
[yaml|
@@ -180,6 +186,7 @@ data:
|]
it "Author with skip name" \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphqlYaml
state
[yaml|
@@ -205,6 +212,7 @@ data:
-- These three come from <https://github.com/hasura/graphql-engine-mono/blob/5f6f862e5f6b67d82cfa59568edfc4f08b920375/server/tests-py/queries/graphql_query/mysql/select_query_author_with_wrong_directive_err.yaml#L1>
it "Rejects unknown directives" \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphqlYaml
state
[yaml|
@@ -226,6 +234,7 @@ errors:
|]
it "Rejects duplicate directives" \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphqlYaml
state
[yaml|
@@ -247,6 +256,7 @@ errors:
|]
it "Rejects directives on wrong element" \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphqlYaml
state
[yaml|

View File

@@ -14,7 +14,7 @@ spec =
Feature.feature
Feature.Feature
{ Feature.backends = [],
Feature.tests = tests
Feature.tests = \_ -> tests
}
--------------------------------------------------------------------------------

View File

@@ -25,7 +25,8 @@ spec =
[ Feature.Backend
{ name = "MySQL",
setup = mysqlSetup,
teardown = mysqlTeardown
teardown = mysqlTeardown,
backendOptions = Feature.defaultBackendOptions
}
],
Feature.tests = tests
@@ -82,10 +83,11 @@ DROP TABLE author;
--------------------------------------------------------------------------------
-- Tests
tests :: SpecWith State
tests = do
tests :: Feature.BackendOptions -> SpecWith State
tests opts = do
it "limit 1" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|
@@ -110,6 +112,7 @@ data:
-- on ordering with tests like this.
it "Basic offset query" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|
@@ -137,6 +140,7 @@ data:
-- We use ordering here, which yields a stable result.
it "order descending, offset 2, limit 1" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|

View File

@@ -27,7 +27,8 @@ spec =
[ Feature.Backend
{ name = "MySQL",
setup = mysqlSetup,
teardown = mysqlTeardown
teardown = mysqlTeardown,
backendOptions = Feature.defaultBackendOptions
}
],
Feature.tests = tests
@@ -157,10 +158,11 @@ DROP TABLE author;
--------------------------------------------------------------------------------
-- Tests
tests :: SpecWith State
tests = do
tests :: Feature.BackendOptions -> SpecWith State
tests opts = do
it "Nested select on article" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|

View File

@@ -28,7 +28,8 @@ spec =
[ Feature.Backend
{ name = "Postgres",
setup = postgresSetup,
teardown = postgresTeardown
teardown = postgresTeardown,
backendOptions = Feature.defaultBackendOptions
}
],
Feature.tests = tests
@@ -44,10 +45,11 @@ spec =
--
-- Because of that, we use 'shouldReturnOneOfYaml' and list all of the possible (valid)
-- expected results.
tests :: SpecWith State
tests = do
tests :: Feature.BackendOptions -> SpecWith State
tests opts = do
it "Query by id" $ \state ->
shouldReturnOneOfYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|
@@ -79,6 +81,7 @@ data:
it "Query limit 2" $ \state ->
shouldReturnOneOfYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|
@@ -116,6 +119,7 @@ data:
it "where author name" $ \state ->
shouldReturnOneOfYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|
@@ -147,6 +151,7 @@ data:
it "order by author id" $ \state ->
shouldReturnOneOfYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|
@@ -184,6 +189,7 @@ data:
it "count articles" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|

View File

@@ -25,7 +25,8 @@ spec =
[ Feature.Backend
{ name = "MySQL",
setup = mysqlSetup,
teardown = mysqlTeardown
teardown = mysqlTeardown,
backendOptions = Feature.defaultBackendOptions
}
],
Feature.tests = tests
@@ -131,10 +132,11 @@ DROP TABLE author;
--------------------------------------------------------------------------------
-- Tests
tests :: SpecWith State
tests = do
tests :: Feature.BackendOptions -> SpecWith State
tests opts = do
it "Author of article where id=1" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|
@@ -159,6 +161,7 @@ data:
-- originally from <https://github.com/hasura/graphql-engine-mono/blob/cf64da26e818ca0e4ec39667296c67021bc03c2a/server/tests-py/queries/graphql_query/mysql/select_query_author_quoted_col.yaml>
it "Simple GraphQL object query on author" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|

View File

@@ -25,7 +25,8 @@ spec =
[ Feature.Backend
{ name = "MySQL",
setup = mysqlSetup,
teardown = mysqlTeardown
teardown = mysqlTeardown,
backendOptions = Feature.defaultBackendOptions
}
],
Feature.tests = tests
@@ -84,10 +85,11 @@ DROP TABLE author;
-- That includes order by {text,id} {desc,asc}
--
tests :: SpecWith State
tests = do
tests :: Feature.BackendOptions -> SpecWith State
tests opts = do
it "Order by id ascending" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|
@@ -110,6 +112,7 @@ data:
it "Order by id descending" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|
@@ -132,6 +135,7 @@ data:
it "Order by name ascending" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|
@@ -154,6 +158,7 @@ data:
it "Order by name descending" $ \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|

View File

@@ -25,7 +25,8 @@ spec =
[ Feature.Backend
{ name = "MySQL",
setup = mysqlSetup,
teardown = mysqlTeardown
teardown = mysqlTeardown,
backendOptions = Feature.defaultBackendOptions
}
],
Feature.tests = tests
@@ -105,10 +106,11 @@ DROP TABLE author;
--------------------------------------------------------------------------------
-- Tests
tests :: SpecWith State
tests = do
tests :: Feature.BackendOptions -> SpecWith State
tests opts = do
it "Query that a view works properly" \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|

View File

@@ -25,7 +25,8 @@ spec =
[ Feature.Backend
{ name = "MySQL",
setup = mysqlSetup,
teardown = mysqlTeardown
teardown = mysqlTeardown,
backendOptions = Feature.defaultBackendOptions
}
],
Feature.tests = tests
@@ -80,10 +81,11 @@ DROP TABLE author;
--------------------------------------------------------------------------------
-- Tests
tests :: SpecWith State
tests = do
tests :: Feature.BackendOptions -> SpecWith State
tests opts = do
it "Where id=1" \state ->
shouldReturnYaml
opts
( GraphqlEngine.postGraphql
state
[graphql|

View File

@@ -4,8 +4,6 @@ args:
args:
source: bigquery
sql: |
DROP SCHEMA IF EXISTS `regency-polecat-beehive.hasura_test` CASCADE;
CREATE SCHEMA `regency-polecat-beehive.hasura_test`;
CREATE TABLE `hasura_test.all_types` (
`string` STRING,
`bytes` BYTES,