Commit 3d5fb984b0 (mirror of https://github.com/hasura/graphql-engine.git)
## Description

There is a bug in the metadata defaults code; see [the original PR](https://github.com/hasura/graphql-engine-mono/pull/6286).

Steps to reproduce this issue:

* Start a new HGE project
* Start HGE with a defaults argument: `HASURA_GRAPHQL_LOG_LEVEL=debug cabal run exe:graphql-engine -- serve --enable-console --console-assets-dir=./console/static/dist --metadata-defaults='{"backend_configs": {"dataconnector": {"mongo": {"display_name": "BONGOBB", "uri": "http://localhost:8123"}}}}'`
* Add a source (it doesn't need to be related to the defaults)
* Export metadata
* See that the defaults are present in the exported metadata

## Related Issues

* GitHub issue: https://github.com/hasura/graphql-engine/issues/9237
* Jira: https://hasurahq.atlassian.net/browse/GDC-647
* Original PR: https://github.com/hasura/graphql-engine-mono/pull/6286

## Solution

* The check for whether defaults should be included in metadata API operations has been extended to also cover operations that modify metadata
* Metadata inconsistencies have been hidden for `/capabilities` calls on startup

## TODO

* [x] Fix bug
* [x] Write tests
* [x] OSS metadata migration to correct persisted data - `server/src-rsr/migrations/47_to_48.sql`
* [x] Cloud metadata migration - `pro/server/res/cloud/migrations/6_to_7.sql`
* [x] Bump catalog version - `server/src-rsr/catalog_version.txt`
* [x] Update catalog versions - `server/src-rsr/catalog_versions.txt` (this will be done by Infra when creating a release)
* [x] Log the connection error as it occurs *(already being logged; requires `--enabled-log-types startup,webhook-log,websocket-log,http-log,data-connector-log`)*
* [x] Don't mark metadata inconsistencies for this call

## Questions

* [ ] Does `pro/server/res/cloud/migrations/6_to_7.sql` cover the cloud scenarios?
* [ ] Should we have `SET search_path` in migrations?
* [x] What should be in `server/src-rsr/catalog_versions.txt`?

## Testing

To test the solution locally, run:

> docker compose up -d

and

> cabal run -- exe:api-tests --skip BigQuery --skip SQLServer --skip '/Test.API.Explain/Postgres/'

## Solution

In `runMetadataQuery` in `server/src-lib/Hasura/Server/API/Metadata.hs`:

```diff
- if (exportsMetadata _rqlMetadata)
+ if (exportsMetadata _rqlMetadata || queryModifiesMetadata _rqlMetadata)
```

This ensures that defaults aren't present in operations that serialise metadata (a minimal sketch of this check follows the changelog below).

Note: you might think that `X_add_source` would need the defaults to be present in order to add a source that references them, but since that resolution occurs in the schema-cache building phase, the defaults can be excluded from the metadata modifications required for `X_add_source`.

In addition to the code change, a metadata migration has been introduced to clean up serialised defaults. The following scenarios need to be considered for both OSS and Cloud:

* The user has not had defaults serialised
* The user has had the defaults serialised and no other backends configured
* The user has had the defaults serialised and has also configured other backends

We want to remove as much of the serialised metadata as possible without removing any user-specified data, and this should be reflected in migration `server/src-rsr/migrations/47_to_48.sql`.

## Server checklist

### Catalog upgrade

Does this PR change the Hasura catalog version?

- ✅ Yes

### Metadata

Does this PR add a new metadata feature?

- ✅ No

### GraphQL

- ✅ No new GraphQL schema is generated

### Breaking changes

- ✅ No breaking changes

## Changelog

__Component__: server
__Type__: bugfix
__Product__: community-edition

### Short Changelog

Fixes a metadata defaults serialization bug and introduces a metadata migration to correct data that has been persisted due to the bug.

PR-URL: https://github.com/hasura/graphql-engine-mono/pull/7034
GitOrigin-RevId: ad7d4f748397a1a607f2c0c886bf0fbbc3f873f2
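For illustration, here is a minimal, self-contained sketch of the check described in the Solution section above. The module name, the `MetadataRequest` record, and the `includeDefaults` function are placeholders invented for this sketch; in the real code, `exportsMetadata` and `queryModifiesMetadata` are functions applied to the parsed RQL metadata request in `Hasura.Server.API.Metadata`.

```haskell
-- Hypothetical example module; not part of the repository.
module MetadataDefaultsSketch where

-- Placeholder stand-in for the parsed metadata API request.
data MetadataRequest = MetadataRequest
  { exportsMetadata :: Bool, -- does the operation serialise metadata back to the caller?
    modifiesMetadata :: Bool -- does the operation persist changes to metadata?
  }

-- Defaults may only be mixed in for operations that neither export nor modify
-- metadata; otherwise the defaults end up serialised, which is the bug being fixed.
includeDefaults :: MetadataRequest -> Bool
includeDefaults req = not (exportsMetadata req || modifiesMetadata req)
```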
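Below is `Hasura.Backends.DataConnector.Agent.Client`, the Data Connector agent HTTP client. Note the use of `throwConnectionError` in `runRequestAcceptStatus'`, which surfaces an unreachable agent as a connection error instead of flagging a metadata inconsistency.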
```haskell
{-# LANGUAGE UndecidableInstances #-}

module Hasura.Backends.DataConnector.Agent.Client
  ( AgentClientContext (..),
    AgentClientT,
    runAgentClientT,
  )
where

import Control.Exception (try)
import Control.Lens ((&~), (.=))
import Hasura.Backends.DataConnector.Logging (logAgentRequest, logClientError)
import Hasura.Base.Error
import Hasura.HTTP qualified
import Hasura.Logging (Hasura, Logger)
import Hasura.Prelude
import Hasura.Tracing (MonadTrace, tracedHttpRequest)
import Network.HTTP.Client (Manager)
import Network.HTTP.Client qualified as HTTP
import Network.HTTP.Client.Transformable qualified as TransformableHTTP
import Network.HTTP.Types.Status (Status)
import Servant.Client
import Servant.Client.Core (Request, RunClient (..))
import Servant.Client.Internal.HttpClient (clientResponseToResponse, mkFailureResponse)

-- | Everything needed to call a Data Connector agent: a logger, the agent's
-- base URL, a shared HTTP manager, and an optional response timeout in microseconds.
data AgentClientContext = AgentClientContext
  { _accLogger :: Logger Hasura,
    _accBaseUrl :: BaseUrl,
    _accHttpManager :: Manager,
    _accResponseTimeout :: Maybe Int
  }

-- | A 'ReaderT' wrapper that supplies the 'AgentClientContext' to
-- servant-client requests via the 'RunClient' instance below.
newtype AgentClientT m a = AgentClientT (ReaderT AgentClientContext m a)
  deriving newtype (Functor, Applicative, Monad, MonadError e, MonadTrace, MonadIO)

runAgentClientT :: AgentClientT m a -> AgentClientContext -> m a
runAgentClientT (AgentClientT action) ctx = runReaderT action ctx

askClientContext :: Monad m => AgentClientT m AgentClientContext
askClientContext = AgentClientT ask

instance (MonadIO m, MonadTrace m, MonadError QErr m) => RunClient (AgentClientT m) where
  runRequestAcceptStatus = runRequestAcceptStatus'
  throwClientError = throwClientError'

runRequestAcceptStatus' :: (MonadIO m, MonadTrace m, MonadError QErr m) => Maybe [Status] -> Request -> (AgentClientT m) Response
runRequestAcceptStatus' acceptStatus req = do
  AgentClientContext {..} <- askClientContext
  let req' = defaultMakeClientRequest _accBaseUrl req

  transformableReq <-
    TransformableHTTP.tryFromClientRequest req'
      `onLeft` (\err -> throw500 $ "Error in Data Connector backend: Could not create request. " <> err)

  -- Set the response timeout explicitly if it is provided
  let transformableReq' =
        transformableReq &~ do
          for _accResponseTimeout \x -> TransformableHTTP.timeout .= HTTP.responseTimeoutMicro x

  (tracedReq, responseOrException) <- tracedHttpRequest transformableReq' (\tracedReq -> fmap (tracedReq,) . liftIO . try @HTTP.HttpException $ TransformableHTTP.performRequest tracedReq _accHttpManager)
  logAgentRequest _accLogger tracedReq responseOrException
  case responseOrException of
    -- throwConnectionError is used here in order to avoid a metadata inconsistency error
    Left ex -> throwConnectionError $ "Error in Data Connector backend: " <> Hasura.HTTP.serializeHTTPExceptionMessage (Hasura.HTTP.HttpException ex)
    Right response -> do
      let status = TransformableHTTP.responseStatus response
          servantResponse = clientResponseToResponse id response
          goodStatus = case acceptStatus of
            Nothing -> TransformableHTTP.statusIsSuccessful status
            Just good -> status `elem` good
      if goodStatus
        then pure $ servantResponse
        else throwClientError $ mkFailureResponse _accBaseUrl req servantResponse

-- Log the servant client error and surface it as a QErr.
throwClientError' :: (MonadIO m, MonadTrace m, MonadError QErr m) => ClientError -> (AgentClientT m) a
throwClientError' err = do
  AgentClientContext {..} <- askClientContext
  logClientError _accLogger err
  throw500 $ "Error in Data Connector backend: " <> Hasura.HTTP.serializeServantClientErrorMessage err
```
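As a usage illustration (not code from the repository), the sketch below shows how a caller might assemble an `AgentClientContext` and run a servant-generated client action through `runAgentClientT`. The module name, the `clientAction` parameter, the agent URL, and the 30-second timeout are assumptions for this example.

```haskell
-- Hypothetical example module; not part of the repository.
module AgentClientExample where

import Hasura.Backends.DataConnector.Agent.Client
import Hasura.Base.Error
import Hasura.Logging (Hasura, Logger)
import Hasura.Prelude
import Hasura.Tracing (MonadTrace)
import Network.HTTP.Client (Manager)
import Servant.Client (parseBaseUrl)

-- Run any agent client action (for example, a generated /capabilities call)
-- against a locally running agent with an explicit 30-second timeout.
runAgainstLocalAgent ::
  (MonadIO m, MonadTrace m, MonadError QErr m) =>
  Logger Hasura ->
  Manager ->
  AgentClientT m a ->
  m a
runAgainstLocalAgent logger manager clientAction = do
  -- parseBaseUrl (from Servant.Client) throws in IO if the URL is malformed
  agentUrl <- liftIO $ parseBaseUrl "http://localhost:8123"
  let ctx =
        AgentClientContext
          { _accLogger = logger,
            _accBaseUrl = agentUrl,
            _accHttpManager = manager,
            _accResponseTimeout = Just 30000000 -- microseconds (30 seconds)
          }
  runAgentClientT clientAction ctx
```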