Merge branch 'master' into feature/add-identity-frequent-column

Commit 37fe910101, authored by Rikin Kachhia on 2020-08-11 18:45:14 +05:30, committed by GitHub.
14 changed files with 212 additions and 71 deletions


@@ -25,6 +25,9 @@ If you do have such headers configured, then you must update the header configur
- console: update sidebar icons for different action and trigger types (#5445)
- console: make add column UX consistent with others (#5486)
- console: add "identity" to frequently used columns (close #4279) (#5360)
- cli: improve error messages thrown when metadata apply fails (#5513)
- cli: fix issue with creating seed migrations while using tables with capital letters (closes #5532) (#5549)
- build: introduce additional log kinds for cli-migrations image (#5529)
## `v1.3.0`


@@ -70,26 +70,26 @@ func (o *SeedNewOptions) Run() error {
// If we are initializing from a database table
// create a hasura client and add table name opts
if createSeedOpts.Data == nil {
var body []byte
if len(o.FromTableNames) > 0 {
migrateDriver, err := migrate.NewMigrate(ec, true)
if err != nil {
return errors.Wrap(err, "cannot initialize migrate driver")
}
// Send the query
body, err := migrateDriver.ExportDataDump(o.FromTableNames)
body, err = migrateDriver.ExportDataDump(o.FromTableNames)
if err != nil {
return errors.Wrap(err, "exporting seed data")
}
createSeedOpts.Data = bytes.NewReader(body)
} else {
const defaultText = ""
data, err := editor.CaptureInputFromEditor(editor.GetPreferredEditorFromEnvironment, defaultText, "*.sql")
var err error
body, err = editor.CaptureInputFromEditor(editor.GetPreferredEditorFromEnvironment, defaultText, "*.sql")
if err != nil {
return errors.Wrap(err, "cannot find default editor from env")
}
createSeedOpts.Data = bytes.NewReader(data)
}
createSeedOpts.Data = bytes.NewReader(body)
}
fs := afero.NewOsFs()
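Note on the refactor above: body is now declared once, each branch assigns to it with plain =, and the single bytes.NewReader(body) after the if/else sees the data. In the editor branch an explicit var err error is needed, because body, err := ... would declare a new block-scoped body instead of assigning to the outer one. A self-contained sketch of that pitfall, with fetch as a hypothetical stand-in for ExportDataDump or CaptureInputFromEditor:

package main

import "fmt"

// fetch is a hypothetical stand-in for ExportDataDump / CaptureInputFromEditor.
func fetch() ([]byte, error) { return []byte("insert into t values (1);"), nil }

func main() {
	var body []byte
	{
		// Pitfall: ':=' declares a NEW 'body' scoped to this block,
		// so the outer variable stays nil.
		body, err := fetch()
		_, _ = body, err
	}
	fmt.Println(body == nil) // true: the outer body was never assigned

	{
		// The diff's pattern: declare only 'err', then plain '='
		// assigns to the outer 'body'.
		var err error
		body, err = fetch()
		if err != nil {
			panic(err)
		}
	}
	fmt.Println(string(body)) // insert into t values (1);
}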


@@ -219,7 +219,7 @@ func (h *HasuraDB) ApplyMetadata() error {
if err != nil {
return err
}
herror.migrationQuery = "offending object: \n\r\n\r" + string(queryData)
h.logger.Debugf("offending object: \n\r\n\r%s", string(queryData))
}
}
return herror
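The verbose dump of the offending object thus moves out of the returned error message and into the debug log; the structured InconsistentMetadataError rendering introduced below becomes the user-facing description instead.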


@@ -289,11 +289,35 @@ type HasuraError struct {
Code string `json:"code"`
}
type InconsistentMetadataError struct {
Definition interface{} `json:"definition,omitempty" mapstructure:"definition,omitempty"`
Reason string `json:"reason,omitempty" mapstructure:"reason,omitempty"`
Type string `json:"type,omitempty" mapstructure:"type,omitempty"`
}
func (mderror *InconsistentMetadataError) String() string {
var out string
if mderror.Reason != "" {
out = fmt.Sprintf("\nreason: %v\n", mderror.Reason)
}
if mderror.Type != "" {
out = fmt.Sprintf("%stype: %v\n", out, mderror.Type)
}
if mderror.Definition != nil {
m, err := json.MarshalIndent(mderror.Definition, "", " ")
if err == nil {
out = fmt.Sprintf("%sdefinition: \n%s", out, string(m))
}
}
return out
}
type SQLInternalError struct {
Arguments []string `json:"arguments" mapstructure:"arguments,omitempty"`
Error PostgresError `json:"error" mapstructure:"error,omitempty"`
Prepared bool `json:"prepared" mapstructure:"prepared,omitempty"`
Statement string `json:"statement" mapstructure:"statement,omitempty"`
Arguments []string `json:"arguments" mapstructure:"arguments,omitempty"`
Error *PostgresError `json:"error" mapstructure:"error,omitempty"`
Prepared bool `json:"prepared" mapstructure:"prepared,omitempty"`
Statement string `json:"statement" mapstructure:"statement,omitempty"`
InconsistentMetadataError `mapstructure:",squash"`
}
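The mapstructure ",squash" tag on the embedded InconsistentMetadataError makes its fields decode from the top level of the internal payload rather than from a nested key. A minimal sketch of that behaviour, using illustrative struct names rather than the ones above:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type metadataErr struct {
	Reason string `mapstructure:"reason,omitempty"`
	Type   string `mapstructure:"type,omitempty"`
}

type internalErr struct {
	Statement   string `mapstructure:"statement,omitempty"`
	metadataErr `mapstructure:",squash"`
}

func main() {
	// Thanks to ",squash", "reason" and "type" are read from the same
	// level as "statement", with no nested key for the embedded struct.
	in := map[string]interface{}{
		"statement": "SELECT 1",
		"reason":    "no such table",
		"type":      "table",
	}
	var out internalErr
	if err := mapstructure.Decode(in, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Statement, out.Reason, out.Type) // SELECT 1 no such table table
}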
type PostgresError struct {
StatusCode string `json:"status_code" mapstructure:"status_code,omitempty"`
@@ -323,20 +347,7 @@ func (h HasuraError) Error() string {
err := mapstructure.Decode(v, &internalError)
if err == nil {
// postgres error
errorStrings = append(errorStrings, fmt.Sprintf("[%s] %s: %s", internalError.Error.StatusCode, internalError.Error.ExecStatus, internalError.Error.Message))
if len(internalError.Error.Description) > 0 {
errorStrings = append(errorStrings, fmt.Sprintf("Description: %s", internalError.Error.Description))
}
if len(internalError.Error.Hint) > 0 {
errorStrings = append(errorStrings, fmt.Sprintf("Hint: %s", internalError.Error.Hint))
}
}
}
if v, ok := h.Internal.([]interface{}); ok {
err := mapstructure.Decode(v, &internalErrors)
if err == nil {
for _, internalError := range internalErrors {
// postgres error
if internalError.Error != nil {
errorStrings = append(errorStrings, fmt.Sprintf("[%s] %s: %s", internalError.Error.StatusCode, internalError.Error.ExecStatus, internalError.Error.Message))
if len(internalError.Error.Description) > 0 {
errorStrings = append(errorStrings, fmt.Sprintf("Description: %s", internalError.Error.Description))
@@ -345,8 +356,35 @@ func (h HasuraError) Error() string {
errorStrings = append(errorStrings, fmt.Sprintf("Hint: %s", internalError.Error.Hint))
}
}
if e := internalError.InconsistentMetadataError.String(); e != "" {
errorStrings = append(errorStrings, e)
}
}
}
if v, ok := h.Internal.([]interface{}); ok {
err := mapstructure.Decode(v, &internalErrors)
if err == nil {
for _, internalError := range internalErrors {
// postgres error
if internalError.Error != nil {
errorStrings = append(errorStrings, fmt.Sprintf("[%s] %s: %s", internalError.Error.StatusCode, internalError.Error.ExecStatus, internalError.Error.Message))
if len(internalError.Error.Description) > 0 {
errorStrings = append(errorStrings, fmt.Sprintf("Description: %s", internalError.Error.Description))
}
if len(internalError.Error.Hint) > 0 {
errorStrings = append(errorStrings, fmt.Sprintf("Hint: %s", internalError.Error.Hint))
}
}
if e := internalError.InconsistentMetadataError.String(); e != "" {
errorStrings = append(errorStrings, e)
}
}
}
}
if len(errorStrings) == 0 {
return ""
}
return strings.Join(errorStrings, "\r\n")
}
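Net effect of the restructuring: both shapes of the internal field, a single object or an array of objects, now append any inconsistent-metadata details after the Postgres error fields, and the method now returns an empty string explicitly when nothing could be decoded.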


@@ -72,3 +72,94 @@ func TestHasuraError_Error(t *testing.T) {
})
}
}
func TestInconsistentMetadataError_String(t *testing.T) {
type fields struct {
Definition interface{}
Reason string
Type string
}
tests := []struct {
name string
fields fields
want string
}{
{
"can generate error correctly when all fields are given",
fields{
Reason: "test reason",
Type: "test",
Definition: func() interface{} {
var m interface{}
err := json.Unmarshal([]byte(`{"test": "test"}`), &m)
if err != nil {
t.Error(err)
}
return m
}(),
},
`
reason: test reason
type: test
definition:
{
"test": "test"
}`,
},
{
"will not panic when Definition is not a valid json (string)",
fields{
Definition: func() interface{} {
return "test"
}(),
Reason: "",
Type: "",
},
`definition:
"test"`,
},
{
"will not panic when Definition is not a valid json (Int)",
fields{
Definition: func() interface{} {
return 1
}(),
Reason: "",
Type: "",
},
`definition:
1`,
},
{
"will not panic when Definition is (struct Array)",
fields{
Definition: func() interface{} {
return []struct{ Name string }{{"test"}, {"test"}}
}(),
Reason: "",
Type: "",
},
`definition:
[
{
"Name": "test"
},
{
"Name": "test"
}
]`,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
mderror := &InconsistentMetadataError{
Definition: tt.fields.Definition,
Reason: tt.fields.Reason,
Type: tt.fields.Type,
}
if got := mderror.String(); got != tt.want {
t.Errorf("String() = %v, want %v", got, tt.want)
}
})
}
}


@@ -1847,7 +1847,12 @@ func (m *Migrate) ApplySeed(q interface{}) error {
}
func (m *Migrate) ExportDataDump(tableNames []string) ([]byte, error) {
return m.databaseDrv.ExportDataDump(tableNames)
// to support tables starting with capital letters
modifiedTableNames := make([]string, len(tableNames))
for idx, val := range tableNames {
modifiedTableNames[idx] = fmt.Sprintf(`"%s"`, val)
}
return m.databaseDrv.ExportDataDump(modifiedTableNames)
}
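Postgres folds unquoted identifiers to lower case, so a dump of a table created as "Users" only resolves when the name is double-quoted; wrapping every name in quotes fixes the capital-letters case (#5532). A fuller, hypothetical helper would also escape embedded double quotes by doubling them, sketched here under that assumption:

package main

import (
	"fmt"
	"strings"
)

// quoteIdent is a hypothetical extension of the fix above: it quotes a
// Postgres identifier and doubles any embedded double quote.
func quoteIdent(name string) string {
	return `"` + strings.ReplaceAll(name, `"`, `""`) + `"`
}

func main() {
	for _, t := range []string{"Users", `say "hi"`} {
		fmt.Println(quoteIdent(t))
	}
	// Output:
	// "Users"
	// "say ""hi"""
}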
func printDryRunStatus(migrations []*Migration) *bytes.Buffer {


@@ -4,8 +4,9 @@ set -e
log() {
TIMESTAMP=$(date -u "+%Y-%m-%dT%H:%M:%S.000+0000")
MESSAGE=$1
echo "{\"timestamp\":\"$TIMESTAMP\",\"level\":\"info\",\"type\":\"startup\",\"detail\":{\"kind\":\"migration-apply\",\"info\":\"$MESSAGE\"}}"
LOGKIND=$1
MESSAGE=$2
echo "{\"timestamp\":\"$TIMESTAMP\",\"level\":\"info\",\"type\":\"startup\",\"detail\":{\"kind\":\"$LOGKIND\",\"info\":\"$MESSAGE\"}}"
}
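With the extra parameter, log "migrations-apply" "applying migrations" now emits a line whose detail.kind is migrations-apply instead of the previously hard-coded migration-apply, so the startup, apply, and shutdown phases below are distinguishable in the logs.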
DEFAULT_MIGRATIONS_DIR="/hasura-migrations"
@@ -13,40 +14,40 @@ TEMP_MIGRATIONS_DIR="/tmp/hasura-migrations"
# configure the target database for migrations
if [ ${HASURA_GRAPHQL_MIGRATIONS_DATABASE_ENV_VAR} ]; then
log "database url for migrations is set by $HASURA_GRAPHQL_MIGRATIONS_DATABASE_ENV_VAR"
log "migrations-startup" "database url for migrations is set by $HASURA_GRAPHQL_MIGRATIONS_DATABASE_ENV_VAR"
HASURA_GRAPHQL_MIGRATIONS_DATABASE_URL=$(printenv $HASURA_GRAPHQL_MIGRATIONS_DATABASE_ENV_VAR)
elif [ -z ${HASURA_GRAPHQL_MIGRATIONS_DATABASE_URL+x} ]; then
HASURA_GRAPHQL_MIGRATIONS_DATABASE_URL=$HASURA_GRAPHQL_DATABASE_URL
fi
log "database url for migrations is set by HASURA_GRAPHQL_DATABASE_URL"
log "migrations-startup" "database url for migrations is set by HASURA_GRAPHQL_DATABASE_URL"
# Use port 9691 for running the temporary instance.
# If 9691 is already occupied (e.g. due to docker networking), this will fail;
# override with another port in that case.
# TODO: Find a proper random port
if [ -z ${HASURA_GRAPHQL_MIGRATIONS_SERVER_PORT+x} ]; then
log "migrations server port env var is not set, defaulting to 9691"
log "migrations-startup" "migrations server port env var is not set, defaulting to 9691"
HASURA_GRAPHQL_MIGRATIONS_SERVER_PORT=9691
fi
if [ -z ${HASURA_GRAPHQL_MIGRATIONS_SERVER_TIMEOUT+x} ]; then
log "server timeout is not set, defaulting to 30 seconds"
log "migrations-startup" "server timeout is not set, defaulting to 30 seconds"
HASURA_GRAPHQL_MIGRATIONS_SERVER_TIMEOUT=30
fi
# wait for a port to be ready
wait_for_port() {
local PORT=$1
log "waiting $HASURA_GRAPHQL_MIGRATIONS_SERVER_TIMEOUT for $PORT to be ready"
log "migrations-startup" "waiting $HASURA_GRAPHQL_MIGRATIONS_SERVER_TIMEOUT for $PORT to be ready"
for i in `seq 1 $HASURA_GRAPHQL_MIGRATIONS_SERVER_TIMEOUT`;
do
nc -z localhost $PORT > /dev/null 2>&1 && log "port $PORT is ready" && return
nc -z localhost $PORT > /dev/null 2>&1 && log "migrations-startup" "port $PORT is ready" && return
sleep 1
done
log "failed waiting for $PORT" && exit 1
log "migrations-startup" "failed waiting for $PORT, try increasing HASURA_GRAPHQL_MIGRATIONS_SERVER_TIMEOUT (default: 30)" && exit 1
}
log "starting graphql engine temporarily on port $HASURA_GRAPHQL_MIGRATIONS_SERVER_PORT"
log "migrations-startup" "starting graphql engine temporarily on port $HASURA_GRAPHQL_MIGRATIONS_SERVER_PORT"
# start graphql engine with metadata api enabled
graphql-engine --database-url "$HASURA_GRAPHQL_MIGRATIONS_DATABASE_URL" \
@@ -61,13 +62,13 @@ wait_for_port $HASURA_GRAPHQL_MIGRATIONS_SERVER_PORT
# check if migration directory is set, default otherwise
log "checking for migrations directory"
if [ -z ${HASURA_GRAPHQL_MIGRATIONS_DIR+x} ]; then
log "env var HASURA_GRAPHQL_MIGRATIONS_DIR is not set, defaulting to $DEFAULT_MIGRATIONS_DIR"
log "migrations-startup" "env var HASURA_GRAPHQL_MIGRATIONS_DIR is not set, defaulting to $DEFAULT_MIGRATIONS_DIR"
HASURA_GRAPHQL_MIGRATIONS_DIR="$DEFAULT_MIGRATIONS_DIR"
fi
# apply migrations if the directory exist
if [ -d "$HASURA_GRAPHQL_MIGRATIONS_DIR" ]; then
log "applying migrations from $HASURA_GRAPHQL_MIGRATIONS_DIR"
log "migrations-apply" "applying migrations from $HASURA_GRAPHQL_MIGRATIONS_DIR"
mkdir -p "$TEMP_MIGRATIONS_DIR"
cp -a "$HASURA_GRAPHQL_MIGRATIONS_DIR/." "$TEMP_MIGRATIONS_DIR/migrations/"
cd "$TEMP_MIGRATIONS_DIR"
@@ -75,20 +76,20 @@ if [ -d "$HASURA_GRAPHQL_MIGRATIONS_DIR" ]; then
hasura-cli migrate apply
# check if metadata.[yaml|json] exist and apply
if [ -f migrations/metadata.yaml ]; then
log "applying metadata from $HASURA_GRAPHQL_MIGRATIONS_DIR/metadata.yaml"
log "migrations-apply" "applying metadata from $HASURA_GRAPHQL_MIGRATIONS_DIR/metadata.yaml"
hasura-cli metadata apply
elif [ -f migrations/metadata.json ]; then
log "applying metadata from $HASURA_GRAPHQL_MIGRATIONS_DIR/metadata.json"
log "migrations-apply" "applying metadata from $HASURA_GRAPHQL_MIGRATIONS_DIR/metadata.json"
hasura-cli metadata apply
fi
else
log "directory $HASURA_GRAPHQL_MIGRATIONS_DIR does not exist, skipping migrations"
log "migrations-apply" "directory $HASURA_GRAPHQL_MIGRATIONS_DIR does not exist, skipping migrations"
fi
# kill graphql engine that we started earlier
log "killing temporary server"
log "migrations-shutdown" "killing temporary server"
kill $PID
# pass control to CMD
log "graphql-engine will now start in normal mode"
log "migrations-shutdown" "graphql-engine will now start in normal mode"
exec "$@"


@@ -4,8 +4,9 @@ set -e
log() {
TIMESTAMP=$(date -u "+%Y-%m-%dT%H:%M:%S.000+0000")
MESSAGE=$1
echo "{\"timestamp\":\"$TIMESTAMP\",\"level\":\"info\",\"type\":\"startup\",\"detail\":{\"kind\":\"migration-apply\",\"info\":\"$MESSAGE\"}}"
LOGKIND=$1
MESSAGE=$2
echo "{\"timestamp\":\"$TIMESTAMP\",\"level\":\"info\",\"type\":\"startup\",\"detail\":{\"kind\":\"$LOGKIND\",\"info\":\"$MESSAGE\"}}"
}
DEFAULT_MIGRATIONS_DIR="/hasura-migrations"
@@ -14,40 +15,40 @@ TEMP_PROJECT_DIR="/tmp/hasura-project"
# configure the target database for migrations
if [ ${HASURA_GRAPHQL_MIGRATIONS_DATABASE_ENV_VAR} ]; then
log "database url for migrations is set by $HASURA_GRAPHQL_MIGRATIONS_DATABASE_ENV_VAR"
log "migrations-startup" "database url for migrations is set by $HASURA_GRAPHQL_MIGRATIONS_DATABASE_ENV_VAR"
HASURA_GRAPHQL_MIGRATIONS_DATABASE_URL=$(printenv $HASURA_GRAPHQL_MIGRATIONS_DATABASE_ENV_VAR)
elif [ -z ${HASURA_GRAPHQL_MIGRATIONS_DATABASE_URL+x} ]; then
HASURA_GRAPHQL_MIGRATIONS_DATABASE_URL=$HASURA_GRAPHQL_DATABASE_URL
fi
log "database url for migrations is set by HASURA_GRAPHQL_DATABASE_URL"
log "migrations-startup" "database url for migrations is set by HASURA_GRAPHQL_DATABASE_URL"
# Use port 9691 for running the temporary instance.
# If 9691 is already occupied (e.g. due to docker networking), this will fail;
# override with another port in that case.
# TODO: Find a proper random port
if [ -z ${HASURA_GRAPHQL_MIGRATIONS_SERVER_PORT+x} ]; then
log "migrations server port env var is not set, defaulting to 9691"
log "migrations-startup" "migrations server port env var is not set, defaulting to 9691"
HASURA_GRAPHQL_MIGRATIONS_SERVER_PORT=9691
fi
if [ -z ${HASURA_GRAPHQL_MIGRATIONS_SERVER_TIMEOUT+x} ]; then
log "server timeout is not set, defaulting to 30 seconds"
log "migrations-startup" "server timeout is not set, defaulting to 30 seconds"
HASURA_GRAPHQL_MIGRATIONS_SERVER_TIMEOUT=30
fi
# wait for a port to be ready
wait_for_port() {
local PORT=$1
log "waiting $HASURA_GRAPHQL_MIGRATIONS_SERVER_TIMEOUT for $PORT to be ready"
log "migrations-startup" "waiting $HASURA_GRAPHQL_MIGRATIONS_SERVER_TIMEOUT for $PORT to be ready"
for i in `seq 1 $HASURA_GRAPHQL_MIGRATIONS_SERVER_TIMEOUT`;
do
nc -z localhost $PORT > /dev/null 2>&1 && log "port $PORT is ready" && return
nc -z localhost $PORT > /dev/null 2>&1 && log "migrations-startup" "port $PORT is ready" && return
sleep 1
done
log "failed waiting for $PORT" && exit 1
log "migrations-startup" "failed waiting for $PORT, try increasing HASURA_GRAPHQL_MIGRATIONS_SERVER_TIMEOUT (default: 30)" && exit 1
}
log "starting graphql engine temporarily on port $HASURA_GRAPHQL_MIGRATIONS_SERVER_PORT"
log "migrations-startup" "starting graphql engine temporarily on port $HASURA_GRAPHQL_MIGRATIONS_SERVER_PORT"
# start graphql engine with metadata api enabled
graphql-engine --database-url "$HASURA_GRAPHQL_MIGRATIONS_DATABASE_URL" \
@@ -60,22 +61,20 @@ PID=$!
wait_for_port $HASURA_GRAPHQL_MIGRATIONS_SERVER_PORT
# check if migration directory is set, default otherwise
log "checking for migrations directory"
if [ -z ${HASURA_GRAPHQL_MIGRATIONS_DIR+x} ]; then
log "env var HASURA_GRAPHQL_MIGRATIONS_DIR is not set, defaulting to $DEFAULT_MIGRATIONS_DIR"
log "migrations-startup" "env var HASURA_GRAPHQL_MIGRATIONS_DIR is not set, defaulting to $DEFAULT_MIGRATIONS_DIR"
HASURA_GRAPHQL_MIGRATIONS_DIR="$DEFAULT_MIGRATIONS_DIR"
fi
# check if metadata directory is set, default otherwise
log "checking for metadata directory"
if [ -z ${HASURA_GRAPHQL_METADATA_DIR+x} ]; then
log "env var HASURA_GRAPHQL_METADATA_DIR is not set, defaulting to $DEFAULT_METADATA_DIR"
log "migrations-startup" "env var HASURA_GRAPHQL_METADATA_DIR is not set, defaulting to $DEFAULT_METADATA_DIR"
HASURA_GRAPHQL_METADATA_DIR="$DEFAULT_METADATA_DIR"
fi
# apply migrations if the directory exist
if [ -d "$HASURA_GRAPHQL_MIGRATIONS_DIR" ]; then
log "applying migrations from $HASURA_GRAPHQL_MIGRATIONS_DIR"
log "migrations-apply" "applying migrations from $HASURA_GRAPHQL_MIGRATIONS_DIR"
mkdir -p "$TEMP_PROJECT_DIR"
cp -a "$HASURA_GRAPHQL_MIGRATIONS_DIR/." "$TEMP_PROJECT_DIR/migrations/"
cd "$TEMP_PROJECT_DIR"
@@ -83,13 +82,13 @@ if [ -d "$HASURA_GRAPHQL_MIGRATIONS_DIR" ]; then
echo "endpoint: http://localhost:$HASURA_GRAPHQL_MIGRATIONS_SERVER_PORT" >> config.yaml
hasura-cli migrate apply
else
log "directory $HASURA_GRAPHQL_MIGRATIONS_DIR does not exist, skipping migrations"
log "migrations-apply" "directory $HASURA_GRAPHQL_MIGRATIONS_DIR does not exist, skipping migrations"
fi
# apply metadata if the directory exist
if [ -d "$HASURA_GRAPHQL_METADATA_DIR" ]; then
rm -rf "$TEMP_PROJECT_DIR"
log "applying metadata from $HASURA_GRAPHQL_METADATA_DIR"
log "migrations-apply" "applying metadata from $HASURA_GRAPHQL_METADATA_DIR"
mkdir -p "$TEMP_PROJECT_DIR"
cp -a "$HASURA_GRAPHQL_METADATA_DIR/." "$TEMP_PROJECT_DIR/metadata/"
cd "$TEMP_PROJECT_DIR"
@@ -98,13 +97,13 @@ if [ -d "$HASURA_GRAPHQL_METADATA_DIR" ]; then
echo "metadata_directory: metadata" >> config.yaml
hasura-cli metadata apply
else
log "directory $HASURA_GRAPHQL_METADATA_DIR does not exist, skipping metadata"
log "migrations-apply" "directory $HASURA_GRAPHQL_METADATA_DIR does not exist, skipping metadata"
fi
# kill graphql engine that we started earlier
log "killing temporary server"
log "migrations-shutdown" "killing temporary server"
kill $PID
# pass control to CMD
log "graphql-engine will now start in normal mode"
log "migrations-shutdown" "graphql-engine will now start in normal mode"
exec "$@"


@@ -235,7 +235,7 @@ migrateCatalogSchema env logger pool httpManager sqlGenCtx = do
let pgExecCtx = mkPGExecCtx Q.Serializable pool
adminRunCtx = RunCtx adminUserInfo httpManager sqlGenCtx
currentTime <- liftIO Clock.getCurrentTime
initialiseResult <- runExceptT $ peelRun adminRunCtx pgExecCtx Q.ReadWrite $
initialiseResult <- runExceptT $ peelRun adminRunCtx pgExecCtx Q.ReadWrite Nothing $
(,) <$> migrateCatalog env currentTime <*> liftTx fetchLastUpdate
((migrationResult, schemaCache), lastUpdateEvent) <-
@@ -586,7 +586,7 @@ runAsAdmin
runAsAdmin pool sqlGenCtx httpManager m = do
let runCtx = RunCtx adminUserInfo httpManager sqlGenCtx
pgCtx = mkPGExecCtx Q.Serializable pool
runExceptT $ peelRun runCtx pgCtx Q.ReadWrite m
runExceptT $ peelRun runCtx pgCtx Q.ReadWrite Nothing m
execQuery
:: ( HasVersion


@@ -352,7 +352,7 @@ onStart env serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
E.GExPRemote rsi opDef ->
runRemoteGQ timerTot telemCacheHit execCtx requestId userInfo reqHdrs opDef rsi
where
telemTransport = Telem.HTTP
telemTransport = Telem.WebSocket
runHasuraGQ
:: ExceptT () m DiffTime
-> Telem.CacheHit
@@ -368,8 +368,9 @@ onStart env serverEnv wsConn (StartMsg opId q) = catchAndIgnore $ do
Tracing.interpTraceT id $ executeQuery queryParsed asts genSql pgExecCtx Q.ReadOnly opTx
-- Response headers discarded over websockets
E.ExOpMutation _ opTx -> Tracing.trace "pg" do
ctx <- Tracing.currentContext
execQueryOrMut Telem.Mutation Nothing $
Tracing.interpTraceT (runLazyTx pgExecCtx Q.ReadWrite . withUserInfo userInfo) opTx
Tracing.interpTraceT (runLazyTx pgExecCtx Q.ReadWrite . withTraceContext ctx . withUserInfo userInfo) opTx
E.ExOpSubs lqOp -> do
-- log the graphql query
logQueryLog logger query Nothing reqId
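Two fixes land in this hunk: operations arriving over this websocket handler were previously mislabelled as Telem.HTTP in telemetry and are now reported as Telem.WebSocket, and the current trace context is captured before the mutation transaction starts so withTraceContext can re-install it inside the runLazyTx pipeline.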


@@ -16,6 +16,7 @@ import Control.Monad.Trans.Control (MonadBaseControl)
import Control.Monad.Unique
import Hasura.RQL.Types
import qualified Hasura.Tracing as Tracing
data RunCtx
= RunCtx
@@ -50,7 +51,8 @@ peelRun
=> RunCtx
-> PGExecCtx
-> Q.TxAccess
-> Maybe Tracing.TraceContext
-> Run a
-> ExceptT QErr m a
peelRun runCtx@(RunCtx userInfo _ _) pgExecCtx txAccess (Run m) =
runLazyTx pgExecCtx txAccess $ withUserInfo userInfo $ runReaderT m runCtx
peelRun runCtx@(RunCtx userInfo _ _) pgExecCtx txAccess ctx (Run m) =
runLazyTx pgExecCtx txAccess $ maybe id withTraceContext ctx $ withUserInfo userInfo $ runReaderT m runCtx
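Because the new parameter is a Maybe, callers without a trace context (the Nothing call sites in the following hunks) keep the old behaviour: maybe id withTraceContext Nothing reduces to id, leaving the transaction pipeline untouched.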


@@ -198,10 +198,11 @@ runQuery
-> SQLGenCtx -> SystemDefined -> RQLQuery -> m (EncJSON, RebuildableSchemaCache Run)
runQuery env pgExecCtx instanceId userInfo sc hMgr sqlGenCtx systemDefined query = do
accessMode <- getQueryAccessMode query
traceCtx <- Tracing.currentContext
resE <- runQueryM env query & Tracing.interpTraceT \x -> do
a <- x & runHasSystemDefinedT systemDefined
& runCacheRWT sc
& peelRun runCtx pgExecCtx accessMode
& peelRun runCtx pgExecCtx accessMode (Just traceCtx)
& runExceptT
& liftIO
pure (either


@@ -221,7 +221,7 @@ refreshSchemaCache sqlGenCtx pool logger httpManager cacheRef invalidations thre
rebuildableCache <- fst <$> liftIO (readIORef $ _scrCache cacheRef)
((), cache, _) <- buildSchemaCacheWithOptions CatalogSync invalidations
& runCacheRWT rebuildableCache
& peelRun runCtx pgCtx PG.ReadWrite
& peelRun runCtx pgCtx PG.ReadWrite Nothing
pure ((), cache)
case resE of
Left e -> logError logger threadType $ TEQueryError e


@@ -83,7 +83,7 @@ buildPostgresSpecs pgConnOptions = do
runAsAdmin :: Run a -> IO a
runAsAdmin =
peelRun runContext pgContext Q.ReadWrite
peelRun runContext pgContext Q.ReadWrite Nothing
>>> runExceptT
>=> flip onLeft printErrJExit