cli: fix applying migrations in a different environment after config v3 update

GitOrigin-RevId: 2f5d554dba50da1d45ea9689afca07f4d0f2c1e5
This commit is contained in:
Aravind K P 2021-05-17 20:49:15 +05:30 committed by hasura-bot
parent 14f80c55ff
commit 9dfed5da57
40 changed files with 511 additions and 40 deletions

View File

@ -27,6 +27,7 @@
$ hasura metadata export -o json
```
- cli: add support for `graphql_schema_introspection` metadata object
- cli: fix applying migrations in a different environment after config v3 update (#6861)
## v2.0.0-alpha.10

View File

@ -433,6 +433,12 @@ type ExecutionContext struct {
// current database on which operation is being done
Source Source
HasMetadataV3 bool
// after a `scripts update-config-v3` all migrate commands will try to automatically
// move cli state from hdb_catalog.* tables to catalog state if that hasn't happened
// already. This configuration option will disable this step.
// more details in: https://github.com/hasura/graphql-engine/issues/6861
DisableAutoStateMigration bool
}
type Source struct {
@ -923,16 +929,11 @@ func GetCommonMetadataOps(ec *ExecutionContext) hasura.CommonMetadataOperations
}
func GetMigrationsStateStore(ec *ExecutionContext) statestore.MigrationsStateStore {
const (
defaultMigrationsTable = "schema_migrations"
defaultSchema = "hdb_catalog"
)
if ec.Config.Version <= V2 {
if !ec.HasMetadataV3 {
return migrations.NewMigrationStateStoreHdbTable(ec.APIClient.V1Query, defaultSchema, defaultMigrationsTable)
return migrations.NewMigrationStateStoreHdbTable(ec.APIClient.V1Query, migrations.DefaultSchema, migrations.DefaultMigrationsTable)
}
return migrations.NewMigrationStateStoreHdbTable(ec.APIClient.V2Query, defaultSchema, defaultMigrationsTable)
return migrations.NewMigrationStateStoreHdbTable(ec.APIClient.V2Query, migrations.DefaultSchema, migrations.DefaultMigrationsTable)
}
return migrations.NewCatalogStateStore(statestore.NewCLICatalogState(ec.APIClient.V1Metadata))
}

View File

@ -57,6 +57,9 @@ func NewMigrateCmd(ec *cli.ExecutionContext) *cobra.Command {
util.BindPFlag(v, "insecure_skip_tls_verify", f.Lookup("insecure-skip-tls-verify"))
util.BindPFlag(v, "certificate_authority", f.Lookup("certificate-authority"))
f.BoolVar(&ec.DisableAutoStateMigration, "disable-auto-state-migration", false, "after a config v3 update, disable automatically moving state from hdb_catalog.schema_migrations to catalog state")
f.MarkHidden("disable-auto-state-migration")
migrateCmd.AddCommand(
newMigrateApplyCmd(ec),
newMigrateStatusCmd(ec),

View File

@ -59,4 +59,66 @@ var _ = Describe("migrate_apply", func() {
Eventually(session, 60*40).Should(Exit(0))
})
})
})
// Regression test: the automatic hdb_catalog.schema_migrations -> catalog
// state migration (added for config v2 -> v3 project updates) must be a no-op
// for projects that start out on config v3.
var _ = Describe("automatic state migration should not affect new config v3 projects", func() {
var dirName string
var session *Session
var teardown func()
BeforeEach(func() {
dirName = testutil.RandDirName()
// start a hasura instance backed by a separate metadata database
hgeEndPort, teardownHGE := testutil.StartHasuraWithMetadataDatabase(GinkgoT(), testutil.HasuraVersion)
hgeEndpoint := fmt.Sprintf("http://0.0.0.0:%s", hgeEndPort)
// start a dedicated postgres container to act as a user database
port, teardownPG := testutil.StartPGContainer(GinkgoT(), "test", "test", "test")
// add a pg source named default
testutil.AddPGSourceToHasura(GinkgoT(), hgeEndpoint, fmt.Sprintf("postgres://test:test@%v:%v/test", testutil.DockerSwitchIP, port), "default")
// scaffold a fresh (config v3) project and point it at the instance above
testutil.RunCommandAndSucceed(testutil.CmdOpts{
Args: []string{"init", dirName},
})
editEndpointInConfig(filepath.Join(dirName, defaultConfigFilename), hgeEndpoint)
teardown = func() {
session.Kill()
os.RemoveAll(dirName)
teardownHGE()
teardownPG()
}
})
AfterEach(func() {
teardown()
})
It("should apply the migrations on server ", func() {
// create a migration pair for the "default" source
testutil.RunCommandAndSucceed(testutil.CmdOpts{
Args: []string{"migrate",
"create",
"schema_creation",
"--up-sql",
"create schema \"testing\";",
"--down-sql",
"drop schema \"testing\" cascade;",
"--database-name",
"default",
"--log-level",
"debug",
},
WorkingDirectory: dirName,
})
// apply it and assert the usual progress output appears on stderr
session = testutil.Hasura(testutil.CmdOpts{
Args: []string{"migrate", "apply", "--database-name", "default", "--log-level", "debug"},
WorkingDirectory: dirName,
})
wantKeywordList := []string{
".*Applying migrations...*.",
".*migrations*.",
".*applied*.",
}
for _, keyword := range wantKeywordList {
// generous timeout: containers can be slow to come up on CI
Eventually(session.Err, 60*40).Should(Say(keyword))
}
Eventually(session, 60*40).Should(Exit(0))
})
})

View File

@ -1,6 +1,7 @@
package commands
import (
"fmt"
"github.com/hasura/graphql-engine/cli/internal/scripts"
"github.com/hasura/graphql-engine/cli/util"
"github.com/spf13/afero"
@ -12,6 +13,7 @@ import (
func newUpdateMultipleSources(ec *cli.ExecutionContext) *cobra.Command {
v := viper.New()
var opts scripts.UpdateProjectV3Opts
cmd := &cobra.Command{
Use: "update-project-v3",
Short: "Update the Hasura project from config v2 to v3",
@ -28,19 +30,23 @@ Note that this process is completely independent from your Hasura Graphql Engine
return ec.Validate()
},
RunE: func(cmd *cobra.Command, args []string) error {
opts := scripts.UpgradeToMuUpgradeProjectToMultipleSourcesOpts{
Fs: afero.NewOsFs(),
ProjectDirectory: ec.ExecutionDirectory,
MigrationsAbsDirectoryPath: ec.MigrationDir,
SeedsAbsDirectoryPath: ec.SeedsDirectory,
Logger: ec.Logger,
EC: ec,
if opts.Force && len(opts.TargetDatabase) == 0 {
return fmt.Errorf("--database-name is required when --force is set")
}
opts.Fs = afero.NewOsFs()
opts.ProjectDirectory = ec.ExecutionDirectory
opts.MigrationsAbsDirectoryPath = ec.MigrationDir
opts.SeedsAbsDirectoryPath = ec.SeedsDirectory
opts.Logger = ec.Logger
opts.EC = ec
return scripts.UpdateProjectV3(opts)
},
}
f := cmd.Flags()
f.StringVar(&opts.TargetDatabase, "database-name", "", "database name for which the current migrations / seeds belong to")
f.BoolVar(&opts.Force, "force", false, "do not ask for confirmation")
f.BoolVar(&opts.MoveStateOnly, "move-state-only", false, "do only a state migration from old hdb_catalog.* table to catalog state and skip others")
f.String("endpoint", "", "http(s) endpoint for Hasura GraphQL engine")
f.String("admin-secret", "", "admin secret for Hasura GraphQL engine")

View File

@ -0,0 +1,140 @@
package commands
import (
"fmt"
"github.com/hasura/graphql-engine/cli/internal/testutil"
"github.com/hasura/graphql-engine/cli/util"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
"os"
"path/filepath"
)
// End-to-end test of the config v2 -> v3 update flow across multiple
// environments: a project is updated to v3 against dev, then its migrations
// are re-applied against staging and prod, exercising both the automatic
// state migration and the --move-state-only escape hatch.
var _ = Describe("scripts_update_project_v3", func() {
var projectDirectory string
var devEndpoint, stagingEndpoint, prodEndpoint string
var teardown func()
BeforeEach(func() {
projectDirectory = testutil.RandDirName()
// create three hasura instances to mimic an environment promotion scenario
devHasuraPort, teardownDev := testutil.StartHasura(GinkgoT(), testutil.HasuraVersion)
devEndpoint = fmt.Sprintf("http://0.0.0.0:%s", devHasuraPort)
stagingHasuraPort, teardownStaging := testutil.StartHasura(GinkgoT(), testutil.HasuraVersion)
stagingEndpoint = fmt.Sprintf("http://0.0.0.0:%s", stagingHasuraPort)
prodHasuraPort, teardownProd := testutil.StartHasura(GinkgoT(), testutil.HasuraVersion)
prodEndpoint = fmt.Sprintf("http://0.0.0.0:%s", prodHasuraPort)
teardown = func() {
os.RemoveAll(projectDirectory)
teardownProd()
teardownStaging()
teardownDev()
}
})
AfterEach(func() {
teardown()
})
It("update a config v2 project to config v3", func() {
Context("sets up dev project", func() {
// copy template project directory migrations to test project directory
Expect(util.CopyDir("testdata/config-v2-test-project", projectDirectory)).Should(BeNil())
editEndpointInConfig(filepath.Join(projectDirectory, defaultConfigFilename), devEndpoint)
// apply migrations and metadata
testutil.RunCommandAndSucceed(testutil.CmdOpts{
Args: []string{"migrate", "apply"},
WorkingDirectory: projectDirectory,
})
testutil.RunCommandAndSucceed(testutil.CmdOpts{
Args: []string{"metadata", "apply"},
WorkingDirectory: projectDirectory,
})
})
Context("applies migrations to staging and production", func() {
// apply migrations and metadata
testutil.RunCommandAndSucceed(testutil.CmdOpts{
Args: []string{"migrate", "apply", "--endpoint", stagingEndpoint},
WorkingDirectory: projectDirectory,
})
testutil.RunCommandAndSucceed(testutil.CmdOpts{
Args: []string{"metadata", "apply", "--endpoint", stagingEndpoint},
WorkingDirectory: projectDirectory,
})
// apply migrations and metadata
testutil.RunCommandAndSucceed(testutil.CmdOpts{
Args: []string{"migrate", "apply", "--endpoint", prodEndpoint},
WorkingDirectory: projectDirectory,
})
testutil.RunCommandAndSucceed(testutil.CmdOpts{
Args: []string{"metadata", "apply", "--endpoint", prodEndpoint},
WorkingDirectory: projectDirectory,
})
})
Context("updates dev project to config v3", func() {
// non-interactive update: --force with an explicit --database-name
testutil.RunCommandAndSucceed(testutil.CmdOpts{
Args: []string{"scripts", "update-project-v3", "--database-name", "default", "--force"},
WorkingDirectory: projectDirectory,
})
})
Context("applies metadata and migrations on staging hasura instance with auto state migration disabled", func() {
testutil.RunCommandAndSucceed(testutil.CmdOpts{
Args: []string{"metadata", "apply", "--endpoint", stagingEndpoint},
WorkingDirectory: projectDirectory,
})
// with the automatic state migration disabled the v3 CLI cannot find
// its state on staging, so migrate apply is expected to error out
session := testutil.Hasura(testutil.CmdOpts{
Args: []string{"migrate", "apply", "--database-name", "default", "--endpoint", stagingEndpoint, "--disable-auto-state-migration"},
WorkingDirectory: projectDirectory,
})
Eventually(session.Err, 60).Should(Say(`.*error.*`))
Eventually(session, 60*4).Should(Exit())
})
Context("applies metadata and migrations on staging hasura instance", func() {
testutil.RunCommandAndSucceed(testutil.CmdOpts{
Args: []string{"metadata", "apply", "--endpoint", stagingEndpoint},
WorkingDirectory: projectDirectory,
})
// without the flag the state is migrated automatically, so nothing is left to apply
session := testutil.Hasura(testutil.CmdOpts{
Args: []string{"migrate", "apply", "--database-name", "default", "--endpoint", stagingEndpoint},
WorkingDirectory: projectDirectory,
})
Eventually(session, 60*4).Should(Exit(0))
Eventually(session.Wait().Err.Contents()).Should(ContainSubstring("nothing to apply"))
// This now should not trigger a state migration
session = testutil.Hasura(testutil.CmdOpts{
Args: []string{"migrate", "apply", "--database-name", "default", "--endpoint", stagingEndpoint, "--log-level", "debug"},
WorkingDirectory: projectDirectory,
})
Eventually(session, 60*4).Should(Exit(0))
// the debug log proves the state copy was skipped on the second run
Eventually(session.Wait().Err.Contents()).Should(ContainSubstring(`{"level":"debug","msg":"skipping state migration, found IsStateCopyCompleted: true Migrations: map[default:map[1620138136207:false 1620138146208:false 1620138161039:false 1620138169404:false 1620138179776:false 1620138189381:false 1620138199344:false]]"`))
})
Context("applies metadata and migrations on production hasura instance", func() {
testutil.RunCommandAndSucceed(testutil.CmdOpts{
Args: []string{"metadata", "apply", "--endpoint", prodEndpoint},
WorkingDirectory: projectDirectory,
})
// first run with auto state migration disabled fails as on staging
session := testutil.Hasura(testutil.CmdOpts{
Args: []string{"migrate", "apply", "--database-name", "default", "--endpoint", prodEndpoint, "--disable-auto-state-migration"},
WorkingDirectory: projectDirectory,
})
Eventually(session.Err, 60).Should(Say(`.*error.*`))
// the documented recovery path: move only the state, then retry
testutil.RunCommandAndSucceed(testutil.CmdOpts{
Args: []string{"scripts", "update-project-v3", "--database-name", "default", "--force", "--move-state-only"},
WorkingDirectory: projectDirectory,
})
session = testutil.Hasura(testutil.CmdOpts{
Args: []string{"migrate", "apply", "--database-name", "default", "--endpoint", prodEndpoint},
WorkingDirectory: projectDirectory,
})
Eventually(session, 60*4).Should(Exit(0))
Eventually(session.Wait().Err.Contents()).Should(ContainSubstring("nothing to apply"))
})
})
})

View File

@ -0,0 +1,6 @@
version: 2
endpoint: http://localhost:8080
metadata_directory: metadata
actions:
kind: synchronous
handler_webhook_baseurl: http://localhost:3000

View File

@ -0,0 +1,2 @@

View File

@ -0,0 +1,6 @@
actions: []
custom_types:
enums: []
input_objects: []
objects: []
scalars: []

View File

@ -0,0 +1 @@
[]

View File

@ -0,0 +1 @@
[]

View File

@ -0,0 +1 @@
[]

View File

@ -0,0 +1 @@
[]

View File

@ -0,0 +1 @@
[]

View File

@ -0,0 +1 @@
[]

View File

@ -0,0 +1,21 @@
- table:
schema: public
name: t1
- table:
schema: public
name: t2
- table:
schema: public
name: t3
- table:
schema: public
name: t4
- table:
schema: public
name: t5
- table:
schema: public
name: t6
- table:
schema: public
name: t7

View File

@ -0,0 +1 @@
version: 2

View File

@ -0,0 +1 @@
DROP TABLE "public"."t1";

View File

@ -0,0 +1 @@
CREATE TABLE "public"."t1" ("id" serial NOT NULL, "created_at" timestamptz NOT NULL DEFAULT now(), PRIMARY KEY ("id") );

View File

@ -0,0 +1 @@
DROP TABLE "public"."t2";

View File

@ -0,0 +1 @@
CREATE TABLE "public"."t2" ("created_at" timestamptz NOT NULL DEFAULT now(), "id" serial NOT NULL, PRIMARY KEY ("id") );

View File

@ -0,0 +1 @@
DROP TABLE "public"."t3";

View File

@ -0,0 +1 @@
CREATE TABLE "public"."t3" ("id" serial NOT NULL, "created_at" timestamptz NOT NULL DEFAULT now(), PRIMARY KEY ("id") );

View File

@ -0,0 +1 @@
DROP TABLE "public"."t4";

View File

@ -0,0 +1 @@
CREATE TABLE "public"."t4" ("id" serial NOT NULL, "created_at" timestamptz NOT NULL DEFAULT now(), PRIMARY KEY ("id") );

View File

@ -0,0 +1 @@
DROP TABLE "public"."t5";

View File

@ -0,0 +1 @@
CREATE TABLE "public"."t5" ("id" serial NOT NULL, "created_at" timestamptz NOT NULL DEFAULT now(), PRIMARY KEY ("id") );

View File

@ -0,0 +1 @@
DROP TABLE "public"."t6";

View File

@ -0,0 +1 @@
CREATE TABLE "public"."t6" ("id" serial NOT NULL, "created_at" timestamptz NOT NULL DEFAULT now(), PRIMARY KEY ("id") );

View File

@ -0,0 +1 @@
DROP TABLE "public"."t7";

View File

@ -0,0 +1 @@
CREATE TABLE "public"."t7" ("id" serial NOT NULL, "created_at" timestamptz NOT NULL DEFAULT now(), PRIMARY KEY ("id") );

View File

@ -55,7 +55,7 @@ func TestMetadataObject_Build(t *testing.T) {
err := m.Build(tt.args.metadata)
if tt.wantErr {
require.Error(t, err)
}else {
} else {
b, err := yaml.Marshal(tt.args.metadata)
assert.NoError(t, err)
assert.Equal(t, tt.want, string(b))
@ -115,7 +115,7 @@ rate_limit:
got, err := obj.Export(tt.args.metadata)
if tt.wantErr {
require.Error(t, err)
}else {
} else {
require.NoError(t, err)
var wantContent = map[string]string{}
var gotContent = map[string]string{}

View File

@ -11,7 +11,7 @@ import (
/*
V3MetadataTableConfig is responsible for exporting and applying "tables" metadata objects
in config v2 format on a server with v3 metadata
*/
*/
type V3MetadataTableConfig struct {
*TableConfig
}

View File

@ -24,7 +24,7 @@ import (
"github.com/spf13/afero"
)
type UpgradeToMuUpgradeProjectToMultipleSourcesOpts struct {
type UpdateProjectV3Opts struct {
EC *cli.ExecutionContext
Fs afero.Fs
// Path to project directory
@ -32,12 +32,15 @@ type UpgradeToMuUpgradeProjectToMultipleSourcesOpts struct {
// Directory in which migrations are stored
MigrationsAbsDirectoryPath string
SeedsAbsDirectoryPath string
TargetDatabase string
Force bool
MoveStateOnly bool
Logger *logrus.Logger
}
// UpdateProjectV3 will help a project directory move from a single-source
// config v2 project to a multiple-sources config v3 project.
// The project is expected to be in Config V2
func UpdateProjectV3(opts UpgradeToMuUpgradeProjectToMultipleSourcesOpts) error {
func UpdateProjectV3(opts UpdateProjectV3Opts) error {
/* New flow
Config V2 -> Config V3
- Warn user about creating a backup
@ -49,7 +52,7 @@ func UpdateProjectV3(opts UpgradeToMuUpgradeProjectToMultipleSourcesOpts) error
*/
// pre checks
if opts.EC.Config.Version != cli.V2 {
if opts.EC.Config.Version != cli.V2 && !opts.MoveStateOnly {
return fmt.Errorf("project should be using config V2 to be able to update to V3")
}
if !opts.EC.HasMetadataV3 {
@ -68,19 +71,26 @@ func UpdateProjectV3(opts UpgradeToMuUpgradeProjectToMultipleSourcesOpts) error
opts.Logger.Warn(`During the update process CLI uses the server as the source of truth, so make sure your server is upto date`)
opts.Logger.Warn(`The update process replaces project metadata with metadata on the server`)
response, err := util.GetYesNoPrompt("continue?")
if err != nil {
return err
}
if response == "n" {
return nil
if !opts.Force {
response, err := util.GetYesNoPrompt("continue?")
if err != nil {
return err
}
if response == "n" {
return nil
}
}
// move migration child directories
// get directory names to move
targetDatabase, err := util.GetInputPrompt("what database does the current migrations / seeds belong to?")
if err != nil {
return err
targetDatabase := opts.TargetDatabase
if len(targetDatabase) == 0 {
var err error
targetDatabase, err = util.GetInputPrompt("what database does the current migrations / seeds belong to?")
if err != nil {
return err
}
}
opts.EC.Spinner.Start()
opts.EC.Spin("updating project... ")
// copy state
@ -90,9 +100,13 @@ func UpdateProjectV3(opts UpgradeToMuUpgradeProjectToMultipleSourcesOpts) error
return err
}
if len(sources) >= 1 {
if err := copyState(opts.EC, targetDatabase); err != nil {
if err := CopyState(opts.EC, targetDatabase); err != nil {
return err
}
if opts.MoveStateOnly {
opts.EC.Spinner.Stop()
return nil
}
}
// move migration child directories
@ -248,9 +262,9 @@ func isHasuraCLIGeneratedMigration(dirPath string) (bool, error) {
return regexp.MatchString(regex, filepath.Base(dirPath))
}
func copyState(ec *cli.ExecutionContext, destdatabase string) error {
func CopyState(ec *cli.ExecutionContext, destdatabase string) error {
// copy migrations state
src := cli.GetMigrationsStateStore(ec)
src := migrations.NewMigrationStateStoreHdbTable(ec.APIClient.V2Query, migrations.DefaultSchema, migrations.DefaultMigrationsTable)
if err := src.PrepareMigrationsStateStore(); err != nil {
return err
}
@ -275,6 +289,14 @@ func copyState(ec *cli.ExecutionContext, destdatabase string) error {
if err != nil {
return err
}
cliState, err := statestore.NewCLICatalogState(ec.APIClient.V1Metadata).Get()
if err != nil {
return fmt.Errorf("error while fetching catalog state: %v", err)
}
cliState.IsStateCopyCompleted = true
if _, err := statestore.NewCLICatalogState(ec.APIClient.V1Metadata).Set(*cliState); err != nil {
return fmt.Errorf("cannot set catalog state: %v", err)
}
return nil
}

View File

@ -263,8 +263,8 @@ func Test_copyState(t *testing.T) {
dstMigrations := migrations.NewCatalogStateStore(statestore.NewCLICatalogState(tt.args.ec.APIClient.V1Metadata))
assert.NoError(t, srcSettings.UpdateSetting("test", "test"))
assert.NoError(t, srcMigrations.SetVersion("", 123, false))
if err := copyState(tt.args.ec, tt.args.destdatabase); (err != nil) != tt.wantErr {
t.Fatalf("copyState() error = %v, wantErr %v", err, tt.wantErr)
if err := CopyState(tt.args.ec, tt.args.destdatabase); (err != nil) != tt.wantErr {
t.Fatalf("CopyState() error = %v, wantErr %v", err, tt.wantErr)
}
v, err := dstSettings.GetSetting("test")
assert.NoError(t, err)

View File

@ -9,6 +9,11 @@ import (
"github.com/hasura/graphql-engine/cli/migrate/database"
)
const (
DefaultMigrationsTable = "schema_migrations"
DefaultSchema = "hdb_catalog"
)
// until version 1.4 migration state was stored in a special table
// this struct will implement the methods required
type MigrationStateStoreHdbTable struct {

View File

@ -59,8 +59,14 @@ func (c *CLICatalogState) Set(state CLIState) (io.Reader, error) {
type MigrationsState map[string]map[string]bool
type CLIState struct {
Migrations MigrationsState `json:"migrations" mapstructure:"migrations"`
Settings map[string]string `json:"settings" mapstructure:"settings"`
Migrations MigrationsState `json:"migrations,omitempty" mapstructure:"migrations,omitempty"`
Settings map[string]string `json:"settings" mapstructure:"settings"`
// IsStateCopyCompleted is a utility variable
// pre config v3 state was stored in users database connected to hasura in `hdb_catalog.*` tables
// this variable is set to true when state copy happens from hdb_catalog.* tables
// this process is carried out during a scripts update-project-v3 command or an implicit state copy
// introduced in https://github.com/hasura/graphql-engine-mono/pull/1298
IsStateCopyCompleted bool `json:"isStateCopyCompleted" mapstructure:"isStateCopyCompleted"`
}
func (c *CLIState) Init() {

View File

@ -115,7 +115,7 @@ func StartHasura(t TestingT, version string) (port string, teardown func()) {
return hasura.GetPort("8080/tcp"), teardown
}
func StartHasuraWithMetadataDatabase(t *testing.T, version string) (port string, teardown func()) {
func StartHasuraWithMetadataDatabase(t TestingT, version string) (port string, teardown func()) {
if len(version) == 0 {
t.Fatal("no hasura version provided, probably use testutil.HasuraVersion")
}
@ -208,7 +208,7 @@ func StartHasuraWithMSSQLSource(t *testing.T, version string) (string, string, f
mssqlTeardown()
}
connectionString := fmt.Sprintf("DRIVER={ODBC Driver 17 for SQL Server};SERVER=%s,%s;DATABASE=master;Uid=SA;Pwd=%s;Encrypt=no", DockerSwitchIP, mssqlPort, MSSQLPassword)
addSourceToHasura(t, fmt.Sprintf("%s:%s", BaseURL, hasuraPort), connectionString, sourcename)
addMSSQLSourceToHasura(t, fmt.Sprintf("%s:%s", BaseURL, hasuraPort), connectionString, sourcename)
return hasuraPort, sourcename, teardown
}
@ -257,7 +257,49 @@ func startMSSQLContainer(t *testing.T) (string, func()) {
return mssql.GetPort("1433/tcp"), teardown
}
func addSourceToHasura(t *testing.T, hasuraEndpoint, connectionString, sourceName string) {
// StartPGContainer starts a postgres docker container with the given
// credentials and database name. It returns the mapped host port of the
// database and a teardown function which removes the container.
func StartPGContainer(t TestingT, user, password, database string) (string, func()) {
	pool, err := dockertest.NewPool("")
	if err != nil {
		t.Fatalf("Could not connect to docker: %s", err)
	}
	uniqueName := getUniqueName(t)
	pgopts := &dockertest.RunOptions{
		Name:       fmt.Sprintf("%s-%s", uniqueName, "pg"),
		Repository: "postgres",
		Tag:        "11",
		Env: []string{
			fmt.Sprintf("POSTGRES_USER=%s", user),
			fmt.Sprintf("POSTGRES_PASSWORD=%s", password),
			fmt.Sprintf("POSTGRES_DB=%s", database),
		},
		ExposedPorts: []string{"5432"},
	}
	pg, err := pool.RunWithOptions(pgopts)
	if err != nil {
		t.Fatalf("Could not start resource: %s", err)
	}
	// wait until the database accepts connections; use the caller-supplied
	// credentials (the previous version hard-coded test:test/test, which
	// broke the readiness check for any other user/password/database)
	var db *sql.DB
	if err = pool.Retry(func() error {
		var err error
		db, err = sql.Open("postgres", fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=disable", user, password, "0.0.0.0", pg.GetPort("5432/tcp"), database))
		if err != nil {
			return err
		}
		return db.Ping()
	}); err != nil {
		t.Fatal(err)
	}
	// the connection was only needed for the readiness probe; close it so the
	// test process does not leak connections to the container
	if db != nil {
		db.Close()
	}
	teardown := func() {
		if err = pool.Purge(pg); err != nil {
			t.Fatalf("Could not purge resource: %s", err)
		}
	}
	return pg.GetPort("5432/tcp"), teardown
}
func addMSSQLSourceToHasura(t *testing.T, hasuraEndpoint, connectionString, sourceName string) {
url := fmt.Sprintf("%s/v1/metadata", hasuraEndpoint)
body := fmt.Sprintf(`
{
@ -294,6 +336,49 @@ func addSourceToHasura(t *testing.T, hasuraEndpoint, connectionString, sourceNam
t.Fatalf("cannot add mssql source to hasura: %s", string(body))
}
}
// AddPGSourceToHasura adds a postgres database reachable via connectionString
// as a source named sourceName on the hasura instance at hasuraEndpoint,
// using the pg_add_source metadata API. The admin secret from
// HASURA_GRAPHQL_TEST_ADMIN_SECRET is forwarded when set. Fails the test on
// any transport or non-200 response.
func AddPGSourceToHasura(t TestingT, hasuraEndpoint, connectionString, sourceName string) {
	url := fmt.Sprintf("%s/v1/metadata", hasuraEndpoint)
	body := fmt.Sprintf(`
{
"type": "pg_add_source",
"args": {
"name": "%s",
"configuration": {
"connection_info": {
"database_url": "%s"
}
}
}
}
`, sourceName, connectionString)
	req, err := http.NewRequest("POST", url, strings.NewReader(body))
	if err != nil {
		t.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	adminSecret := os.Getenv("HASURA_GRAPHQL_TEST_ADMIN_SECRET")
	if adminSecret != "" {
		req.Header.Set("x-hasura-admin-secret", adminSecret)
	}
	r, err := http.DefaultClient.Do(req)
	if err != nil {
		t.Fatal(err)
	}
	// close the body on every path (the previous version only closed it on
	// the error branch, leaking the connection on success)
	defer r.Body.Close()
	if r.StatusCode != http.StatusOK {
		respBody, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Fatal(err)
		}
		t.Fatalf("cannot add pg source to hasura: %s", string(respBody))
	}
}
func NewHttpcClient(t *testing.T, port string, headers map[string]string) *httpc.Client {
adminSecret := os.Getenv("HASURA_GRAPHQL_TEST_ADMIN_SECRET")
if headers == nil {
@ -316,4 +401,4 @@ func getUniqueName(t TestingT) string {
t.Fatalf("Could not connect to docker: %s", err)
}
return u.String() + "-" + randomdata.SillyName()
}
}

View File

@ -2,6 +2,9 @@ package migrate
import (
"fmt"
"github.com/hasura/graphql-engine/cli/internal/scripts"
"github.com/hasura/graphql-engine/cli/internal/statestore"
"github.com/hasura/graphql-engine/cli/internal/statestore/migrations"
nurl "net/url"
"os"
"path/filepath"
@ -164,6 +167,13 @@ func NewMigrate(ec *cli.ExecutionContext, isCmd bool, sourceName string, sourceK
if ec.Config.Version >= cli.V2 {
t.databaseDrv.EnableCheckMetadataConsistency(true)
}
if ok, err := copyStateToCatalogStateAPIIfRequired(ec, sourceName); err != nil {
ec.Logger.Warn(err)
} else if ok {
if err := t.ReScan(); err != nil {
return nil, err
}
}
return t, nil
}
@ -210,3 +220,76 @@ func IsMigrationsSupported(kind hasura.SourceKind) bool {
}
return false
}
// copyStateToCatalogStateAPIIfRequired moves cli migration state from
// hdb_catalog.schema_migrations to the catalog state API when a project that
// was updated from config v2 to v3 has not had its state moved yet. It
// returns true when state was copied, so the caller can rescan its state.
// more details in: https://github.com/hasura/graphql-engine/issues/6861
func copyStateToCatalogStateAPIIfRequired(ec *cli.ExecutionContext, sourceName string) (bool, error) {
	// act only when
	// - automatic state migration is not disabled
	// - the project is in config v3
	// - the source name is "default" (the only source a config v2 project could use)
	if ec.DisableAutoStateMigration || ec.Config.Version < cli.V3 || sourceName != "default" {
		return false, nil
	}
	// get cli catalog state and check whether the copy already happened
	cs := statestore.NewCLICatalogState(ec.APIClient.V1Metadata)
	state, err := cs.Get()
	if err != nil {
		return false, err
	}
	if state.IsStateCopyCompleted {
		ec.Logger.Debugf("skipping state migration, found IsStateCopyCompleted: %v Migrations: %v", state.IsStateCopyCompleted, state.Migrations)
		return false, nil
	}
	// if control reaches this point we'll set IsStateCopyCompleted to true:
	// this makes sure we only attempt the automatic state migration once,
	// leaving it to the user to correct errors and use
	// `scripts update-project-v3 --move-state-only` to move state.
	//
	// this also makes sure new config v3 projects will not repeatedly reach
	// this block: for example, a user connecting a custom source named
	// "default" with no read permission on hdb_catalog.schema_migrations
	// will hit an error on the first run, but IsStateCopyCompleted is still
	// marked true so the block is not run again.
	markStateMigrationCompleted := func() error {
		state.IsStateCopyCompleted = true
		if _, err := cs.Set(*state); err != nil {
			return fmt.Errorf("error setting state: %w", err)
		}
		return nil
	}
	// check whether the pre-v3 migrations state table exists
	query := hasura.PGRunSQLInput{
		SQL: `SELECT COUNT(1) FROM information_schema.tables WHERE table_name = '` + migrations.DefaultMigrationsTable + `' AND table_schema = '` + migrations.DefaultSchema + `' LIMIT 1`,
	}
	runsqlResp, err := ec.APIClient.V2Query.PGRunSQL(query)
	if err != nil {
		ec.Logger.Warn("encountered error when trying to move migrations from hdb_catalog.schema_migrations to catalog state\n", err,
			"\nnote: ignore this if you are not updating your project from config v2 -> config v3")
		// logrus Debug does not do printf-style formatting; Debugf with %v is
		// needed (and %w is only meaningful inside fmt.Errorf)
		ec.Logger.Debugf("marking IsStateCopyCompleted as true %v", markStateMigrationCompleted())
		return false, nil
	}
	if runsqlResp.ResultType != hasura.TuplesOK {
		ec.Logger.Warn("encountered error when trying to move migrations from hdb_catalog.schema_migrations to catalog state", fmt.Errorf("invalid result Type %s", runsqlResp.ResultType),
			"\nnote: ignore this if you are not updating your project from config v2 -> config v3")
		ec.Logger.Debugf("marking IsStateCopyCompleted as true %v", markStateMigrationCompleted())
		return false, nil
	}
	result := runsqlResp.Result
	// result[0] is the header row; guard against a malformed response before
	// indexing result[1][0] (the previous version could panic here)
	if len(result) < 2 || len(result[1]) < 1 {
		ec.Logger.Debugf("unexpected response %v when checking for hdb_catalog.schema_migrations, skipping state migration", result)
		ec.Logger.Debugf("marking IsStateCopyCompleted as true %v", markStateMigrationCompleted())
		return false, nil
	}
	if result[1][0] == "0" {
		// hdb_catalog.schema_migrations doesn't exist
		ec.Logger.Debug("hdb_catalog.schema_migrations was not found, skipping state migration")
		ec.Logger.Debugf("marking IsStateCopyCompleted as true %v", markStateMigrationCompleted())
		return false, nil
	}
	ec.Logger.Debug("copying cli state from hdb_catalog.schema_migrations to catalog state")
	// COPY STATE (CopyState itself marks IsStateCopyCompleted on success)
	if err := scripts.CopyState(ec, sourceName); err != nil {
		return false, err
	}
	ec.Logger.Debug("copying cli state from hdb_catalog.schema_migrations to catalog state success")
	return true, nil
}

View File

@ -2,6 +2,7 @@
"settings": {
"migration_mode": "true"
},
"isStateCopyCompleted": true,
"migrations": {
"default": {
"1616826329751": false