From cc3539415bbb283a482a22045ccc2e2c70cb6138 Mon Sep 17 00:00:00 2001 From: Aravind K P Date: Thu, 1 Apr 2021 16:08:55 +0530 Subject: [PATCH] cli: fix seeds incorrectly being applied to databases GitOrigin-RevId: cce2612ddfd90d1fd10a0a3561d24c64569a6935 --- CHANGELOG.md | 1 + cli/cli.go | 9 ++ cli/commands/seed.go | 11 ++ cli/commands/seed_apply.go | 12 +- cli/commands/seed_create.go | 21 ++- cli/integration_test/v2/seeds.go | 5 +- cli/integration_test/v3/seeds.go | 3 + cli/internal/hasura/client.go | 3 + cli/internal/hasura/pgdump.go | 15 ++ cli/internal/hasura/pgdump/pgdump.go | 46 ++++++ cli/internal/hasura/pgdump/pgdump_test.go | 111 ++++++++++++++ cli/internal/hasura/v1query/v1_query.go | 22 +++ cli/internal/hasura/v1query/v1_query_test.go | 135 +++++++++++++++++ cli/internal/hasura/v2query/v2_query_test.go | 111 ++++++++++++++ cli/internal/hasura/v2query/v2query.go | 19 +++ cli/internal/httpc/httpc.go | 34 +++-- cli/migrate/database/driver.go | 2 - cli/migrate/database/hasuradb/seed.go | 55 ------- cli/migrate/database/seed.go | 8 - cli/migrate/migrate.go | 27 ---- cli/seed/apply.go | 98 ++++++++---- cli/seed/apply_test.go | 150 +++++++++++++++++++ cli/seed/create.go | 38 +++++ cli/seed/create_test.go | 110 ++++++++++++++ cli/seed/seed.go | 17 +++ cli/seed/testdata/seeds/articles.sql | 14 ++ cli/seed/testdata/seeds/authors.sql | 10 ++ 27 files changed, 938 insertions(+), 149 deletions(-) create mode 100644 cli/internal/hasura/pgdump.go create mode 100644 cli/internal/hasura/pgdump/pgdump.go create mode 100644 cli/internal/hasura/pgdump/pgdump_test.go delete mode 100644 cli/migrate/database/hasuradb/seed.go delete mode 100644 cli/migrate/database/seed.go create mode 100644 cli/seed/apply_test.go create mode 100644 cli/seed/create_test.go create mode 100644 cli/seed/seed.go create mode 100644 cli/seed/testdata/seeds/articles.sql create mode 100644 cli/seed/testdata/seeds/authors.sql diff --git a/CHANGELOG.md b/CHANGELOG.md index 015718845f2..9eab8af0adc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ - server: update pg_dump clean output to disable function body validation in create function statements to avoid errors due to forward references - server: fix a bug preventing some MSSQL foreign key relationships from being tracked - console: add a comment field for actions (#231) +- cli: fix seeds incorrectly being applied to databases in config v3 (#6683) ## v2.0.0-alpha.6 diff --git a/cli/cli.go b/cli/cli.go index 634e7577283..f8d688275ba 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -23,6 +23,8 @@ import ( "strings" "time" + "github.com/hasura/graphql-engine/cli/internal/hasura/pgdump" + "github.com/hasura/graphql-engine/cli/migrate/database/hasuradb" "github.com/hasura/graphql-engine/cli/internal/hasura/v1metadata" @@ -249,6 +251,12 @@ func (s *ServerConfig) GetV2QueryEndpoint() string { return nurl.String() } +func (s *ServerConfig) GetPGDumpEndpoint() string { + nurl := *s.ParsedEndpoint + nurl.Path = path.Join(nurl.Path, s.APIPaths.PGDump) + return nurl.String() +} + // GetQueryEndpoint provides the url to contact the query API func (s *ServerConfig) GetV1MetadataEndpoint() string { nurl := *s.ParsedEndpoint @@ -681,6 +689,7 @@ func (ec *ExecutionContext) Validate() error { V1Metadata: v1metadata.New(httpClient, ec.Config.GetV1MetadataEndpoint()), V1Query: v1query.New(httpClient, ec.Config.GetV1QueryEndpoint()), V2Query: v2query.New(httpClient, ec.Config.GetV2QueryEndpoint()), + PGDump: pgdump.New(httpClient, ec.Config.GetPGDumpEndpoint()), } var state 
*util.ServerState if ec.HasMetadataV3 { diff --git a/cli/commands/seed.go b/cli/commands/seed.go index 373269fb402..e6d2674ec9c 100644 --- a/cli/commands/seed.go +++ b/cli/commands/seed.go @@ -3,6 +3,8 @@ package commands import ( "fmt" + "github.com/hasura/graphql-engine/cli/seed" + "github.com/hasura/graphql-engine/cli" "github.com/hasura/graphql-engine/cli/internal/hasura" "github.com/hasura/graphql-engine/cli/internal/metadatautil" @@ -76,3 +78,12 @@ func NewSeedCmd(ec *cli.ExecutionContext) *cobra.Command { return seedCmd } + +func getSeedDriver(configVersion cli.ConfigVersion) (driver *seed.Driver) { + if configVersion >= cli.V3 { + driver = seed.NewDriver(ec.APIClient.V2Query.Bulk, ec.APIClient.PGDump) + } else { + driver = seed.NewDriver(ec.APIClient.V1Query.Bulk, ec.APIClient.PGDump) + } + return driver +} diff --git a/cli/commands/seed_apply.go b/cli/commands/seed_apply.go index 71362c772bf..3920598f55c 100644 --- a/cli/commands/seed_apply.go +++ b/cli/commands/seed_apply.go @@ -1,8 +1,6 @@ package commands import ( - "github.com/hasura/graphql-engine/cli/internal/hasura" - "github.com/hasura/graphql-engine/cli/migrate" "github.com/spf13/afero" "github.com/spf13/cobra" @@ -11,7 +9,8 @@ import ( ) type SeedApplyOptions struct { - EC *cli.ExecutionContext + EC *cli.ExecutionContext + Driver *seed.Driver // seed file to apply FileNames []string @@ -35,6 +34,7 @@ func newSeedApplyCmd(ec *cli.ExecutionContext) *cobra.Command { return ec.Validate() }, RunE: func(cmd *cobra.Command, args []string) error { + opts.Driver = getSeedDriver(ec.Config.Version) opts.EC.Spin("Applying seeds...") opts.Source = ec.Source err := opts.Run() @@ -51,10 +51,6 @@ func newSeedApplyCmd(ec *cli.ExecutionContext) *cobra.Command { } func (o *SeedApplyOptions) Run() error { - migrateDriver, err := migrate.NewMigrate(o.EC, true, "", hasura.SourceKindPG) - if err != nil { - return err - } fs := afero.NewOsFs() - return seed.ApplySeedsToDatabase(o.EC, fs, migrateDriver, o.FileNames, o.Source.Name) + return o.Driver.ApplySeedsToDatabase(fs, o.EC.SeedsDirectory, o.FileNames, o.EC.Source) } diff --git a/cli/commands/seed_create.go b/cli/commands/seed_create.go index 6433d48b339..3ac3735b18e 100644 --- a/cli/commands/seed_create.go +++ b/cli/commands/seed_create.go @@ -2,10 +2,13 @@ package commands import ( "bytes" + "fmt" + "io/ioutil" "os" "path/filepath" - "github.com/hasura/graphql-engine/cli/migrate" + "github.com/hasura/graphql-engine/cli/internal/hasura" + "github.com/pkg/errors" "github.com/spf13/afero" "github.com/spf13/cobra" @@ -16,7 +19,8 @@ import ( ) type SeedNewOptions struct { - EC *cli.ExecutionContext + EC *cli.ExecutionContext + Driver *seed.Driver // filename for the new seed file SeedName string @@ -51,6 +55,7 @@ func newSeedCreateCmd(ec *cli.ExecutionContext) *cobra.Command { RunE: func(cmd *cobra.Command, args []string) error { opts.SeedName = args[0] opts.Source = ec.Source + opts.Driver = getSeedDriver(ec.Config.Version) err := opts.Run() if err != nil { return err @@ -76,21 +81,23 @@ func (o *SeedNewOptions) Run() error { UserProvidedSeedName: o.SeedName, DirectoryPath: filepath.Join(o.EC.SeedsDirectory, o.Source.Name), } - // If we are initializing from a database table // create a hasura client and add table name opts if createSeedOpts.Data == nil { var body []byte if len(o.FromTableNames) > 0 { - migrateDriver, err := migrate.NewMigrate(ec, true, o.Source.Name, o.Source.Kind) - if err != nil { - return errors.Wrap(err, "cannot initialize migrate driver") + if o.Source.Kind != 
hasura.SourceKindPG && o.EC.Config.Version >= cli.V3 { + return fmt.Errorf("--from-table is supported only for postgres sources") } // Send the query - body, err = migrateDriver.ExportDataDump(o.FromTableNames, o.Source.Name, o.Source.Kind) + bodyReader, err := o.Driver.ExportDatadump(o.FromTableNames, o.Source.Name) if err != nil { return errors.Wrap(err, "exporting seed data") } + body, err = ioutil.ReadAll(bodyReader) + if err != nil { + return err + } } else { const defaultText = "" var err error diff --git a/cli/integration_test/v2/seeds.go b/cli/integration_test/v2/seeds.go index 6b329aa1d79..9eb11f788ad 100644 --- a/cli/integration_test/v2/seeds.go +++ b/cli/integration_test/v2/seeds.go @@ -124,7 +124,8 @@ func TestSeedsApplyCmd(t *testing.T, ec *cli.ExecutionContext) { { "can apply all seeds", &commands.SeedApplyOptions{ - EC: ec, + EC: ec, + Driver: seed.NewDriver(ec.APIClient.V1Query.Bulk, ec.APIClient.PGDump), }, false, }, @@ -133,6 +134,7 @@ func TestSeedsApplyCmd(t *testing.T, ec *cli.ExecutionContext) { &commands.SeedApplyOptions{ EC: ec, FileNames: []string{"1591867862409_test.sql"}, + Driver: seed.NewDriver(ec.APIClient.V1Query.Bulk, ec.APIClient.PGDump), }, false, }, @@ -141,6 +143,7 @@ func TestSeedsApplyCmd(t *testing.T, ec *cli.ExecutionContext) { &commands.SeedApplyOptions{ EC: ec, FileNames: []string{"1591867862419_test2.sql"}, + Driver: seed.NewDriver(ec.APIClient.V1Query.Bulk, ec.APIClient.PGDump), }, true, }, diff --git a/cli/integration_test/v3/seeds.go b/cli/integration_test/v3/seeds.go index 47306339253..19239cacaca 100644 --- a/cli/integration_test/v3/seeds.go +++ b/cli/integration_test/v3/seeds.go @@ -130,6 +130,7 @@ func TestSeedsApplyCmd(t *testing.T, ec *cli.ExecutionContext) { &commands.SeedApplyOptions{ EC: ec, Source: cli.Source{Name: "default"}, + Driver: seed.NewDriver(ec.APIClient.V2Query.Bulk, ec.APIClient.PGDump), }, false, }, @@ -138,6 +139,7 @@ func TestSeedsApplyCmd(t *testing.T, ec *cli.ExecutionContext) { &commands.SeedApplyOptions{ EC: ec, FileNames: []string{"1591867862409_test.sql"}, + Driver: seed.NewDriver(ec.APIClient.V2Query.Bulk, ec.APIClient.PGDump), }, false, }, @@ -146,6 +148,7 @@ func TestSeedsApplyCmd(t *testing.T, ec *cli.ExecutionContext) { &commands.SeedApplyOptions{ EC: ec, FileNames: []string{"1591867862419_test2.sql"}, + Driver: seed.NewDriver(ec.APIClient.V2Query.Bulk, ec.APIClient.PGDump), }, true, }, diff --git a/cli/internal/hasura/client.go b/cli/internal/hasura/client.go index e4e3e55f297..bd35d9e57f0 100644 --- a/cli/internal/hasura/client.go +++ b/cli/internal/hasura/client.go @@ -12,12 +12,14 @@ type Client struct { V1Metadata V1Metadata V1Query V1Query V2Query V2Query + PGDump PGDump } type V1Query interface { CommonMetadataOperations PGSourceOps Send(requestBody interface{}) (httpcResponse *httpc.Response, body io.Reader, error error) + Bulk([]RequestBody) (io.Reader, error) } type V1Metadata interface { @@ -43,6 +45,7 @@ type V2Query interface { PGSourceOps MSSQLSourceOps Send(requestBody interface{}) (httpcResponse *httpc.Response, body io.Reader, error error) + Bulk([]RequestBody) (io.Reader, error) } type RequestBody struct { diff --git a/cli/internal/hasura/pgdump.go b/cli/internal/hasura/pgdump.go new file mode 100644 index 00000000000..2bf52bdff04 --- /dev/null +++ b/cli/internal/hasura/pgdump.go @@ -0,0 +1,15 @@ +package hasura + +import ( + "io" +) + +type PGDumpRequest struct { + Opts []string `json:"opts"` + CleanOutput bool `json:"clean_output"` + SourceName string `json:"source,omitempty"` +} + +type 
PGDump interface { + Send(request PGDumpRequest) (responseBody io.Reader, error error) +} diff --git a/cli/internal/hasura/pgdump/pgdump.go b/cli/internal/hasura/pgdump/pgdump.go new file mode 100644 index 00000000000..658fe204a2c --- /dev/null +++ b/cli/internal/hasura/pgdump/pgdump.go @@ -0,0 +1,46 @@ +package pgdump + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + + "github.com/hasura/graphql-engine/cli/internal/httpc" + + "github.com/hasura/graphql-engine/cli/internal/hasura" +) + +type Client struct { + *httpc.Client + path string +} + +func New(client *httpc.Client, path string) *Client { + return &Client{client, path} +} + +func (c *Client) send(body interface{}, responseBodyWriter io.Writer) (*httpc.Response, error) { + req, err := c.NewRequest(http.MethodPost, c.path, body) + if err != nil { + return nil, err + } + resp, err := c.LockAndDo(context.Background(), req, responseBodyWriter) + if err != nil { + return nil, err + } + return resp, nil +} + +func (c *Client) Send(request hasura.PGDumpRequest) (io.Reader, error) { + responseBody := new(bytes.Buffer) + response, err := c.send(request, responseBody) + if err != nil { + return nil, err + } + if response.StatusCode != http.StatusOK { + return nil, fmt.Errorf("pg_dump request: %d %s", response.StatusCode, responseBody.String()) + } + return responseBody, nil +} diff --git a/cli/internal/hasura/pgdump/pgdump_test.go b/cli/internal/hasura/pgdump/pgdump_test.go new file mode 100644 index 00000000000..854876aedf8 --- /dev/null +++ b/cli/internal/hasura/pgdump/pgdump_test.go @@ -0,0 +1,111 @@ +package pgdump + +import ( + "io/ioutil" + "testing" + + pg "github.com/hasura/graphql-engine/cli/internal/hasura/sourceops/postgres" + + "github.com/hasura/graphql-engine/cli/internal/testutil" + + "github.com/stretchr/testify/require" + + "github.com/hasura/graphql-engine/cli/internal/hasura" + "github.com/hasura/graphql-engine/cli/internal/httpc" +) + +func TestClient_Send(t *testing.T) { + portHasuraV13, teardown13 := testutil.StartHasura(t, "v1.3.3") + defer teardown13() + portHasuraLatest, teardownLatest := testutil.StartHasura(t, testutil.HasuraVersion) + defer teardownLatest() + type fields struct { + Client *httpc.Client + path string + } + type args struct { + request hasura.PGDumpRequest + } + + pgclient := pg.New(testutil.NewHttpcClient(t, portHasuraV13, nil), "v1/query") + sqlInput := hasura.PGRunSQLInput{ + SQL: `CREATE TABLE test ( + section NUMERIC NOT NULL, + id1 NUMERIC NOT NULL, + id2 NUMERIC NOT NULL +);`, + } + _, err := pgclient.PGRunSQL(sqlInput) + require.NoError(t, err) + pgclient = pg.New(testutil.NewHttpcClient(t, portHasuraLatest, nil), "v2/query") + _, err = pgclient.PGRunSQL(sqlInput) + require.NoError(t, err) + + tests := []struct { + name string + fields fields + args args + want string + wantErr bool + }{ + { + "can make a pg_dump v1.3.3", + fields{ + Client: testutil.NewHttpcClient(t, portHasuraV13, nil), + path: "/v1alpha1/pg_dump", + }, + args{ + request: hasura.PGDumpRequest{ + Opts: []string{"--schema-only", "--table", "test"}, + CleanOutput: true, + }, + }, + `CREATE TABLE public.test ( + section numeric NOT NULL, + id1 numeric NOT NULL, + id2 numeric NOT NULL +); +ALTER TABLE public.test OWNER TO postgres; +`, + false, + }, + { + "can make a pg_dump on latest", + fields{ + Client: testutil.NewHttpcClient(t, portHasuraLatest, nil), + path: "/v1alpha1/pg_dump", + }, + args{ + request: hasura.PGDumpRequest{ + Opts: []string{"--schema-only", "--table", "test"}, + CleanOutput: true, + }, + }, + 
`CREATE TABLE public.test ( + section numeric NOT NULL, + id1 numeric NOT NULL, + id2 numeric NOT NULL +); +ALTER TABLE public.test OWNER TO postgres; +`, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Client{ + Client: tt.fields.Client, + path: tt.fields.path, + } + got, err := c.Send(tt.args.request) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + gotb, err := ioutil.ReadAll(got) + require.NoError(t, err) + require.Equal(t, tt.want, string(gotb)) + } + }) + } +} diff --git a/cli/internal/hasura/v1query/v1_query.go b/cli/internal/hasura/v1query/v1_query.go index 7ae4ef2a20c..250991e1051 100644 --- a/cli/internal/hasura/v1query/v1_query.go +++ b/cli/internal/hasura/v1query/v1_query.go @@ -3,9 +3,12 @@ package v1query import ( "bytes" "context" + "fmt" "io" "net/http" + "github.com/hasura/graphql-engine/cli/internal/hasura" + "github.com/hasura/graphql-engine/cli/internal/hasura/commonmetadata" "github.com/hasura/graphql-engine/cli/internal/hasura/sourceops/postgres" "github.com/hasura/graphql-engine/cli/internal/httpc" @@ -42,3 +45,22 @@ func (c *Client) Send(body interface{}) (*httpc.Response, io.Reader, error) { } return resp, responseBody, nil } + +func (c *Client) Bulk(args []hasura.RequestBody) (io.Reader, error) { + body := hasura.RequestBody{ + Type: "bulk", + Args: args, + } + req, err := c.NewRequest(http.MethodPost, c.path, body) + if err != nil { + return nil, err + } + responseBody := new(bytes.Buffer) + resp, err := c.LockAndDo(context.Background(), req, responseBody) + if err != nil { + return nil, err + } else if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("bulk request failed: %v %v", resp.StatusCode, responseBody.String()) + } + return responseBody, nil +} diff --git a/cli/internal/hasura/v1query/v1_query_test.go b/cli/internal/hasura/v1query/v1_query_test.go index 963e150041e..aa5d3472c64 100644 --- a/cli/internal/hasura/v1query/v1_query_test.go +++ b/cli/internal/hasura/v1query/v1_query_test.go @@ -6,6 +6,11 @@ import ( "net/http" "testing" + "github.com/stretchr/testify/require" + + "github.com/hasura/graphql-engine/cli/internal/hasura" + + "github.com/hasura/graphql-engine/cli/internal/hasura/sourceops/postgres" pg "github.com/hasura/graphql-engine/cli/internal/hasura/sourceops/postgres" "github.com/hasura/graphql-engine/cli/internal/hasura/commonmetadata" @@ -83,3 +88,133 @@ func TestClient_Send(t *testing.T) { }) } } + +func TestClient_Bulk(t *testing.T) { + port, teardown := testutil.StartHasura(t, "v1.3.3") + defer teardown() + type fields struct { + Client *httpc.Client + path string + SourceOps *postgres.SourceOps + ClientCommonMetadataOps *commonmetadata.ClientCommonMetadataOps + } + type args struct { + args []hasura.RequestBody + } + tests := []struct { + name string + fields fields + args args + want string + wantErr bool + }{ + { + "can send a bulk request", + fields{ + Client: func() *httpc.Client { + c, err := httpc.New(nil, fmt.Sprintf("http://localhost:%s/", port), nil) + if err != nil { + t.Fatal(err) + } + return c + }(), + path: "v1/query", + }, + args{ + args: []hasura.RequestBody{ + { + Type: "run_sql", + Version: 0, + Args: hasura.PGRunSQLInput{ + SQL: "select 1", + }, + }, + { + Type: "run_sql", + Version: 0, + Args: hasura.PGRunSQLInput{ + SQL: "select 1", + }, + }, + }, + }, + `[ + { + "result_type": "TuplesOk", + "result": [ + [ + "?column?" + ], + [ + "1" + ] + ] + }, + { + "result_type": "TuplesOk", + "result": [ + [ + "?column?" 
+ ], + [ + "1" + ] + ] + } +]`, + false, + }, + { + "can throw error on a bad request", + fields{ + Client: func() *httpc.Client { + c, err := httpc.New(nil, fmt.Sprintf("http://localhost:%s/", port), nil) + if err != nil { + t.Fatal(err) + } + return c + }(), + path: "v1/query", + }, + args{ + args: []hasura.RequestBody{ + { + Type: "run_sql", + Version: 0, + Args: hasura.PGRunSQLInput{ + SQL: "select something crazy!", + }, + }, + { + Type: "run_sql", + Version: 0, + Args: hasura.PGRunSQLInput{ + SQL: "select 1", + }, + }, + }, + }, + ``, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Client{ + Client: tt.fields.Client, + path: tt.fields.path, + SourceOps: tt.fields.SourceOps, + ClientCommonMetadataOps: tt.fields.ClientCommonMetadataOps, + } + got, err := c.Bulk(tt.args.args) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + gotb, err := ioutil.ReadAll(got) + require.NoError(t, err) + require.Equal(t, tt.want, string(gotb)) + } + }) + } +} diff --git a/cli/internal/hasura/v2query/v2_query_test.go b/cli/internal/hasura/v2query/v2_query_test.go index cc8f6a7cc05..67636b661f9 100644 --- a/cli/internal/hasura/v2query/v2_query_test.go +++ b/cli/internal/hasura/v2query/v2_query_test.go @@ -6,6 +6,9 @@ import ( "net/http" "testing" + "github.com/hasura/graphql-engine/cli/internal/hasura" + "github.com/stretchr/testify/require" + pg "github.com/hasura/graphql-engine/cli/internal/hasura/sourceops/postgres" "github.com/hasura/graphql-engine/cli/internal/hasura/commonmetadata" @@ -90,3 +93,111 @@ func TestClient_Send(t *testing.T) { }) } } + +func TestClient_Bulk(t *testing.T) { + port, mssqlSourceName, teardown := testutil.StartHasuraWithMSSQLSource(t, testutil.HasuraVersion) + defer teardown() + type fields struct { + Client *httpc.Client + path string + } + type args struct { + args []hasura.RequestBody + } + tests := []struct { + name string + fields fields + args args + want string + wantErr bool + }{ + { + "can send a bulk request", + fields{ + Client: func() *httpc.Client { + c, err := httpc.New(nil, fmt.Sprintf("http://localhost:%s/", port), nil) + if err != nil { + t.Fatal(err) + } + return c + }(), + path: "v2/query", + }, + args{ + args: []hasura.RequestBody{ + { + Type: "mssql_run_sql", + Args: hasura.PGRunSQLInput{ + SQL: "select 1", + Source: mssqlSourceName, + }, + }, + }, + }, + `[ + { + "result_type": "TuplesOk", + "result": [ + [ + "" + ], + [ + 1 + ] + ] + } +]`, + false, + }, + { + "can throw error on a bad request", + fields{ + Client: func() *httpc.Client { + c, err := httpc.New(nil, fmt.Sprintf("http://localhost:%s/", port), nil) + if err != nil { + t.Fatal(err) + } + return c + }(), + path: "v1/query", + }, + args{ + args: []hasura.RequestBody{ + { + Type: "run_sql", + Version: 0, + Args: hasura.PGRunSQLInput{ + SQL: "select something crazy!", + }, + }, + { + Type: "run_sql", + Version: 0, + Args: hasura.PGRunSQLInput{ + SQL: "select 1", + }, + }, + }, + }, + ``, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Client{ + Client: tt.fields.Client, + path: tt.fields.path, + } + got, err := c.Bulk(tt.args.args) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + gotb, err := ioutil.ReadAll(got) + require.NoError(t, err) + require.Equal(t, tt.want, string(gotb)) + } + }) + } +} diff --git a/cli/internal/hasura/v2query/v2query.go b/cli/internal/hasura/v2query/v2query.go index bed3fa64a9e..7fe333f550a 100644 --- 
a/cli/internal/hasura/v2query/v2query.go
+++ b/cli/internal/hasura/v2query/v2query.go
@@ -3,6 +3,7 @@ package v2query
 import (
 	"bytes"
 	"context"
+	"fmt"
 	"io"
 	"net/http"
 
@@ -42,3 +43,21 @@ func (c *Client) Send(body interface{}) (*httpc.Response, io.Reader, error) {
 	}
 	return resp, responseBody, nil
 }
+func (c *Client) Bulk(args []hasura.RequestBody) (io.Reader, error) {
+	body := hasura.RequestBody{
+		Type: "bulk",
+		Args: args,
+	}
+	req, err := c.NewRequest(http.MethodPost, c.path, body)
+	if err != nil {
+		return nil, err
+	}
+	responseBody := new(bytes.Buffer)
+	resp, err := c.LockAndDo(context.Background(), req, responseBody)
+	if err != nil {
+		return nil, err
+	} else if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("bulk request failed: %v %v", resp.StatusCode, responseBody.String())
+	}
+	return responseBody, nil
+}
diff --git a/cli/internal/httpc/httpc.go b/cli/internal/httpc/httpc.go
index 06d6181b121..5e90b68a8eb 100644
--- a/cli/internal/httpc/httpc.go
+++ b/cli/internal/httpc/httpc.go
@@ -108,6 +108,13 @@ func (c *Client) LockAndDo(ctx context.Context, req *http.Request, v interface{}
 	return c.Do(ctx, req, v)
 }
 
+// hasJSONContentType reports whether the response headers declare an
+// "application/json" body; Do uses it to decide whether a response can
+// be indented as JSON before being copied to the caller's writer.
+func hasJSONContentType(headers http.Header) bool {
+	return headers.Get("Content-Type") == "application/json"
+}
+
 func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Response, error) {
 	resp, err := c.BareDo(ctx, req)
 	if err != nil {
@@ -117,18 +124,23 @@ func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Res
 	switch v := v.(type) {
 	case nil:
 	case io.Writer:
-		// indent json response
-		respBodyBytes, err := ioutil.ReadAll(resp.Body)
-		if err != nil {
-			return resp, err
+		if hasJSONContentType(resp.Header) {
+			// indent json response
+			var respBodyBytes []byte
+			respBodyBytes, err = ioutil.ReadAll(resp.Body)
+			if err != nil {
+				return resp, err
+			}
+			var buf bytes.Buffer
+			err = json.Indent(&buf, respBodyBytes, "", " ")
+			if err != nil {
+				return resp, err
+			}
+			// copy it to writer
+			_, err = io.Copy(v, &buf)
+		} else {
+			_, err = io.Copy(v, resp.Body)
 		}
-		var buf bytes.Buffer
-		err = json.Indent(&buf, respBodyBytes, "", " ")
-		if err != nil {
-			return resp, err
-		}
-		// copy it to writer
-		_, err = io.Copy(v, &buf)
 	default:
 		decErr := json.NewDecoder(resp.Body).Decode(v)
 		if decErr == io.EOF {
diff --git a/cli/migrate/database/driver.go b/cli/migrate/database/driver.go
index ef49a6067b8..a12388b81fd 100644
--- a/cli/migrate/database/driver.go
+++ b/cli/migrate/database/driver.go
@@ -117,8 +117,6 @@ type Driver interface {
 
 	SchemaDriver
 
-	SeedDriver
-
 	SettingsDriver
 
 	Query(data interface{}) error
diff --git a/cli/migrate/database/hasuradb/seed.go b/cli/migrate/database/hasuradb/seed.go
deleted file mode 100644
index 6682158e71e..00000000000
--- a/cli/migrate/database/hasuradb/seed.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package hasuradb
-
-import (
-	"bytes"
-	"fmt"
-	"net/http"
-
-	"github.com/hasura/graphql-engine/cli/internal/hasura"
-
-	"github.com/pkg/errors"
-)
-
-func (h *HasuraDB) ApplySeed(m interface{}) error {
-	resp, body, err := h.genericQueryRequest(m)
-	if err != nil {
-		return err
-	}
-	if resp.StatusCode != http.StatusOK {
-		v, ok := body.(*bytes.Buffer)
-		if ok {
-			return errors.New(v.String())
-		}
-		return fmt.Errorf("applying %v failed with code %d", m, resp.StatusCode)
-	}
-	return nil
-}
-
-func (h *HasuraDB) ExportDataDump(fromTables []string, sourceName string, sourceKind hasura.SourceKind) ([]byte, error) {
-	switch sourceKind {
-	case hasura.SourceKindPG:
-		
pgDumpOpts := []string{"--no-owner", "--no-acl", "--data-only", "--column-inserts"} - for _, table := range fromTables { - pgDumpOpts = append(pgDumpOpts, "--table", table) - } - query := SchemaDump{ - Opts: pgDumpOpts, - CleanOutput: true, - Database: sourceName, - } - - resp, body, err := h.sendSchemaDumpQuery(query) - if err != nil { - h.logger.Debug(err) - return nil, err - } - h.logger.Debug("exporting data: ", string(body)) - - if resp.StatusCode != http.StatusOK { - return nil, NewHasuraError(body, h.config.isCMD) - } - - return body, nil - } - return nil, fmt.Errorf("not supported for source %s of type %v", sourceName, sourceKind) -} diff --git a/cli/migrate/database/seed.go b/cli/migrate/database/seed.go deleted file mode 100644 index 800595369fd..00000000000 --- a/cli/migrate/database/seed.go +++ /dev/null @@ -1,8 +0,0 @@ -package database - -import "github.com/hasura/graphql-engine/cli/internal/hasura" - -type SeedDriver interface { - ApplySeed(m interface{}) error - ExportDataDump(tableNames []string, sourceName string, sourceKind hasura.SourceKind) ([]byte, error) -} diff --git a/cli/migrate/migrate.go b/cli/migrate/migrate.go index 3b4348e6489..063671d38a1 100644 --- a/cli/migrate/migrate.go +++ b/cli/migrate/migrate.go @@ -12,7 +12,6 @@ import ( "fmt" "io" "os" - "strings" "sync" "text/tabwriter" "time" @@ -1803,32 +1802,6 @@ func (m *Migrate) readDownFromVersion(from int64, to int64, ret chan<- interface } } -func (m *Migrate) ApplySeed(q interface{}) error { - return m.databaseDrv.ApplySeed(q) -} - -func (m *Migrate) ExportDataDump(tableNames []string, sourceName string, sourceKind hasura.SourceKind) ([]byte, error) { - // to support tables starting with capital letters - modifiedTableNames := make([]string, len(tableNames)) - - for idx, val := range tableNames { - split := strings.Split(val, ".") - splitLen := len(split) - - if splitLen != 1 && splitLen != 2 { - return nil, fmt.Errorf(`invalid schema/table provided "%s"`, val) - } - - if splitLen == 2 { - modifiedTableNames[idx] = fmt.Sprintf(`"%s"."%s"`, split[0], split[1]) - } else { - modifiedTableNames[idx] = fmt.Sprintf(`"%s"`, val) - } - } - - return m.databaseDrv.ExportDataDump(modifiedTableNames, sourceName, sourceKind) -} - func printDryRunStatus(migrations []*Migration) *bytes.Buffer { out := new(tabwriter.Writer) buf := &bytes.Buffer{} diff --git a/cli/seed/apply.go b/cli/seed/apply.go index 8835fe8ed48..4aab80647c8 100644 --- a/cli/seed/apply.go +++ b/cli/seed/apply.go @@ -5,67 +5,105 @@ import ( "os" "path/filepath" + "github.com/hasura/graphql-engine/cli/internal/hasura" + "github.com/hasura/graphql-engine/cli" - "github.com/hasura/graphql-engine/cli/migrate" - "github.com/hasura/graphql-engine/cli/migrate/database/hasuradb" "github.com/pkg/errors" "github.com/spf13/afero" ) +func hasAllowedSeedFileExtensions(filename string) error { + extension := filepath.Ext(filename) + allowedExtensions := []string{".sql", ".SQL"} + for _, allowedExtension := range allowedExtensions { + if allowedExtension == extension { + return nil + } + } + return fmt.Errorf("expected extension to be one of %v but got %s on file %s", allowedExtensions, extension, filename) +} + // ApplySeedsToDatabase will read all .sql files in the given // directory and apply it to hasura -func ApplySeedsToDatabase(ec *cli.ExecutionContext, fs afero.Fs, m *migrate.Migrate, filenames []string, database string) error { - seedQuery := hasuradb.HasuraInterfaceBulk{ - Type: "bulk", - Args: make([]interface{}, 0), +func (d *Driver) ApplySeedsToDatabase(fs 
afero.Fs, rootSeedsDirectory string, filenames []string, source cli.Source) error { + seedsDirectory := rootSeedsDirectory + if len(source.Name) > 0 { + seedsDirectory = filepath.Join(rootSeedsDirectory, source.Name) } - + getSourceKind := func(source cli.Source) hasura.SourceKind { + if len(source.Name) == 0 { + return hasura.SourceKindPG + } + return source.Kind + } + var sqlAsBytes [][]byte if len(filenames) > 0 { for _, filename := range filenames { - absFilename := filepath.Join(ec.SeedsDirectory, filename) + absFilename := filepath.Join(seedsDirectory, filename) + if err := hasAllowedSeedFileExtensions(absFilename); err != nil { + return err + } b, err := afero.ReadFile(fs, absFilename) if err != nil { return errors.Wrap(err, "error opening file") } - q := hasuradb.HasuraInterfaceQuery{ - Type: "run_sql", - Args: hasuradb.RunSQLInput{ - Source: database, - SQL: string(b), - }, - } - seedQuery.Args = append(seedQuery.Args, q) + sqlAsBytes = append(sqlAsBytes, b) } } else { - err := afero.Walk(fs, ec.SeedsDirectory, func(path string, file os.FileInfo, err error) error { + err := afero.Walk(fs, seedsDirectory, func(path string, file os.FileInfo, err error) error { if file == nil || err != nil { return err } - if !file.IsDir() && filepath.Ext(file.Name()) == ".sql" { + if err := hasAllowedSeedFileExtensions(file.Name()); err == nil && !file.IsDir() { b, err := afero.ReadFile(fs, path) if err != nil { return errors.Wrap(err, "error opening file") } - q := hasuradb.HasuraInterfaceQuery{ - Type: "run_sql", - Args: hasuradb.RunSQLInput{ - SQL: string(b), - Source: database, - }, - } - seedQuery.Args = append(seedQuery.Args, q) + sqlAsBytes = append(sqlAsBytes, b) } - return nil }) if err != nil { return errors.Wrap(err, "error walking the directory path") } } - if len(seedQuery.Args) == 0 { - return fmt.Errorf("no SQL files found in %s", ec.SeedsDirectory) + var args []hasura.RequestBody + sourceKind := getSourceKind(source) + switch sourceKind { + case hasura.SourceKindPG: + for _, sql := range sqlAsBytes { + request := hasura.RequestBody{ + Type: "run_sql", + Args: hasura.PGRunSQLInput{ + SQL: string(sql), + Source: source.Name, + }, + } + args = append(args, request) + } + case hasura.SourceKindMSSQL: + for _, sql := range sqlAsBytes { + request := hasura.RequestBody{ + Type: "mssql_run_sql", + Args: hasura.MSSQLRunSQLInput{ + SQL: string(sql), + Source: source.Name, + }, + } + args = append(args, request) + } + default: + return fmt.Errorf("database %s of kind %s is not supported", source.Name, source.Kind) } - return m.ApplySeed(seedQuery) + + if len(args) == 0 { + return fmt.Errorf("no SQL files found in %s", seedsDirectory) + } + _, err := d.SendBulk(args) + if err != nil { + return err + } + return nil } diff --git a/cli/seed/apply_test.go b/cli/seed/apply_test.go new file mode 100644 index 00000000000..13edea3c3ba --- /dev/null +++ b/cli/seed/apply_test.go @@ -0,0 +1,150 @@ +package seed + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/hasura/graphql-engine/cli/internal/hasura/pgdump" + "github.com/hasura/graphql-engine/cli/internal/testutil" + + "github.com/hasura/graphql-engine/cli/internal/hasura/v1query" + "github.com/hasura/graphql-engine/cli/internal/httpc" + + "github.com/hasura/graphql-engine/cli" + "github.com/hasura/graphql-engine/cli/internal/hasura" + "github.com/spf13/afero" +) + +func TestDriver_ApplySeedsToDatabase(t *testing.T) { + port13, teardown := testutil.StartHasura(t, "v1.3.3") + defer teardown() + portLatest, teardown 
:= testutil.StartHasura(t, testutil.HasuraVersion) + defer teardown() + type fields struct { + SendBulk sendBulk + PGDumpClient hasura.PGDump + } + type args struct { + fs afero.Fs + rootSeedsDirectory string + filenames []string + source cli.Source + } + tests := []struct { + name string + fields fields + args args + wantErr bool + // functions which should be run before the test + // possibly to prepare test fixtures maybe + before func(t *testing.T) + }{ + { + "can apply seeds in v1.3.3", + fields{ + func() sendBulk { + c, err := httpc.New(nil, fmt.Sprintf("http://localhost:%s/", port13), nil) + if err != nil { + t.Fatal(err) + } + return v1query.New(c, "v1/query").Bulk + }(), + func() hasura.PGDump { + c, err := httpc.New(nil, fmt.Sprintf("http://localhost:%s/", port13), nil) + if err != nil { + t.Fatal(err) + } + return pgdump.New(c, "v1alpha1/pg_dump") + }(), + }, + args{ + fs: afero.NewOsFs(), + rootSeedsDirectory: "testdata/seeds", + filenames: []string{}, + }, + false, + nil, + }, + { + "can apply seeds in latest", + fields{ + func() sendBulk { + c, err := httpc.New(nil, fmt.Sprintf("http://localhost:%s/", portLatest), nil) + if err != nil { + t.Fatal(err) + } + return v1query.New(c, "v2/query").Bulk + }(), + func() hasura.PGDump { + c, err := httpc.New(nil, fmt.Sprintf("http://localhost:%s/", portLatest), nil) + if err != nil { + t.Fatal(err) + } + return pgdump.New(c, "v1alpha1/pg_dump") + }(), + }, + args{ + fs: afero.NewOsFs(), + rootSeedsDirectory: "testdata/seeds", + filenames: []string{}, + }, + false, + nil, + }, + { + "can apply seeds from files", + fields{ + func() sendBulk { + c, err := httpc.New(nil, fmt.Sprintf("http://localhost:%s/", portLatest), nil) + if err != nil { + t.Fatal(err) + } + return v1query.New(c, "v2/query").Bulk + }(), + func() hasura.PGDump { + c, err := httpc.New(nil, fmt.Sprintf("http://localhost:%s/", portLatest), nil) + if err != nil { + t.Fatal(err) + } + return pgdump.New(c, "v1alpha1/pg_dump") + }(), + }, + args{ + fs: afero.NewOsFs(), + rootSeedsDirectory: "testdata/seeds", + filenames: []string{ + "articles.sql", + }, + }, + false, + func(t *testing.T) { + c, err := httpc.New(nil, fmt.Sprintf("http://localhost:%s/", portLatest), nil) + if err != nil { + t.Fatal(err) + } + v1QueryClient := v1query.New(c, "v2/query") + _, err = v1QueryClient.PGRunSQL(hasura.PGRunSQLInput{ + SQL: "DROP TABLE articles", + Source: "default", + }) + require.NoError(t, err) + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + d := &Driver{ + SendBulk: tt.fields.SendBulk, + PGDumpClient: tt.fields.PGDumpClient, + } + if tt.before != nil { + tt.before(t) + } + if err := d.ApplySeedsToDatabase(tt.args.fs, tt.args.rootSeedsDirectory, tt.args.filenames, tt.args.source); (err != nil) != tt.wantErr { + t.Errorf("ApplySeedsToDatabase() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/cli/seed/create.go b/cli/seed/create.go index e3cfb468010..25c157ab9cc 100644 --- a/cli/seed/create.go +++ b/cli/seed/create.go @@ -8,8 +8,11 @@ import ( "os" "path/filepath" "strconv" + "strings" "time" + "github.com/hasura/graphql-engine/cli/internal/hasura" + "github.com/spf13/afero" ) @@ -48,3 +51,38 @@ func CreateSeedFile(fs afero.Fs, opts CreateSeedOpts) (*string, error) { return &fullFilePath, nil } + +func (d *Driver) ExportDatadump(tableNames []string, sourceName string) (io.Reader, error) { + // to support tables starting with capital letters + modifiedTableNames := make([]string, len(tableNames)) + + for idx, val := range 
tableNames {
+		split := strings.Split(val, ".")
+		splitLen := len(split)
+
+		if splitLen != 1 && splitLen != 2 {
+			return nil, fmt.Errorf(`invalid schema/table provided "%s"`, val)
+		}
+
+		if splitLen == 2 {
+			modifiedTableNames[idx] = fmt.Sprintf(`"%s"."%s"`, split[0], split[1])
+		} else {
+			modifiedTableNames[idx] = fmt.Sprintf(`"%s"`, val)
+		}
+	}
+
+	pgDumpOpts := []string{"--no-owner", "--no-acl", "--data-only", "--column-inserts"}
+	for _, table := range modifiedTableNames {
+		pgDumpOpts = append(pgDumpOpts, "--table", table)
+	}
+	request := hasura.PGDumpRequest{
+		Opts:        pgDumpOpts,
+		CleanOutput: true,
+		SourceName:  sourceName,
+	}
+	response, err := d.PGDumpClient.Send(request)
+	if err != nil {
+		return nil, err
+	}
+	return response, nil
+}
diff --git a/cli/seed/create_test.go b/cli/seed/create_test.go
new file mode 100644
index 00000000000..45420e08276
--- /dev/null
+++ b/cli/seed/create_test.go
@@ -0,0 +1,111 @@
+package seed
+
+import (
+	"fmt"
+	"io/ioutil"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/hasura/graphql-engine/cli/internal/hasura/pgdump"
+	"github.com/hasura/graphql-engine/cli/internal/hasura/v1query"
+	"github.com/hasura/graphql-engine/cli/internal/httpc"
+
+	"github.com/hasura/graphql-engine/cli/internal/testutil"
+
+	"github.com/hasura/graphql-engine/cli/internal/hasura"
+)
+
+func TestDriver_ExportDatadump(t *testing.T) {
+	port, teardown := testutil.StartHasura(t, testutil.HasuraVersion)
+	defer teardown()
+	type fields struct {
+		SendBulk     sendBulk
+		PGDumpClient hasura.PGDump
+	}
+	type args struct {
+		tableNames []string
+		sourceName string
+	}
+	tests := []struct {
+		name    string
+		fields  fields
+		args    args
+		want    string
+		wantErr bool
+		before  func(t *testing.T)
+	}{
+		{
+			"can export data dump",
+			fields{
+				func() sendBulk {
+					c, err := httpc.New(nil, fmt.Sprintf("http://localhost:%s/", port), nil)
+					if err != nil {
+						t.Fatal(err)
+					}
+					return v1query.New(c, "v2/query").Bulk
+				}(),
+				func() hasura.PGDump {
+					c, err := httpc.New(nil, fmt.Sprintf("http://localhost:%s/", port), nil)
+					if err != nil {
+						t.Fatal(err)
+					}
+					return pgdump.New(c, "v1alpha1/pg_dump")
+				}(),
+			},
+			args{
+				tableNames: []string{"articles", "authors"},
+				sourceName: "default",
+			},
+			`INSERT INTO public.articles (id, title, content, rating, author_id) VALUES (1, 'test1', 'test1', 1, 4);
+INSERT INTO public.articles (id, title, content, rating, author_id) VALUES (2, 'test2', 'test1', 1, 4);
+INSERT INTO public.articles (id, title, content, rating, author_id) VALUES (3, 'test3', 'test1', 1, 4);
+INSERT INTO public.authors (id, name) VALUES (1, 'test1');
+INSERT INTO public.authors (id, name) VALUES (4, 'test2');
+SELECT pg_catalog.setval('public.articles_author_id_seq', 1, false);
+SELECT pg_catalog.setval('public.articles_id_seq', 1, false);
+SELECT pg_catalog.setval('public.authors_id_seq', 1, false);
+`,
+			false,
+			func(t *testing.T) {
+				c, err := httpc.New(nil, fmt.Sprintf("http://localhost:%s/", port), nil)
+				if err != nil {
+					t.Fatal(err)
+				}
+				q := v1query.New(c, "v2/query")
+				b, err := ioutil.ReadFile("testdata/seeds/articles.sql")
+				require.NoError(t, err)
+				_, err = q.PGRunSQL(hasura.PGRunSQLInput{
+					SQL: string(b),
+				})
+				require.NoError(t, err)
+				b, err = ioutil.ReadFile("testdata/seeds/authors.sql")
+				require.NoError(t, err)
+				_, err = q.PGRunSQL(hasura.PGRunSQLInput{
+					SQL: string(b),
+				})
+				require.NoError(t, err)
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			d := &Driver{
+				SendBulk:     tt.fields.SendBulk,
+				PGDumpClient: 
tt.fields.PGDumpClient, + } + if tt.before != nil { + tt.before(t) + } + got, err := d.ExportDatadump(tt.args.tableNames, tt.args.sourceName) + if tt.wantErr { + require.Error(t, err) + } else { + require.NoError(t, err) + gotb, err := ioutil.ReadAll(got) + require.NoError(t, err) + require.Equal(t, tt.want, string(gotb)) + } + }) + } +} diff --git a/cli/seed/seed.go b/cli/seed/seed.go new file mode 100644 index 00000000000..4bf671450ed --- /dev/null +++ b/cli/seed/seed.go @@ -0,0 +1,17 @@ +package seed + +import ( + "io" + + "github.com/hasura/graphql-engine/cli/internal/hasura" +) + +type sendBulk func([]hasura.RequestBody) (io.Reader, error) +type Driver struct { + SendBulk sendBulk + PGDumpClient hasura.PGDump +} + +func NewDriver(s sendBulk, pgDumpClient hasura.PGDump) *Driver { + return &Driver{s, pgDumpClient} +} diff --git a/cli/seed/testdata/seeds/articles.sql b/cli/seed/testdata/seeds/articles.sql new file mode 100644 index 00000000000..bb2d78a743b --- /dev/null +++ b/cli/seed/testdata/seeds/articles.sql @@ -0,0 +1,14 @@ +CREATE TABLE articles +( + id serial NOT NULL, + title text NOT NULL, + content text NOT NULL, + rating integer NOT NULL, + author_id serial NOT NULL, + PRIMARY KEY (id) +); + +INSERT INTO articles (id, title, content, rating, author_id) +VALUES (1, 'test1', 'test1', 1, 4), + (2, 'test2', 'test1', 1, 4), + (3, 'test3', 'test1', 1, 4); \ No newline at end of file diff --git a/cli/seed/testdata/seeds/authors.sql b/cli/seed/testdata/seeds/authors.sql new file mode 100644 index 00000000000..9dd94877129 --- /dev/null +++ b/cli/seed/testdata/seeds/authors.sql @@ -0,0 +1,10 @@ +CREATE TABLE authors +( + id SERIAL PRIMARY KEY, + name TEXT +); + +INSERT INTO authors(id, name) +VALUES (1, 'test1'), + (4, 'test2'); +
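
Usage note (illustrative, not part of the patch): the new seed.Driver composes a Bulk sender with a PGDump client, mirroring getSeedDriver in cli/commands/seed.go. A minimal sketch of wiring it up for a config v3 project follows; the endpoint http://localhost:8080/ and the source name "default" are assumptions for illustration.

package main

import (
	"log"

	"github.com/hasura/graphql-engine/cli"
	"github.com/hasura/graphql-engine/cli/internal/hasura"
	"github.com/hasura/graphql-engine/cli/internal/hasura/pgdump"
	"github.com/hasura/graphql-engine/cli/internal/hasura/v2query"
	"github.com/hasura/graphql-engine/cli/internal/httpc"
	"github.com/hasura/graphql-engine/cli/seed"
	"github.com/spf13/afero"
)

func main() {
	// Assumed endpoint of a running graphql-engine instance.
	c, err := httpc.New(nil, "http://localhost:8080/", nil)
	if err != nil {
		log.Fatal(err)
	}
	// Config v3 routes seeds through v2/query; config v2 and below would use
	// v1query.New(c, "v1/query").Bulk instead, as getSeedDriver does.
	driver := seed.NewDriver(v2query.New(c, "v2/query").Bulk, pgdump.New(c, "v1alpha1/pg_dump"))
	// Applies every .sql file under seeds/default as a single bulk request;
	// a non-empty filenames slice would restrict the run to those files.
	err = driver.ApplySeedsToDatabase(
		afero.NewOsFs(),
		"seeds",
		nil,
		cli.Source{Name: "default", Kind: hasura.SourceKindPG},
	)
	if err != nil {
		log.Fatal(err)
	}
}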
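A similar sketch for the seed-creation path: ExportDatadump wraps each table name in double quotes (so schema-qualified and capitalized identifiers survive pg_dump's --table matching) before issuing a PGDumpRequest. The direct equivalent of ExportDatadump([]string{"public.authors"}, "default"), reusing the client c from the sketch above; "io" and "os" would also need to be imported:

	out, err := pgdump.New(c, "v1alpha1/pg_dump").Send(hasura.PGDumpRequest{
		// ExportDatadump builds these options itself, quoting the table name.
		Opts:        []string{"--no-owner", "--no-acl", "--data-only", "--column-inserts", "--table", `"public"."authors"`},
		CleanOutput: true,
		SourceName:  "default",
	})
	if err != nil {
		log.Fatal(err)
	}
	// out now holds column-insert statements suitable for a new seed file
	// (CreateSeedFile writes such content under seeds/<source>/).
	if _, err := io.Copy(os.Stdout, out); err != nil {
		log.Fatal(err)
	}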