// Package migrate implements migrations on Hasura GraphQL Engine.
//
// This package is borrowed from https://github.com/golang-migrate/migrate with
// additions for Hasura-specific YAML file support and an improved Rails-like
// migration pattern.
package migrate

import (
	"bytes"
	"container/list"
	"crypto/tls"
	"fmt"
	"io"
	"os"
	"sync"
	"text/tabwriter"
	"time"

	"github.com/hasura/graphql-engine/cli/metadata/types"
	"github.com/hasura/graphql-engine/cli/migrate/database"
	"github.com/hasura/graphql-engine/cli/migrate/source"
	"github.com/hasura/graphql-engine/cli/util"

	"github.com/pkg/errors"
	log "github.com/sirupsen/logrus"
	"gopkg.in/yaml.v2"
)

// DefaultPrefetchMigrations sets the number of migrations to pre-read
// from the source. This is helpful if the source is remote, but has little
// effect for a local source (i.e. file system).
// Please note that this setting has a major impact on the memory usage,
// since each pre-read migration is buffered in memory. See DefaultBufferSize.
var DefaultPrefetchMigrations = uint64(10)

// DefaultLockTimeout sets the max time a database driver has to acquire a lock.
var DefaultLockTimeout = 15 * time.Second

var (
	ErrNoChange         = fmt.Errorf("no change")
	ErrNilVersion       = fmt.Errorf("no migration")
	ErrLocked           = fmt.Errorf("database locked")
	ErrNoMigrationFiles = fmt.Errorf("no migration files found")
	ErrLockTimeout      = fmt.Errorf("timeout: can't acquire database lock")
	ErrApplied          = fmt.Errorf("Version already applied in database")
	ErrNotApplied       = fmt.Errorf("Migration not applied in database")
	ErrNoMigrationMode  = fmt.Errorf("Migration mode is disabled")
	ErrMigrationMode    = fmt.Errorf("Migration mode is enabled")
)

// ErrShortLimit is an error returned when not enough migrations
// can be returned by a source for a given limit.
type ErrShortLimit struct {
	Short uint64
}

// Error implements the error interface.
func (e ErrShortLimit) Error() string {
	return fmt.Sprintf("limit %v short", e.Short)
}

// ErrDirty is returned when the database is in a dirty state, i.e. a
// previously failed migration left it at a version that was never
// cleanly applied.
type ErrDirty struct {
	Version int64
}

// Error implements the error interface.
func (e ErrDirty) Error() string {
	return fmt.Sprintf("Dirty database version %v. Fix and force version.", e.Version)
}
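
// A minimal sketch of handling a dirty database when applying migrations
// (illustrative only; how the version is inspected and forced is up to the
// caller):
//
//	if err := m.Up(); err != nil {
//		if dirtyErr, ok := err.(ErrDirty); ok {
//			// the database is stuck at dirtyErr.Version; inspect and
//			// fix/force the version before retrying
//			log.Errorf("dirty database at version %d", dirtyErr.Version)
//		}
//	}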

// Migrate holds the source and database drivers along with the options
// that control how migrations are read and applied.
type Migrate struct {
	sourceName string
	sourceURL  string
	sourceDrv  source.Driver

	databaseName string
	databaseURL  string
	databaseDrv  database.Driver

	// Logger is the global logger object to print logs.
	Logger *log.Logger

	// GracefulStop accepts `true` and will stop executing migrations
	// as soon as possible at a safe break point, so that the database
	// is not corrupted.
	GracefulStop   chan bool
	isGracefulStop bool

	isLockedMu *sync.Mutex
	isLocked   bool

	// PrefetchMigrations defaults to DefaultPrefetchMigrations,
	// but can be set per Migrate instance.
	PrefetchMigrations uint64

	// LockTimeout defaults to DefaultLockTimeout,
	// but can be set per Migrate instance.
	LockTimeout time.Duration

	// isCMD records the cmd flag passed to New.
	isCMD bool

	status *Status

	SkipExecution bool

	DryRun bool
}
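
// The prefetch and lock-timeout knobs can be tuned per instance after New
// returns; a small sketch (the values below are illustrative):
//
//	m, err := New(sourceURL, databaseURL, true, 2, nil, nil)
//	if err != nil {
//		return err
//	}
//	m.PrefetchMigrations = 20        // buffer more migrations in memory
//	m.LockTimeout = 30 * time.Second // wait longer for the database lock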

// New returns a new Migrate instance from a source URL and a database URL.
// The URL scheme is defined by each driver.
func New(sourceUrl string, databaseUrl string, cmd bool, configVersion int, tlsConfig *tls.Config, logger *log.Logger) (*Migrate, error) {
	m := newCommon(cmd)

	sourceName, err := schemeFromUrl(sourceUrl)
	if err != nil {
		log.Debug(err)
		return nil, err
	}
	m.sourceName = sourceName
	m.sourceURL = sourceUrl

	databaseName, err := schemeFromUrl(databaseUrl)
	if err != nil {
		log.Debug(err)
		return nil, err
	}
	m.databaseName = databaseName
	m.databaseURL = databaseUrl

	if logger == nil {
		logger = log.New()
	}
	m.Logger = logger

	sourceDrv, err := source.Open(sourceUrl, logger)
	if err != nil {
		log.Debug(err)
		return nil, err
	}
	m.sourceDrv = sourceDrv
	if configVersion >= 2 {
		m.sourceDrv.DefaultParser(source.DefaultParsev2)
	}

	databaseDrv, err := database.Open(databaseUrl, cmd, tlsConfig, logger)
	if err != nil {
		log.Debug(err)
		return nil, err
	}
	m.databaseDrv = databaseDrv

	err = m.ReScan()
	if err != nil {
		return nil, err
	}

	return m, nil
}
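
// A minimal usage sketch for New. The migrations directory uses the file://
// scheme; the database URL shown here is a placeholder, since its actual
// scheme is defined by the database driver:
//
//	m, err := New(
//		"file://migrations",     // source of migration files
//		"<database-driver-url>", // driver-defined database URL (placeholder)
//		true,                    // created from the CLI
//		2,                       // config version (v2 enables the newer parser)
//		nil,                     // no custom TLS config
//		nil,                     // nil logger falls back to log.New()
//	)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer m.Close()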

func newCommon(cmd bool) *Migrate {
	return &Migrate{
		GracefulStop:       make(chan bool, 1),
		PrefetchMigrations: DefaultPrefetchMigrations,
		isLockedMu:         &sync.Mutex{},
		LockTimeout:        DefaultLockTimeout,
		isCMD:              cmd,
	}
}

// ReScan rescans the source and the database and recalculates the
// migration status.
func (m *Migrate) ReScan() error {
	err := m.sourceDrv.Scan()
	if err != nil {
		m.Logger.Debug(err)
		return err
	}

	err = m.databaseDrv.Scan()
	if err != nil {
		m.Logger.Debug(err)
		return err
	}

	err = m.calculateStatus()
	if err != nil {
		return err
	}
	return nil
}

// Close closes the source and the database.
func (m *Migrate) Close() (source error) {
	sourceSrvClose := make(chan error)

	go func() {
		sourceSrvClose <- m.sourceDrv.Close()
	}()

	return <-sourceSrvClose
}

func (m *Migrate) calculateStatus() (err error) {
	m.status = NewStatus()
	err = m.readStatusFromSource()
	if err != nil {
		return err
	}

	return m.readStatusFromDatabase()
}

func (m *Migrate) readStatusFromSource() (err error) {
	firstVersion, err := m.sourceDrv.First()
	if err != nil {
		if _, ok := err.(*os.PathError); ok {
			return nil
		}
		return err
	}
	m.status.Append(m.newMigrationStatus(firstVersion, "source"))
	from := int64(firstVersion)

	lastVersion, err := m.sourceDrv.GetLocalVersion()
	if err != nil {
		return err
	}
	m.status.Append(m.newMigrationStatus(lastVersion, "source"))
	to := int64(lastVersion)

	for from < to {
		next, err := m.sourceDrv.Next(suint64(from))
		if err != nil {
			return err
		}
		m.status.Append(m.newMigrationStatus(next, "source"))
		from = int64(next)
	}

	return nil
}

func (m *Migrate) readStatusFromDatabase() (err error) {
	firstVersion, ok := m.databaseDrv.First()
	if !ok {
		return nil
	}
	m.status.Append(m.newMigrationStatus(firstVersion, "database"))
	from := int64(firstVersion)

	lastVersion, ok := m.databaseDrv.Last()
	if !ok {
		return nil
	}
	m.status.Append(m.newMigrationStatus(lastVersion, "database"))
	to := int64(lastVersion)

	for from < to {
		next, ok := m.databaseDrv.Next(suint64(from))
		if !ok {
			return nil
		}
		m.status.Append(m.newMigrationStatus(next, "database"))
		from = int64(next)
	}
	return err
}

func (m *Migrate) newMigrationStatus(version uint64, driverType string) *MigrationStatus {
	var migrStatus *MigrationStatus
	migrStatus, ok := m.status.Read(version)
	if !ok {
		migrStatus = &MigrationStatus{
			Version: version,
			Name:    m.sourceDrv.ReadName(version),
		}
	}

	switch driverType {
	case "source":
		migrStatus.IsPresent = true
	case "database":
		migrStatus.IsApplied = true
	default:
		return nil
	}
	return migrStatus
}

// GetStatus recalculates and returns the current migration status.
func (m *Migrate) GetStatus() (*Status, error) {
	err := m.calculateStatus()
	if err != nil {
		return nil, err
	}
	return m.status, nil
}

func (m *Migrate) GetSetting(name string) (string, error) {
	val, err := m.databaseDrv.GetSetting(name)
	if err != nil {
		return "", err
	}
	return val, nil
}

func (m *Migrate) UpdateSetting(name string, value string) error {
	return m.databaseDrv.UpdateSetting(name, value)
}

// Version returns the currently active migration version and whether the
// database is in a dirty state. If no migration has been applied yet, it
// returns ErrNilVersion.
func (m *Migrate) Version() (version uint64, dirty bool, err error) {
	v, d, err := m.databaseDrv.Version()
	if err != nil {
		return 0, false, err
	}

	if v == database.NilVersion {
		return 0, false, ErrNilVersion
	}

	return suint64(v), d, nil
}
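
// A small sketch of reading the current version (illustrative only):
//
//	version, dirty, err := m.Version()
//	switch {
//	case err == ErrNilVersion:
//		// nothing has been applied yet
//	case err != nil:
//		return err
//	case dirty:
//		// the last migration did not complete cleanly
//	default:
//		log.Infof("database is at version %d", version)
//	}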

func (m *Migrate) GetUnappliedMigrations(version uint64) []uint64 {
	return m.sourceDrv.GetUnappliedMigrations(version)
}

func (m *Migrate) GetIntroSpectionSchema() (interface{}, error) {
	return m.databaseDrv.GetIntroSpectionSchema()
}

func (m *Migrate) SetMetadataPlugins(plugins types.MetadataPlugins) {
	m.databaseDrv.SetMetadataPlugins(plugins)
}

func (m *Migrate) EnableCheckMetadataConsistency(enabled bool) {
	m.databaseDrv.EnableCheckMetadataConsistency(enabled)
}

// ExportMetadata exports the metadata from the database as a map of file
// names to file contents.
func (m *Migrate) ExportMetadata() (map[string][]byte, error) {
	return m.databaseDrv.ExportMetadata()
}

// WriteMetadata writes the given metadata files to the source directory.
func (m *Migrate) WriteMetadata(files map[string][]byte) error {
	return m.sourceDrv.WriteMetadata(files)
}

func (m *Migrate) ResetMetadata() error {
	return m.databaseDrv.ResetMetadata()
}

// ReloadMetadata reloads the metadata on the database.
func (m *Migrate) ReloadMetadata() error {
	return m.databaseDrv.ReloadMetadata()
}

func (m *Migrate) GetInconsistentMetadata() (bool, []database.InconsistentMetadataInterface, error) {
	return m.databaseDrv.GetInconsistentMetadata()
}

func (m *Migrate) DropInconsistentMetadata() error {
	return m.databaseDrv.DropInconsistentMetadata()
}

func (m *Migrate) BuildMetadata() (yaml.MapSlice, error) {
	return m.databaseDrv.BuildMetadata()
}

func (m *Migrate) ApplyMetadata() error {
	return m.databaseDrv.ApplyMetadata()
}

func (m *Migrate) ExportSchemaDump(schemName []string) ([]byte, error) {
	return m.databaseDrv.ExportSchemaDump(schemName)
}

func (m *Migrate) RemoveVersions(versions []uint64) error {
	mode, err := m.databaseDrv.GetSetting("migration_mode")
	if err != nil {
		return err
	}

	if mode != "true" {
		return ErrNoMigrationMode
	}

	if err := m.lock(); err != nil {
		return err
	}

	for _, version := range versions {
		m.databaseDrv.RemoveVersion(int64(version))
	}
	return m.unlockErr(nil)
}

func (m *Migrate) Query(data interface{}) error {
	mode, err := m.databaseDrv.GetSetting("migration_mode")
	if err != nil {
		return err
	}

	if mode == "true" {
		return ErrMigrationMode
	}
	return m.databaseDrv.Query(data)
}

// Squash squashes all migrations from version v up to the latest one into a
// single migration. It returns:
//   - vs: the list of migration versions that were squashed
//   - um: the squashed metadata for all up steps
//   - us: the squashed SQL for all up steps
//   - dm: the squashed metadata for all down steps
//   - ds: the squashed SQL for all down steps
func (m *Migrate) Squash(v uint64) (vs []int64, um []interface{}, us []byte, dm []interface{}, ds []byte, err error) {
	// check the migration mode on the database
	mode, err := m.databaseDrv.GetSetting("migration_mode")
	if err != nil {
		return
	}

	// if migration_mode is not enabled, set err to ErrNoMigrationMode and return
	if mode != "true" {
		err = ErrNoMigrationMode
		return
	}

	// concurrently squash all the up migrations:
	// read all up migrations from the source and send each one
	// to the retUp channel
	retUp := make(chan interface{}, m.PrefetchMigrations)
	go m.squashUp(v, retUp)

	// concurrently squash all the down migrations:
	// read all down migrations from the source and send each one
	// to the retDown channel
	retDown := make(chan interface{}, m.PrefetchMigrations)
	go m.squashDown(v, retDown)

	// combine the squashed up and down migrations into a single one when they're ready
	dataUp := make(chan interface{}, m.PrefetchMigrations)
	dataDown := make(chan interface{}, m.PrefetchMigrations)
	retVersions := make(chan int64, m.PrefetchMigrations)
	go m.squashMigrations(retUp, retDown, dataUp, dataDown, retVersions)

	// make a channel for errors
	errChn := make(chan error, 2)

	// create a waitgroup to wait for all goroutines to finish execution
	var wg sync.WaitGroup
	// add three tasks to the waitgroup since we spawn three consumer goroutines below
	wg.Add(3)

	// read from the dataUp channel as up migrations are squashed and compiled
	go func() {
		// mark one task in the waitgroup as complete on return
		defer wg.Done()

		buf := &bytes.Buffer{}
		for r := range dataUp {
			// check the type of value received on the channel
			switch data := r.(type) {
			case error:
				// it's an error: record it and stop
				// note: this returns from the goroutine, not from Squash itself
				m.isGracefulStop = true
				errChn <- r.(error)
				return
			case []byte:
				// it's SQL: concatenate it into the buffer
				buf.WriteString("\n")
				buf.Write(data)
			case interface{}:
				// it's metadata: append it to the array
				um = append(um, data)
			}
		}
		// set us to the bytes written into buf
		us = buf.Bytes()
	}()

	// read from the dataDown channel as down migrations are squashed and compiled
	go func() {
		// mark another task in the waitgroup as complete on return
		defer wg.Done()
		buf := &bytes.Buffer{}
		for r := range dataDown {
			// check the type of value received on the channel
			switch data := r.(type) {
			case error:
				// it's an error: record it and stop
				// note: this returns from the goroutine, not from Squash itself
				m.isGracefulStop = true
				errChn <- r.(error)
				return
			case []byte:
				// it's SQL: concatenate it into the buffer
				buf.WriteString("\n")
				buf.Write(data)
			case interface{}:
				// it's metadata: append it to the array
				dm = append(dm, data)
			}
		}
		// set ds to the bytes written into buf
		ds = buf.Bytes()
	}()

	// read the squashed versions from the retVersions channel
	go func() {
		// mark another task in the waitgroup as complete on return
		defer wg.Done()
		for r := range retVersions {
			// append each version to the versions array
			vs = append(vs, r)
		}
	}()

	// once the goroutines above return, wait until all three tasks
	// in the waitgroup are completed
	wg.Wait()

	// close the error channel
	close(errChn)

	// check for errors in the error channel
	for e := range errChn {
		err = e
		return
	}

	return
}
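
// A minimal sketch of consuming Squash's results (illustrative only; how the
// squashed files are written out is up to the caller):
//
//	versions, upMeta, upSQL, downMeta, downSQL, err := m.Squash(1572238297262)
//	if err != nil {
//		return err
//	}
//	log.Infof("squashed %d migrations", len(versions))
//	// upSQL/downSQL hold the concatenated SQL, upMeta/downMeta the metadata
//	// actions; write them out as a new squashed migration.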

// Migrate looks at the currently active migration version,
// then migrates either up or down to the specified version.
func (m *Migrate) Migrate(version uint64, direction string) error {
	mode, err := m.databaseDrv.GetSetting("migration_mode")
	if err != nil {
		return err
	}

	if mode != "true" {
		return ErrNoMigrationMode
	}

	if err := m.lock(); err != nil {
		return err
	}

	ret := make(chan interface{}, m.PrefetchMigrations)
	go m.read(version, direction, ret)

	if m.DryRun {
		return m.unlockErr(m.runDryRun(ret))
	}
	return m.unlockErr(m.runMigrations(ret))
}
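
// A small sketch of migrating to a specific version (illustrative only; the
// direction string is assumed here to be "up" or "down"):
//
//	if err := m.Migrate(1572238297262, "up"); err != nil {
//		return err
//	}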

func (m *Migrate) QueryWithVersion(version uint64, data io.ReadCloser, skipExecution bool) error {
	mode, err := m.databaseDrv.GetSetting("migration_mode")
	if err != nil {
		return err
	}

	if mode != "true" {
		return ErrNoMigrationMode
	}

	if err := m.lock(); err != nil {
		return err
	}

	if !skipExecution {
		if err := m.databaseDrv.Run(data, "meta", ""); err != nil {
			m.databaseDrv.ResetQuery()
			return m.unlockErr(err)
		}
	}

	if version != 0 {
		if err := m.databaseDrv.InsertVersion(int64(version)); err != nil {
			m.databaseDrv.ResetQuery()
			return m.unlockErr(err)
		}
	}

	return m.unlockErr(nil)
}
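
// Usage sketch (illustrative only): data carries the metadata query body as an
// io.ReadCloser; bytes.NewReader wrapped in ioutil.NopCloser is one way to
// supply it (queryJSON below is a placeholder). A version of 0 skips recording
// a version, and skipExecution=true records the version without running the
// query.
//
//	body := ioutil.NopCloser(bytes.NewReader(queryJSON))
//	if err := m.QueryWithVersion(0, body, false); err != nil {
//		// handle the error
//	}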

// Steps looks at the currently active migration version.
// It will migrate up if n > 0, and down if n < 0.
func (m *Migrate) Steps(n int64) error {
	mode, err := m.databaseDrv.GetSetting("migration_mode")
	if err != nil {
		return err
	}

	if mode != "true" {
		return ErrNoMigrationMode
	}

	if n == 0 {
		return ErrNoChange
	}

	if err := m.lock(); err != nil {
		return err
	}

	ret := make(chan interface{}, m.PrefetchMigrations)

	if n > 0 {
		go m.readUp(n, ret)
	} else {
		go m.readDown(-n, ret)
	}

	if m.DryRun {
		return m.unlockErr(m.runDryRun(ret))
	} else {
		return m.unlockErr(m.runMigrations(ret))
	}
}
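
// Usage sketch (illustrative only): apply the next two pending up migrations,
// then roll back the most recent one.
//
//	if err := m.Steps(2); err != nil {
//		// handle the error
//	}
//	if err := m.Steps(-1); err != nil {
//		// handle the error
//	}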

// Up looks at the currently active migration version
// and will migrate all the way up (applying all up migrations).
func (m *Migrate) Up() error {
	mode, err := m.databaseDrv.GetSetting("migration_mode")
	if err != nil {
		return err
	}

	if mode != "true" {
		return ErrNoMigrationMode
	}

	if err := m.lock(); err != nil {
		return err
	}

	curVersion, dirty, err := m.databaseDrv.Version()
	if err != nil {
		return err
	}

	if dirty {
		return ErrDirty{curVersion}
	}

	ret := make(chan interface{}, m.PrefetchMigrations)

	go m.readUp(-1, ret)

	if m.DryRun {
		return m.unlockErr(m.runDryRun(ret))
	} else {
		return m.unlockErr(m.runMigrations(ret))
	}
}
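
// Usage sketch (illustrative only, assuming the caller sets the exported
// DryRun flag that Up checks above): with DryRun enabled, Up only reports what
// would be applied instead of executing it.
//
//	m.DryRun = true
//	if err := m.Up(); err != nil {
//		// handle the error
//	}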

// Down looks at the currently active migration version
// and will migrate all the way down (applying all down migrations).
func (m *Migrate) Down() error {
	mode, err := m.databaseDrv.GetSetting("migration_mode")
	if err != nil {
		return err
	}

	if mode != "true" {
		return ErrNoMigrationMode
	}

	if err := m.lock(); err != nil {
		return err
	}

	curVersion, dirty, err := m.databaseDrv.Version()
	if err != nil {
		return err
	}

	if dirty {
		return ErrDirty{curVersion}
	}

	ret := make(chan interface{}, m.PrefetchMigrations)
	go m.readDown(-1, ret)

	if m.DryRun {
		return m.unlockErr(m.runDryRun(ret))
	} else {
		return m.unlockErr(m.runMigrations(ret))
	}
}
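
// Usage sketch (illustrative only): Down rolls back every applied migration
// and, like Up, refuses to run when the recorded database version is dirty.
//
//	if err := m.Down(); err != nil {
//		// e.g. ErrDirty or ErrNoMigrationMode
//	}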

func (m *Migrate) squashUp(version uint64, ret chan<- interface{}) {
	defer close(ret)

	currentVersion := version
	count := int64(0)
	limit := int64(-1)
	if m.stop() {
		return
	}

	for limit == -1 {
		if currentVersion == version {
			// during the first iteration of the loop,
			// check if a next version exists for the "--from" version
			if err := m.versionUpExists(version); err != nil {
				ret <- err
				return
			}

			// If a next version exists, this returns an instance of the
			// migration.go Migrate struct.
			// It reads the SQL up migration;
			// even if a migration file doesn't exist in the source,
			// an empty migration will be returned.
			migr, err := m.newMigration(version, int64(version))
			if err != nil {
				ret <- err
				return
			}
			ret <- migr
			// write the body of the migration to the reader;
			// the migr instance sent via the channel will then start
			// reading from it
			go migr.Buffer()

			// read the next version of the meta up migration;
			// even if a migration file doesn't exist in the source,
			// an empty migration will be returned
			migr, err = m.metanewMigration(version, int64(version))
			if err != nil {
				ret <- err
				return
			}
			ret <- migr
			go migr.Buffer()
			count++
		}

		// get the next version using the source driver;
		// in the first iteration we knew which version to operate on,
		// but here we have to find the next version
		next, err := m.sourceDrv.Next(currentVersion)
		if os.IsNotExist(err) {
			// no limit, but no migrations applied?
			if count == 0 {
				ret <- ErrNoChange
				return
			}

			// when there are no more migrations, return
			if limit == -1 {
				return
			}
		}

		if err != nil {
			ret <- err
			return
		}

		// Check if next files exist (yaml or sql)
		if err = m.versionUpExists(next); err != nil {
			ret <- err
			return
		}

		migr, err := m.newMigration(next, int64(next))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()

		migr, err = m.metanewMigration(next, int64(next))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()
		currentVersion = next
		count++
	}
}
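
// Note (added for clarity): squashUp is a producer; it streams each migration
// (or an error) over ret and closes the channel when it is done. The squash
// workflow is expected to consume this channel and merge the buffered SQL and
// metadata bodies into a single squashed migration.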

func (m *Migrate) squashDown(version uint64, ret chan<- interface{}) {
	defer close(ret)

	// get the last version from the source driver
	from, err := m.sourceDrv.GetLocalVersion()
	if err != nil {
		ret <- err
		return
	}

	for {
		if m.stop() {
			return
		}

		if from < version {
			return
		}

		err = m.versionDownExists(from)
		if err != nil {
			ret <- err
			return
		}

		prev, err := m.sourceDrv.Prev(from)
		if os.IsNotExist(err) {
			migr, err := m.metanewMigration(from, -1)
			if err != nil {
				ret <- err
				return
			}
			ret <- migr
			go migr.Buffer()

			migr, err = m.newMigration(from, -1)
			if err != nil {
				ret <- err
				return
			}
			ret <- migr
			go migr.Buffer()
			return
		} else if err != nil {
			ret <- err
			return
		}

		migr, err := m.metanewMigration(from, int64(prev))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()

		migr, err = m.newMigration(from, int64(prev))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()
		from = prev
	}
}
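
// Note (added for clarity): squashDown walks from the latest local version
// back towards the squash start version, emitting the meta and SQL down
// migrations for each step so the consumer can combine them in reverse order.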

// read reads either up or down migrations from source `from` to `to`.
// Each migration is then written to the ret channel.
// If an error occurs during reading, that error is written to the ret channel, too.
// Once read is done reading it will close the ret channel.
func (m *Migrate) read(version uint64, direction string, ret chan<- interface{}) {
	defer close(ret)

	if direction == "up" {
		if m.stop() {
			return
		}

		// Check if this version is present in the DB
		ok := m.databaseDrv.Read(version)
		if ok {
			ret <- ErrApplied
			return
		}

		// Check if the next version exists (yaml or sql)
		if err := m.versionUpExists(version); err != nil {
			ret <- err
			return
		}

		migr, err := m.newMigration(version, int64(version))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()

		migr, err = m.metanewMigration(version, int64(version))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()

	} else {
		// it's going down
		if m.stop() {
			return
		}

		// Check if this version is present in the DB
		ok := m.databaseDrv.Read(version)
		if !ok {
			ret <- ErrNotApplied
			return
		}

		if err := m.versionDownExists(version); err != nil {
			ret <- err
			return
		}

		prev, err := m.sourceDrv.Prev(version)
		if os.IsNotExist(err) {
			// apply nil migration
			migr, err := m.metanewMigration(version, -1)
			if err != nil {
				ret <- err
				return
			}
			ret <- migr
			go migr.Buffer()

			migr, err = m.newMigration(version, -1)
			if err != nil {
				ret <- err
				return
			}
			ret <- migr
			go migr.Buffer()
			return
		} else if err != nil {
			ret <- err
			return
		}

		migr, err := m.metanewMigration(version, int64(prev))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()

		migr, err = m.newMigration(version, int64(prev))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()
	}
}
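
// Note (added for clarity): read, readUp and readDown are the producers behind
// Migrate, Steps, Up and Down; each migration they emit is buffered
// concurrently via migr.Buffer() while runMigrations or runDryRun consumes the
// channel.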
// readUp reads up migrations from `from` limited by `limit`.
// limit can be -1, implying no limit and reading until there are no more migrations.
// Each migration is then written to the ret channel.
// If an error occurs during reading, that error is written to the ret channel, too.
// Once readUp is done reading it will close the ret channel.
func (m *Migrate) readUp(limit int64, ret chan<- interface{}) {
	defer close(ret)

	if limit == 0 {
		ret <- ErrNoChange
		return
	}

	count := int64(0)
	from := int64(-1)
	for count < limit || limit == -1 {
		if m.stop() {
			return
		}

		if from == -1 {
			firstVersion, err := m.sourceDrv.First()
			if err != nil {
				ret <- err
				return
			}

			// Check if this version is present in the DB
			ok := m.databaseDrv.Read(firstVersion)
			if ok {
				from = int64(firstVersion)
				continue
			}

			// Check if firstVersion files exist (yaml or sql)
			if err = m.versionUpExists(firstVersion); err != nil {
				ret <- err
				return
			}

			migr, err := m.newMigration(firstVersion, int64(firstVersion))
			if err != nil {
				ret <- err
				return
			}
			ret <- migr
			go migr.Buffer()

			migr, err = m.metanewMigration(firstVersion, int64(firstVersion))
			if err != nil {
				ret <- err
				return
			}
			ret <- migr
			go migr.Buffer()
			from = int64(firstVersion)
			count++
			continue
		}

		// apply next migration
		next, err := m.sourceDrv.Next(suint64(from))
		if os.IsNotExist(err) {
			// no limit, but no migrations applied?
			if limit == -1 && count == 0 {
				ret <- ErrNoChange
				return
			}

			// no limit, reached end
			if limit == -1 {
				return
			}

			// reached end, and didn't apply any migrations
			if limit > 0 && count == 0 {
				ret <- ErrNoChange
				return
			}

			// applied fewer migrations than limit?
			if count < limit {
				ret <- ErrShortLimit{suint64(limit - count)}
				return
			}
		}

		if err != nil {
			ret <- err
			return
		}

		// Check if this version is present in the DB
		ok := m.databaseDrv.Read(next)
		if ok {
			from = int64(next)
			continue
		}

		// Check if next files exist (yaml or sql)
		if err = m.versionUpExists(next); err != nil {
			ret <- err
			return
		}

		migr, err := m.newMigration(next, int64(next))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()

		migr, err = m.metanewMigration(next, int64(next))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()
		from = int64(next)
		count++
	}
}

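// The following is an illustrative sketch, not part of the original file: it
// shows the intended consumption pattern for readUp. The caller owns the
// result channel, readUp runs as the producer in its own goroutine (closing
// the channel when it is done), and a consumer such as runMigrations drains
// it. The helper name applyUpSketch is hypothetical.
//
//	func (m *Migrate) applyUpSketch(limit int64) error {
//		ret := make(chan interface{})
//		go m.readUp(limit, ret)     // producer: emits *Migration values and errors
//		return m.runMigrations(ret) // consumer: applies them in order
//	}
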
// readDown reads down migrations from `from` limited by `limit`.
// limit can be -1, implying no limit and reading until there are no more migrations.
// Each migration is then written to the ret channel.
// If an error occurs during reading, that error is written to the ret channel, too.
// Once readDown is done reading it will close the ret channel.
func (m *Migrate) readDown(limit int64, ret chan<- interface{}) {
	defer close(ret)

	if limit == 0 {
		ret <- ErrNoChange
		return
	}

	from, _, err := m.databaseDrv.Version()
	if err != nil {
		ret <- err
		return
	}

	// no change if already at nil version
	if from == -1 && limit == -1 {
		ret <- ErrNoChange
		return
	}

	// can't go over limit if already at nil version
	if from == -1 && limit > 0 {
		ret <- ErrNoChange
		return
	}

	count := int64(0)
	for count < limit || limit == -1 {
		if m.stop() {
			return
		}

		err = m.versionDownExists(suint64(from))
		if err != nil {
			ret <- err
			return
		}

		prev, ok := m.databaseDrv.Prev(suint64(from))
		if !ok {
			// no limit or haven't reached limit, apply "first" migration
			if limit == -1 || limit-count > 0 {
				migr, err := m.metanewMigration(suint64(from), -1)
				if err != nil {
					ret <- err
					return
				}
				ret <- migr
				go migr.Buffer()

				migr, err = m.newMigration(suint64(from), -1)
				if err != nil {
					ret <- err
					return
				}
				ret <- migr
				go migr.Buffer()
				count++
			}

			if count < limit {
				ret <- ErrShortLimit{suint64(limit - count)}
			}
			return
		}

		migr, err := m.metanewMigration(suint64(from), int64(prev))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()

		migr, err = m.newMigration(suint64(from), int64(prev))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()
		from = int64(prev)
		count++
	}
}

// runMigrations reads *Migration and error from a channel. Any other type
// sent on this channel will result in a panic. Each migration is then
// proxied to the database driver and run against the database.
// Before running a newly received migration it will check if it's supposed
// to stop execution because it might have received a stop signal on the
// GracefulStop channel.
func (m *Migrate) runMigrations(ret <-chan interface{}) error {
	var lastInsertVersion int64
	for r := range ret {
		if m.stop() {
			return nil
		}

		switch r.(type) {
		case error:
			// Clear Migration query
			m.databaseDrv.ResetQuery()
			return r.(error)
		case *Migration:
			migr := r.(*Migration)
			if migr.Body != nil {
				if !m.SkipExecution {
					if err := m.databaseDrv.Run(migr.BufferedBody, migr.FileType, migr.FileName); err != nil {
						return err
					}
				}

				version := int64(migr.Version)
				if version == migr.TargetVersion {
					if version != lastInsertVersion {
						// Insert Version number into the table
						if err := m.databaseDrv.InsertVersion(version); err != nil {
							return err
						}
						lastInsertVersion = version
					}
				} else {
					// Delete Version number from the table
					if err := m.databaseDrv.RemoveVersion(version); err != nil {
						return err
					}
				}
			}
		}
	}
	return nil
}

func (m *Migrate) runDryRun(ret <-chan interface{}) error {
	migrations := make([]*Migration, 0)
	var lastInsertVersion int64
	for r := range ret {
		if m.stop() {
			return nil
		}

		switch r.(type) {
		case error:
			return r.(error)
		case *Migration:
			migr := r.(*Migration)
			if migr.Body != nil {
				version := int64(migr.Version)
				if version != lastInsertVersion {
					migrations = append(migrations, migr)
					lastInsertVersion = version
				}
			}
		}
	}
	fmt.Fprintf(os.Stdout, "%s", printDryRunStatus(migrations))
	return nil
}

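// Illustrative sketch (assumption, not part of the original file): runDryRun
// is chosen over runMigrations when the DryRun flag is set, as GotoVersion
// does below, so a caller that only wants to preview the plan could do
// something like the following. targetVersion is a hypothetical variable.
//
//	m.DryRun = true
//	if err := m.GotoVersion(targetVersion); err != nil {
//		// handle the error; nothing was applied, only the plan was printed
//	}
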
func (m *Migrate) squashMigrations(retUp <-chan interface{}, retDown <-chan interface{}, dataUp chan<- interface{}, dataDown chan<- interface{}, versions chan<- int64) error {
	var latestVersion int64
	go func() {
		defer close(dataUp)
		defer close(versions)

		var err error

		squashList := database.CustomList{
			list.New(),
		}

		defer func() {
			if err == nil {
				m.databaseDrv.Squash(&squashList, dataUp)
			}
		}()

		for r := range retUp {
			if m.stop() {
				return
			}
			switch r.(type) {
			case error:
				dataUp <- r.(error)
			case *Migration:
				migr := r.(*Migration)
				if migr.Body != nil {
					// read migration body and push it to squash list
					if err = m.databaseDrv.PushToList(migr.BufferedBody, migr.FileType, &squashList); err != nil {
						dataUp <- err
						return
					}
				}

				version := int64(migr.Version)
				if version == migr.TargetVersion && version != latestVersion {
					versions <- version
					latestVersion = version
				}
			}
		}
	}()

	go func() {
		defer close(dataDown)
		var err error

		squashList := database.CustomList{
			list.New(),
		}

		defer func() {
			if err == nil {
				m.databaseDrv.Squash(&squashList, dataDown)
			}
		}()

		for r := range retDown {
			if m.stop() {
				return
			}
			switch r.(type) {
			case error:
				dataDown <- r.(error)
			case *Migration:
				migr := r.(*Migration)
				if migr.Body != nil {
					if err = m.databaseDrv.PushToList(migr.BufferedBody, migr.FileType, &squashList); err != nil {
						dataDown <- err
						return
					}
				}
			}
		}
	}()
	return nil
}

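// Illustrative sketch (assumption, not part of the original file) of how a
// caller is expected to wire squashMigrations: retUp/retDown are fed by the
// read helpers, while dataUp, dataDown and versions must each be drained
// (typically in their own goroutines), otherwise the squashing goroutines
// block on their sends. All channel names below are hypothetical.
//
//	retUp, retDown := make(chan interface{}), make(chan interface{})
//	dataUp, dataDown := make(chan interface{}), make(chan interface{})
//	versions := make(chan int64)
//
//	go m.readUp(-1, retUp) // or a bounded read starting at the squash version
//	go m.readDown(-1, retDown)
//	_ = m.squashMigrations(retUp, retDown, dataUp, dataDown, versions)
//	// drain dataUp, dataDown and versions concurrently to collect the
//	// squashed statements and the versions they cover
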
// versionUpExists checks the source to see whether an up migration
// (sql or meta) exists for the specified migration version.
func (m *Migrate) versionUpExists(version uint64) error {
	// try up migration first
	directions := m.sourceDrv.GetDirections(version)
	if !directions[source.Up] && !directions[source.MetaUp] {
		return fmt.Errorf("%d up migration not found", version)
	}

	if directions[source.Up] {
		up, _, _, err := m.sourceDrv.ReadUp(version)
		if err == nil {
			defer up.Close()
		}

		if os.IsExist(err) {
			return nil
		} else if !os.IsNotExist(err) {
			return err
		}
	}

	if directions[source.MetaUp] {
		up, _, _, err := m.sourceDrv.ReadMetaUp(version)
		if err == nil {
			defer up.Close()
		}

		if os.IsExist(err) {
			return nil
		} else if !os.IsNotExist(err) {
			return err
		}
	}

	return os.ErrNotExist
}

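// Note (assumption, not from the original source): callers such as readUp
// treat any non-nil result from versionUpExists as "this version cannot be
// migrated up", e.g.
//
//	if err := m.versionUpExists(v); err != nil {
//		// abort: no usable up.sql / up.yaml source for version v
//	}
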
// versionDownExists checks the source to see whether a down migration
// (sql or meta) exists for the specified migration version.
func (m *Migrate) versionDownExists(version uint64) error {
	// try down migration first
	directions := m.sourceDrv.GetDirections(version)
	if !directions[source.Down] && !directions[source.MetaDown] {
		return fmt.Errorf("%d down migration not found", version)
	}

	if directions[source.Down] {
		up, _, _, err := m.sourceDrv.ReadDown(version)
		if err == nil {
			defer up.Close()
		}

		if os.IsExist(err) {
			return nil
		} else if !os.IsNotExist(err) {
			return err
		}
	}

	if directions[source.MetaDown] {
		up, _, _, err := m.sourceDrv.ReadMetaDown(version)
		if err == nil {
			defer up.Close()
		}

		if os.IsExist(err) {
			return nil
		} else if !os.IsNotExist(err) {
			return err
		}
	}

	return os.ErrNotExist
}

// newMigration is a helper func that returns a *Migration for the
// specified version and targetVersion (sql files). If targetVersion is
// lower than version, it will return the down migration.
func (m *Migrate) newMigration(version uint64, targetVersion int64) (*Migration, error) {
	var migr *Migration

	if targetVersion >= int64(version) {
		r, identifier, fileName, err := m.sourceDrv.ReadUp(version)
		if os.IsNotExist(err) {
			// create "empty" migration
			migr, err = NewMigration(nil, "", version, targetVersion, "sql", "")
			if err != nil {
				return nil, err
			}

		} else if err != nil {
			return nil, err

		} else {
			// create migration from up source
			migr, err = NewMigration(r, identifier, version, targetVersion, "sql", fileName)
			if err != nil {
				return nil, err
			}
		}

	} else {
		r, identifier, fileName, err := m.sourceDrv.ReadDown(version)
		if os.IsNotExist(err) {
			// create "empty" migration
			migr, err = NewMigration(nil, "", version, targetVersion, "sql", "")
			if err != nil {
				return nil, err
			}

		} else if err != nil {
			return nil, err

		} else {
			// create migration from down source
			migr, err = NewMigration(r, identifier, version, targetVersion, "sql", fileName)
			if err != nil {
				return nil, err
			}
		}
	}

	if m.PrefetchMigrations > 0 && migr.Body != nil {
		//m.logVerbosePrintf("Start buffering %v\n", migr.LogString())
	} else {
		//m.logVerbosePrintf("Scheduled %v\n", migr.LogString())
	}

	return migr, nil
}

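// Illustrative note (assumption, not part of the original file): the direction
// is encoded entirely in the (version, targetVersion) pair, which is how the
// read helpers above request up versus down migrations. v and prev are
// hypothetical, and errors are ignored for brevity.
//
//	up, _ := m.newMigration(v, int64(v)) // targetVersion == version -> up.sql
//	down, _ := m.newMigration(v, prev)   // targetVersion < version  -> down.sql
//	last, _ := m.newMigration(v, -1)     // roll back past the first migration
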
// metanewMigration is a helper func that returns a *Migration for the
// specified version and targetVersion (yaml).
func (m *Migrate) metanewMigration(version uint64, targetVersion int64) (*Migration, error) {
	var migr *Migration

	if targetVersion >= int64(version) {
		r, identifier, fileName, err := m.sourceDrv.ReadMetaUp(version)
		if os.IsNotExist(err) {
			// create "empty" migration
			migr, err = NewMigration(nil, "", version, targetVersion, "meta", "")
			if err != nil {
				return nil, err
			}

		} else if err != nil {
			return nil, err

		} else {
			// create migration from up source
			migr, err = NewMigration(r, identifier, version, targetVersion, "meta", fileName)
			if err != nil {
				return nil, err
			}
		}

	} else {
		r, identifier, fileName, err := m.sourceDrv.ReadMetaDown(version)
		if os.IsNotExist(err) {
			// create "empty" migration
			migr, err = NewMigration(nil, "", version, targetVersion, "meta", "")
			if err != nil {
				return nil, err
			}

		} else if err != nil {
			return nil, err

		} else {
			// create migration from down source
			migr, err = NewMigration(r, identifier, version, targetVersion, "meta", fileName)
			if err != nil {
				return nil, err
			}
		}
	}

	if m.PrefetchMigrations > 0 && migr.Body != nil {
		//m.logVerbosePrintf("Start buffering %v\n", migr.LogString())
	} else {
		//m.logVerbosePrintf("Scheduled %v\n", migr.LogString())
	}

	return migr, nil
}

// stop returns true if no more migrations should be run against the database
// because a stop signal was received on the GracefulStop channel.
// Calls are cheap and this function is not blocking.
func (m *Migrate) stop() bool {
	if m.isGracefulStop {
		return true
	}

	select {
	case <-m.GracefulStop:
		m.isGracefulStop = true
		return true

	default:
		return false
	}
}

// lock is a thread safe helper function to lock the database.
// It should be called as late as possible when running migrations.
func (m *Migrate) lock() error {
	m.isLockedMu.Lock()
	defer m.isLockedMu.Unlock()

	if m.isLocked {
		return ErrLocked
	}

	// create done channel, used in the timeout goroutine
	done := make(chan bool, 1)
	defer func() {
		done <- true
	}()

	// use errchan to signal error back to this context
	errchan := make(chan error, 2)

	// start timeout goroutine
	timeout := time.After(m.LockTimeout)
	go func() {
		for {
			select {
			case <-done:
				return
			case <-timeout:
				errchan <- ErrLockTimeout
				return
			}
		}
	}()

	// now try to acquire the lock
	go func() {
		if err := m.databaseDrv.Lock(); err != nil {
			errchan <- err
		} else {
			errchan <- nil
		}
		return
	}()

	// wait until we either receive ErrLockTimeout or an error from the Lock operation
	err := <-errchan
	if err == nil {
		m.isLocked = true
	}
	return err
}

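// Illustrative sketch (assumption, not part of the original file) of the
// lock/unlock discipline used by the exported entry points such as
// GotoVersion: acquire the lock, run the migration pipeline, and let
// unlockErr combine an unlock failure with the pipeline error.
// pipelineErr is a hypothetical variable.
//
//	if err := m.lock(); err != nil {
//		return err
//	}
//	// ... read and run migrations, collecting pipelineErr ...
//	return m.unlockErr(pipelineErr)
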
// unlock is a thread safe helper function to unlock the database.
// It should be called as early as possible when no more migrations are
// expected to be executed.
func (m *Migrate) unlock() error {
	m.isLockedMu.Lock()
	defer m.isLockedMu.Unlock()

	defer func() {
		m.isLocked = false
	}()

	if err := m.databaseDrv.UnLock(); err != nil {
		return err
	}
	return nil
}

// unlockErr calls unlock and returns a combined error
// if a prevErr is not nil.
func (m *Migrate) unlockErr(prevErr error) error {
	if err := m.unlock(); err != nil {
		return NewMultiError(prevErr, err)
	}
	return prevErr
}

// GotoVersion migrates the database to gotoVersion, applying (or rolling
// back) the chain of migrations leading to it.
func (m *Migrate) GotoVersion(gotoVersion int64) error {
	mode, err := m.databaseDrv.GetSetting("migration_mode")
	if err != nil {
		return err
	}
	if mode != "true" {
		return ErrNoMigrationMode
	}

	currentVersion, dirty, err := m.Version()
	currVersion := int64(currentVersion)
	if err != nil {
		if err == ErrNilVersion {
			currVersion = database.NilVersion
		} else {
			return errors.Wrap(err, "cannot determine version")
		}
	}
	if dirty {
		return ErrDirty{currVersion}
	}

	if err := m.lock(); err != nil {
		return err
	}

	ret := make(chan interface{})
	if currVersion <= gotoVersion {
		go m.readUpFromVersion(-1, gotoVersion, ret)
	} else if currVersion > gotoVersion {
		go m.readDownFromVersion(currVersion, gotoVersion, ret)
	}

	if m.DryRun {
		return m.unlockErr(m.runDryRun(ret))
	} else {
		return m.unlockErr(m.runMigrations(ret))
	}
}

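// Example (illustrative, not part of the original file): a caller with a
// configured *Migrate instance navigates the migration chain with a single
// call; the version number below is hypothetical.
//
//	if err := m.GotoVersion(1572238297262); err != nil {
//		// e.g. ErrNoMigrationMode, ErrDirty, or a failed migration
//	}
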
// readUpFromVersion reads up migrations from `from` until `to` is reached (a modified version of readUp).
// from can be -1, implying that the chain starts from the first migration.
// Each migration is then written to the ret channel.
// If an error occurs during reading, that error is written to the ret channel, too.
// Once readUpFromVersion is done reading it will close the ret channel.
func (m *Migrate) readUpFromVersion(from int64, to int64, ret chan<- interface{}) {
	defer close(ret)
	var noOfAppliedMigrations int
	for {
		if m.stop() {
			return
		}
		if from == to {
			if noOfAppliedMigrations == 0 {
				ret <- ErrNoChange
			}
			return
		}

		if from == -1 {
			firstVersion, err := m.sourceDrv.First()
			if err != nil {
				ret <- err
				return
			}

			// Check if this version is present in the DB
			ok := m.databaseDrv.Read(firstVersion)
			if ok {
				from = int64(firstVersion)
				continue
			}

			// Check if firstVersion files exist (yaml or sql)
			if err = m.versionUpExists(firstVersion); err != nil {
				ret <- err
				return
			}

			migr, err := m.newMigration(firstVersion, int64(firstVersion))
			if err != nil {
				ret <- err
				return
			}
			ret <- migr
			go migr.Buffer()

			migr, err = m.metanewMigration(firstVersion, int64(firstVersion))
			if err != nil {
				ret <- err
				return
			}
			ret <- migr
			go migr.Buffer()
			from = int64(firstVersion)
			noOfAppliedMigrations++
			continue
		}

		// apply next migration
		next, err := m.sourceDrv.Next(suint64(from))
		if err != nil {
			ret <- err
			return
		}

		// Check if this version is present in the DB
		ok := m.databaseDrv.Read(next)
		if ok {
			from = int64(next)
			continue
		}

		// Check if next files exist (yaml or sql)
		if err = m.versionUpExists(next); err != nil {
			ret <- err
			return
		}

		migr, err := m.newMigration(next, int64(next))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()

		migr, err = m.metanewMigration(next, int64(next))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()
		from = int64(next)
		noOfAppliedMigrations++
	}
}

// readDownFromVersion reads down migrations from `from` until `to` is reached (a modified version of readDown).
// to can be -1, implying rolling back past the first migration to the nil version.
// Each migration is then written to the ret channel.
// If an error occurs during reading, that error is written to the ret channel, too.
// Once readDownFromVersion is done reading it will close the ret channel.
func (m *Migrate) readDownFromVersion(from int64, to int64, ret chan<- interface{}) {
	defer close(ret)
	var err error
	var noOfAppliedMigrations int
	for {
		if m.stop() {
			return
		}

		if from == to {
			if noOfAppliedMigrations == 0 {
				ret <- ErrNoChange
			}
			return
		}

		err = m.versionDownExists(suint64(from))
		if err != nil {
			ret <- err
			return
		}

		prev, ok := m.databaseDrv.Prev(suint64(from))
		if !ok {
			// Check if any prev version is available in the source
			prev, err = m.sourceDrv.Prev(suint64(from))
			if os.IsNotExist(err) && to == -1 {
				// apply nil migration
				migr, err := m.metanewMigration(suint64(from), -1)
				if err != nil {
					ret <- err
					return
				}

				ret <- migr
				go migr.Buffer()

				migr, err = m.newMigration(suint64(from), -1)
				if err != nil {
					ret <- err
					return
				}

				ret <- migr
				go migr.Buffer()

				from = database.NilVersion
				noOfAppliedMigrations++
				continue
			} else if err != nil {
				ret <- err
				return
			}
			ret <- fmt.Errorf("%v not applied on database", prev)
			return
		}

		migr, err := m.metanewMigration(suint64(from), int64(prev))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()

		migr, err = m.newMigration(suint64(from), int64(prev))
		if err != nil {
			ret <- err
			return
		}

		ret <- migr
		go migr.Buffer()
		from = int64(prev)
		noOfAppliedMigrations++
	}
}

func (m *Migrate) ApplySeed(q interface{}) error {
	return m.databaseDrv.ApplySeed(q)
}

func (m *Migrate) ExportDataDump(tableNames []string) ([]byte, error) {
	return m.databaseDrv.ExportDataDump(tableNames)
}

func printDryRunStatus(migrations []*Migration) *bytes.Buffer {
	out := new(tabwriter.Writer)
	buf := &bytes.Buffer{}
	out.Init(buf, 0, 8, 2, ' ', 0)
	w := util.NewPrefixWriter(out)
	w.Write(util.LEVEL_0, "VERSION\tTYPE\tNAME\n")
	for _, migration := range migrations {
		var direction string
		if int64(migration.Version) == migration.TargetVersion {
			direction = "up"
		} else {
			direction = "down"
		}
		w.Write(util.LEVEL_0, "%d\t%s\t%s\n",
			migration.Version,
			direction,
			migration.Identifier,
		)
	}
	out.Flush()
	return buf
}

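// Illustrative output (assumption, not from the original file): the tabwriter
// renders one aligned row per planned migration; names and versions below are
// hypothetical.
//
//	VERSION        TYPE  NAME
//	1572238297262  up    create_table_public_author
//	1572238297263  up    add_column_public_author_age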