package api

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
	"path/filepath"
	"strings"
	"time"

	"github.com/gin-gonic/gin"
	"github.com/sirupsen/logrus"

	"github.com/hasura/graphql-engine/cli"
	"github.com/hasura/graphql-engine/cli/internal/hasura"
	"github.com/hasura/graphql-engine/cli/internal/metadataobject"
	"github.com/hasura/graphql-engine/cli/internal/metadatautil"
	"github.com/hasura/graphql-engine/cli/migrate"
	"github.com/hasura/graphql-engine/cli/migrate/cmd"
	"github.com/hasura/graphql-engine/cli/migrate/database/hasuradb"
)

const (
	// DataAPIError is the prefix the migrate engine puts on errors that
	// originate from the Hasura data API.
	DataAPIError = "Data Error: "
	// MigrationMode is the name of the setting that toggles migration mode.
	MigrationMode = "migration_mode"
)

// Response is the JSON body returned to the console for errors and for a
// successfully created migration.
type Response struct {
	Code       string `json:"code,omitempty"`
	Message    string `json:"message,omitempty"`
	Name       string `json:"name,omitempty"`
	StatusCode int    `json:"-"`
}

// Request is the JSON body the console sends to create a new migration.
type Request struct {
	Name          string        `json:"name"`
	Up            []requestType `json:"up"`
	Down          []requestType `json:"down"`
	SkipExecution bool          `json:"skip_execution"`
	SourceName    string        `json:"datasource,omitempty"`
}
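
// An illustrative POST body (field names follow the json tags above, the
// values are hypothetical):
//
//	{
//	  "name": "create_users",
//	  "up":   [{"type": "run_sql", "args": {"sql": "CREATE TABLE users(id serial PRIMARY KEY);"}}],
//	  "down": [{"type": "run_sql", "args": {"sql": "DROP TABLE users;"}}],
//	  "datasource": "default"
//	}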

// requestType is a single step in a migration: one Hasura metadata/query
// API call such as run_sql.
type requestType struct {
	Version  int         `json:"version,omitempty"`
	Type     string      `json:"type"`
	Database string      `json:"datasource,omitempty"`
	Args     interface{} `json:"args"`
}
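
// MigrateAPI serves the console's migration endpoint: GET reports the
// status of all migrations, POST creates a new migration from the given
// up/down steps and, unless skip_execution is set, applies it.
//
// Illustrative usage, assuming the console API's default mount point
// (adjust host, port and path to your setup):
//
//	curl 'http://localhost:9693/apis/migrate?datasource=default'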
func MigrateAPI(c *gin.Context) {
	ecPtr, ok := c.Get("ec")
	if !ok {
		return
	}

	// Get file URL (currently unused)
	//sourcePtr, ok := c.Get("filedir")

	// Get logger
	loggerPtr, ok := c.Get("logger")
	if !ok {
		return
	}

	// Get config version
	version := c.GetInt("version")

	// Convert to *cli.ExecutionContext
	ec, ok := ecPtr.(*cli.ExecutionContext)
	if !ok {
		c.JSON(http.StatusInternalServerError, &Response{Code: "internal_error", Message: "cannot get execution context"})
		return
	}
	//sourceURL := sourcePtr.(*url.URL)
	logger := loggerPtr.(*logrus.Logger)

	mdHandler := metadataobject.NewHandlerFromEC(ec)

	// Switch on the request method
	switch c.Request.Method {
	case "GET":
		sourceName := c.Query("datasource")
		if ec.Config.Version >= cli.V3 && sourceName == "" {
			c.JSON(http.StatusInternalServerError, &Response{Code: "internal_error", Message: "datasource query parameter is required"})
			return
		}
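
		// Config v3 projects can have multiple database sources, so for
		// them the source kind is looked up from the exported metadata;
		// earlier configs are Postgres-only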
		sourceKind := hasura.SourceKindPG
		if ec.Config.Version >= cli.V3 {
			kind, err := metadatautil.GetSourceKind(ec.APIClient.V1Metadata.ExportMetadata, sourceName)
			if err != nil {
				c.JSON(http.StatusInternalServerError, &Response{Code: "internal_error", Message: err.Error()})
				return
			}
			sourceKind = *kind
		}

		t, err := migrate.NewMigrate(ec, false, sourceName, sourceKind)
		if err != nil {
			c.JSON(http.StatusInternalServerError, &Response{Code: "internal_error", Message: err.Error()})
			return
		}

		// Rescan the file system so migrations created since startup are
		// picked up
		err = t.ReScan()
		if err != nil {
			c.JSON(http.StatusInternalServerError, &Response{Code: "internal_error", Message: err.Error()})
			return
		}
		status, err := t.GetStatus()
		if err != nil {
			c.JSON(http.StatusInternalServerError, &Response{Code: "internal_error", Message: err.Error()})
			return
		}
		c.JSON(http.StatusOK, status)
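
	// POST: write migration files for the request and apply the up steps
	// on the server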
	case "POST":
		var request Request
		var err error

		// Bind the request body to the Request struct
		if err = c.BindJSON(&request); err != nil {
			c.JSON(http.StatusInternalServerError, &Response{Code: "request_parse_error", Message: err.Error()})
			return
		}

		if ec.Config.Version >= cli.V3 && request.SourceName == "" {
			c.JSON(http.StatusInternalServerError, &Response{Code: "internal_error", Message: "datasource key not found in body"})
			return
		}

		startTime := time.Now()
		timestamp := startTime.UnixNano() / int64(time.Millisecond)
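		// Migration versions are millisecond Unix timestamps; the final
		// migration name is "<timestamp>_<request.Name>", e.g. the
		// illustrative 1572237730898_create_users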
		sourceName := request.SourceName
		if ec.Config.Version < cli.V3 {
			sourceName = ""
		}
		sourceKind := hasura.SourceKindPG
		if ec.Config.Version >= cli.V3 {
			kind, err := metadatautil.GetSourceKind(ec.APIClient.V1Metadata.ExportMetadata, sourceName)
			if err != nil {
				c.JSON(http.StatusInternalServerError, &Response{Code: "internal_error", Message: err.Error()})
				return
			}
			sourceKind = *kind
		}
		t, err := migrate.NewMigrate(ec, false, sourceName, sourceKind)
		if err != nil {
			c.JSON(http.StatusInternalServerError, &Response{Code: "internal_error", Message: err.Error()})
			return
		}
		createOptions := cmd.New(timestamp, request.Name, filepath.Join(ec.MigrationDir, sourceName))
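
		// Config v1 stores migrations as yaml metadata actions; later
		// config versions write the run_sql steps out as plain SQL files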
		if version != int(cli.V1) {
			sqlUp := &bytes.Buffer{}
			sqlDown := &bytes.Buffer{}
			// Concatenate every run_sql step into a single up script
			for _, arg := range request.Up {
				if arg.Type == hasuradb.RunSQL {
					argByt, err := json.Marshal(arg.Args)
					if err != nil {
						c.JSON(http.StatusInternalServerError, &Response{Code: "request_parse_error", Message: err.Error()})
						return
					}
					var to hasuradb.RunSQLInput
					err = json.Unmarshal(argByt, &to)
					if err != nil {
						c.JSON(http.StatusInternalServerError, &Response{Code: "request_parse_error", Message: err.Error()})
						return
					}
					sqlUp.WriteString(to.SQL)
					sqlUp.WriteString("\n")
				}
			}

			// ...and likewise for the down steps
			for _, arg := range request.Down {
				if arg.Type == hasuradb.RunSQL {
					argByt, err := json.Marshal(arg.Args)
					if err != nil {
						c.JSON(http.StatusInternalServerError, &Response{Code: "request_parse_error", Message: err.Error()})
						return
					}
					var to hasuradb.RunSQLInput
					err = json.Unmarshal(argByt, &to)
					if err != nil {
						c.JSON(http.StatusInternalServerError, &Response{Code: "request_parse_error", Message: err.Error()})
						return
					}
					sqlDown.WriteString(to.SQL)
					sqlDown.WriteString("\n")
				}
			}

			if sqlUp.String() != "" {
				err = createOptions.SetSQLUp(sqlUp.String())
				if err != nil {
					c.JSON(http.StatusInternalServerError, &Response{Code: "create_file_error", Message: err.Error()})
					return
				}
			}
			if sqlDown.String() != "" {
				err = createOptions.SetSQLDown(sqlDown.String())
				if err != nil {
					c.JSON(http.StatusInternalServerError, &Response{Code: "create_file_error", Message: err.Error()})
					return
				}
			}

			if sqlUp.String() != "" || sqlDown.String() != "" {
				err = createOptions.Create()
				if err != nil {
					c.JSON(http.StatusInternalServerError, &Response{Code: "create_file_error", Message: err.Error()})
					return
				}
			} else {
				// No SQL to write out
				timestamp = 0
			}
		} else {
			err = createOptions.SetMetaUp(request.Up)
			if err != nil {
				c.JSON(http.StatusInternalServerError, &Response{Code: "create_file_error", Message: err.Error()})
				return
			}
			err = createOptions.SetMetaDown(request.Down)
			if err != nil {
				c.JSON(http.StatusInternalServerError, &Response{Code: "create_file_error", Message: err.Error()})
				return
			}

			err = createOptions.Create()
			if err != nil {
				c.JSON(http.StatusInternalServerError, &Response{Code: "create_file_error", Message: err.Error()})
				return
			}
		}
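
		// If anything below fails, delete the files that were just
		// created so a failed request leaves no stray migration behind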
		defer func() {
			if err != nil && timestamp != 0 {
				if err := createOptions.Delete(); err != nil {
					logger.Debug(err)
				}
			}
		}()

		// Rescan the file system so the new migration is visible to the
		// migrate instance
		err = t.ReScan()
		if err != nil {
			c.JSON(http.StatusInternalServerError, &Response{Code: "internal_error", Message: err.Error()})
			return
		}

		upByt, err := json.Marshal(request.Up)
		if err != nil {
			c.JSON(http.StatusInternalServerError, &Response{Code: "internal_error", Message: err.Error()})
			return
		}
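
		// Apply the up steps through the migrate engine; with
		// skip_execution they are only recorded, not executed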
		if err = t.QueryWithVersion(uint64(timestamp), ioutil.NopCloser(bytes.NewReader(upByt)), request.SkipExecution); err != nil {
			if strings.HasPrefix(err.Error(), DataAPIError) {
				c.JSON(http.StatusBadRequest, &Response{Code: "data_api_error", Message: strings.TrimPrefix(err.Error(), DataAPIError)})
				return
			}

			if err == migrate.ErrNoMigrationMode {
				c.JSON(http.StatusBadRequest, &Response{Code: "migration_mode_disabled", Message: err.Error()})
				return
			}

			c.JSON(http.StatusInternalServerError, &Response{Code: "internal_error", Message: err.Error()})
			return
		}
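
		// Applying the migration can change server metadata, so re-export
		// it and sync the local metadata files before returning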
		defer func() {
			var files map[string][]byte
			files, err = mdHandler.ExportMetadata()
			if err != nil {
				logger.Debug(err)
				return
			}
			err = mdHandler.WriteMetadata(files)
			if err != nil {
				logger.Debug(err)
				return
			}
		}()
		c.JSON(http.StatusOK, &Response{Name: fmt.Sprintf("%d_%s", timestamp, request.Name)})
	default:
		c.JSON(http.StatusMethodNotAllowed, &gin.H{"message": "Method not allowed"})
	}
}