Mirror of https://github.com/hasura/graphql-engine.git (synced 2024-12-14 08:02:15 +03:00)
Parent: 8389a7e273
Commit: 71cf017197
@@ -157,7 +157,7 @@ jobs:
# build the server binary, and package into docker image
build_server:
docker:
- image: hasura/graphql-engine-server-builder:20190314
- image: hasura/graphql-engine-server-builder:20190415-1
working_directory: ~/graphql-engine
steps:
- attach_workspace:
@@ -171,6 +171,11 @@ jobs:
- restore_cache:
keys:
- server-app-cache-{{ .Branch }}-{{ .Revision }}
- run:
name: Install latest postgresql client tools
command: |
apt-get -y update
apt-get -y install postgresql-client-11
- run:
name: Build the server
working_directory: ./server
@@ -218,7 +223,7 @@ jobs:
environment:
PG_VERSION: "11_1"
docker:
- image: hasura/graphql-engine-server-builder:20190314
- image: hasura/graphql-engine-server-builder:20190415-1
# TODO: change this to circleci postgis when they have one for pg 11
- image: mdillon/postgis:11-alpine
<<: *test_pg_env
@@ -228,7 +233,7 @@ jobs:
environment:
PG_VERSION: "10_6"
docker:
- image: hasura/graphql-engine-server-builder:20190314
- image: hasura/graphql-engine-server-builder:20190415-1
- image: circleci/postgres:10.6-alpine-postgis
<<: *test_pg_env

@@ -237,7 +242,7 @@ jobs:
environment:
PG_VERSION: "9_6"
docker:
- image: hasura/graphql-engine-server-builder:20190314
- image: hasura/graphql-engine-server-builder:20190415-1
- image: circleci/postgres:9.6-alpine-postgis
<<: *test_pg_env

@@ -246,7 +251,7 @@ jobs:
environment:
PG_VERSION: "9_5"
docker:
- image: hasura/graphql-engine-server-builder:20190314
- image: hasura/graphql-engine-server-builder:20190415-1
- image: circleci/postgres:9.5-alpine-postgis
<<: *test_pg_env
@@ -5,10 +5,15 @@ FROM debian:stretch-20190228-slim
ARG docker_ver="17.09.0-ce"
ARG resolver="lts-13.12"
ARG stack_ver="1.9.3"
ARG postgres_ver="11"

# Install GNU make, curl, git and docker client. Required to build the server
RUN apt-get -y update \
&& apt-get install -y curl g++ gcc libc6-dev libpq-dev libffi-dev libgmp-dev make xz-utils zlib1g-dev git gnupg upx netcat python3 python3-pip \
&& apt-get -y install curl gnupg2 \
&& echo "deb http://apt.postgresql.org/pub/repos/apt/ stretch-pgdg main" > /etc/apt/sources.list.d/pgdg.list \
&& curl -s https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
&& apt-get -y update \
&& apt-get install -y g++ gcc libc6-dev libpq-dev libffi-dev libgmp-dev make xz-utils zlib1g-dev git gnupg upx netcat python3 python3-pip postgresql-client-${postgres_ver} postgresql-client-common \
&& curl -Lo /tmp/docker-${docker_ver}.tgz https://download.docker.com/linux/static/stable/x86_64/docker-${docker_ver}.tgz \
&& tar -xz -C /tmp -f /tmp/docker-${docker_ver}.tgz \
&& mv /tmp/docker/* /usr/bin \
@@ -7,14 +7,19 @@ import (

"github.com/ghodss/yaml"
"github.com/hasura/graphql-engine/cli"
mig "github.com/hasura/graphql-engine/cli/migrate/cmd"
"github.com/hasura/graphql-engine/cli/migrate"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"

mig "github.com/hasura/graphql-engine/cli/migrate/cmd"
log "github.com/sirupsen/logrus"
)

const migrateCreateCmdExamples = ` # Setup migration files for the first time by introspecting a server:
hasura migrate create "init" --from-server`

func newMigrateCreateCmd(ec *cli.ExecutionContext) *cobra.Command {
v := viper.New()
opts := &migrateCreateOptions{
@@ -25,6 +30,7 @@ func newMigrateCreateCmd(ec *cli.ExecutionContext) *cobra.Command {
Use: "create [migration-name]",
Short: "Create files required for a migration",
Long: "Create sql and yaml files required for a migration",
Example: migrateCreateCmdExamples,
SilenceUsage: true,
Args: cobra.ExactArgs(1),
PreRunE: func(cmd *cobra.Command, args []string) error {
@@ -48,13 +54,17 @@ func newMigrateCreateCmd(ec *cli.ExecutionContext) *cobra.Command {
}
f := migrateCreateCmd.Flags()
opts.flags = f
f.StringVar(&opts.sqlFile, "sql-from-file", "", "path to an sql file which contains the up actions")
f.BoolVar(&opts.fromServer, "from-server", false, "get SQL statements and hasura metadata from the server")
f.StringVar(&opts.sqlFile, "sql-from-file", "", "path to an sql file which contains the SQL statements")
f.BoolVar(&opts.sqlServer, "sql-from-server", false, "take pg_dump from server and save it as a migration")
f.StringArrayVar(&opts.schemaNames, "schema", []string{"public"}, "name of Postgres schema to export as migration")
f.StringVar(&opts.metaDataFile, "metadata-from-file", "", "path to a hasura metadata file to be used for up actions")
f.BoolVar(&opts.metaDataServer, "metadata-from-server", false, "take metadata from the server and write it as an up migration file")
f.String("endpoint", "", "http(s) endpoint for Hasura GraphQL Engine")
f.String("admin-secret", "", "admin secret for Hasura GraphQL Engine")
f.String("access-key", "", "access key for Hasura GraphQL Engine")
f.MarkDeprecated("access-key", "use --admin-secret instead")

migrateCreateCmd.MarkFlagFilename("sql-from-file")
migrateCreateCmd.MarkFlagFilename("metadata-from-file")
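
A usage sketch for the new flags (the extra schema name "sales" below is purely hypothetical):

# create an initial migration from a running server (SQL schema + metadata)
hasura migrate create "init" --from-server

# or pick the pieces individually: pg_dump of selected schemas plus server metadata
hasura migrate create "init" --sql-from-server --schema public --schema sales --metadata-from-server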
@@ -73,15 +83,38 @@ type migrateCreateOptions struct {
flags *pflag.FlagSet

// Flags
fromServer bool
sqlFile string
sqlServer bool
metaDataFile string
metaDataServer bool
schemaNames []string
}

func (o *migrateCreateOptions) run() (version int64, err error) {
timestamp := getTime()
createOptions := mig.New(timestamp, o.name, o.EC.MigrationDir)

if o.fromServer {
o.sqlServer = true
o.metaDataServer = true
}

if o.flags.Changed("metadata-from-file") && o.sqlServer {
return 0, errors.New("only one sql type can be set")
}
if o.flags.Changed("metadata-from-file") && o.metaDataServer {
return 0, errors.New("only one metadata type can be set")
}

var migrateDrv *migrate.Migrate
if o.sqlServer || o.metaDataServer {
migrateDrv, err = newMigrate(o.EC.MigrationDir, o.EC.ServerConfig.ParsedEndpoint, o.EC.ServerConfig.AdminSecret, o.EC.Logger, o.EC.Version)
if err != nil {
return 0, errors.Wrap(err, "cannot create migrate instance")
}
}

if o.flags.Changed("sql-from-file") {
// sql-file flag is set
err := createOptions.SetSQLUpFromFile(o.sqlFile)
@@ -89,9 +122,12 @@ func (o *migrateCreateOptions) run() (version int64, err error) {
return 0, errors.Wrap(err, "cannot set sql file")
}
}

if o.flags.Changed("metadata-from-file") && o.metaDataServer {
return 0, errors.New("only one metadata type can be set")
if o.sqlServer {
data, err := migrateDrv.ExportSchemaDump(o.schemaNames)
if err != nil {
return 0, errors.Wrap(err, "cannot fetch schema dump")
}
createOptions.SetSQLUp(string(data))
}

if o.flags.Changed("metadata-from-file") {
@@ -103,12 +139,6 @@ func (o *migrateCreateOptions) run() (version int64, err error) {
}

if o.metaDataServer {
// create new migrate instance
migrateDrv, err := newMigrate(o.EC.MigrationDir, o.EC.ServerConfig.ParsedEndpoint, o.EC.ServerConfig.AdminSecret, o.EC.Logger, o.EC.Version)
if err != nil {
return 0, errors.Wrap(err, "cannot create migrate instance")
}

// fetch metadata from server
metaData, err := migrateDrv.ExportMetadata()
if err != nil {
@@ -138,7 +168,7 @@ func (o *migrateCreateOptions) run() (version int64, err error) {
}
}

if !o.flags.Changed("sql-from-file") && !o.flags.Changed("metadata-from-file") && !o.metaDataServer {
if !o.flags.Changed("sql-from-file") && !o.flags.Changed("metadata-from-file") && !o.metaDataServer && !o.sqlServer {
// Set empty data for [up|down].yaml
createOptions.MetaUp = []byte(`[]`)
createOptions.MetaDown = []byte(`[]`)
@@ -153,7 +183,7 @@ func (o *migrateCreateOptions) run() (version int64, err error) {
if err != nil {
return 0, errors.Wrap(err, "error creating migration files")
}
return 0, nil
return timestamp, nil
}

func getTime() int64 {
@@ -105,6 +105,8 @@ type Driver interface {
SettingsDriver

MetadataDriver

SchemaDriver
}

// Open returns a new driver instance.
@@ -39,7 +39,8 @@ var (
type Config struct {
MigrationsTable string
SettingsTable string
URL *nurl.URL
v1URL *nurl.URL
schemDumpURL *nurl.URL
Headers map[string]string
isCMD bool
}
@@ -117,11 +118,16 @@ func (h *HasuraDB) Open(url string, isCMD bool, logger *log.Logger) (database.Dr
hx, err := WithInstance(&Config{
MigrationsTable: DefaultMigrationsTable,
SettingsTable: DefaultSettingsTable,
URL: &nurl.URL{
v1URL: &nurl.URL{
Scheme: scheme,
Host: hurl.Host,
Path: path.Join(hurl.Path, "v1/query"),
},
schemDumpURL: &nurl.URL{
Scheme: scheme,
Host: hurl.Host,
Path: path.Join(hurl.Path, "v1alpha1/pg_dump"),
},
isCMD: isCMD,
Headers: headers,
}, logger)
@@ -162,7 +168,7 @@ func (h *HasuraDB) UnLock() error {
return nil
}

resp, body, err := h.sendQuery(h.migrationQuery)
resp, body, err := h.sendv1Query(h.migrationQuery)
if err != nil {
return err
}
@@ -283,7 +289,7 @@ func (h *HasuraDB) getVersions() (err error) {
}

// Send Query
resp, body, err := h.sendQuery(query)
resp, body, err := h.sendv1Query(query)
if err != nil {
return err
}
@@ -368,7 +374,7 @@ func (h *HasuraDB) Reset() error {
},
}

resp, body, err := h.sendQuery(query)
resp, body, err := h.sendv1Query(query)
if err != nil {
return err
}
@@ -401,7 +407,7 @@ func (h *HasuraDB) ensureVersionTable() error {
},
}

resp, body, err := h.sendQuery(query)
resp, body, err := h.sendv1Query(query)
if err != nil {
h.logger.Debug(err)
return err
@@ -443,7 +449,7 @@ func (h *HasuraDB) ensureVersionTable() error {
},
}

resp, body, err = h.sendQuery(query)
resp, body, err = h.sendv1Query(query)
if err != nil {
return err
}
@@ -469,10 +475,30 @@ func (h *HasuraDB) ensureVersionTable() error {
return nil
}

func (h *HasuraDB) sendQuery(m interface{}) (resp *http.Response, body []byte, err error) {
func (h *HasuraDB) sendv1Query(m interface{}) (resp *http.Response, body []byte, err error) {
request := gorequest.New()

request = request.Post(h.config.URL.String()).Send(m)
request = request.Post(h.config.v1URL.String()).Send(m)

for headerName, headerValue := range h.config.Headers {
request.Set(headerName, headerValue)
}

resp, body, errs := request.EndBytes()

if len(errs) == 0 {
err = nil
} else {
err = errs[0]
}

return resp, body, err
}

func (h *HasuraDB) sendSchemaDumpQuery(m interface{}) (resp *http.Response, body []byte, err error) {
request := gorequest.New()

request = request.Post(h.config.schemDumpURL.String()).Send(m)

for headerName, headerValue := range h.config.Headers {
request.Set(headerName, headerValue)
@@ -13,7 +13,7 @@ func (h *HasuraDB) ExportMetadata() (interface{}, error) {
Args: HasuraArgs{},
}

resp, body, err := h.sendQuery(query)
resp, body, err := h.sendv1Query(query)
if err != nil {
h.logger.Debug(err)
return nil, err
@@ -46,7 +46,7 @@ func (h *HasuraDB) ResetMetadata() error {
Args: HasuraArgs{},
}

resp, body, err := h.sendQuery(query)
resp, body, err := h.sendv1Query(query)
if err != nil {
h.logger.Debug(err)
return err
@@ -72,7 +72,7 @@ func (h *HasuraDB) ReloadMetadata() error {
Args: HasuraArgs{},
}

resp, body, err := h.sendQuery(query)
resp, body, err := h.sendv1Query(query)
if err != nil {
h.logger.Debug(err)
return err
@@ -106,7 +106,7 @@ func (h *HasuraDB) ApplyMetadata(data interface{}) error {
},
}

resp, body, err := h.sendQuery(query)
resp, body, err := h.sendv1Query(query)
if err != nil {
h.logger.Debug(err)
return err
@@ -151,7 +151,7 @@ func (h *HasuraDB) Query(data []interface{}) error {
Args: data,
}

resp, body, err := h.sendQuery(query)
resp, body, err := h.sendv1Query(query)
if err != nil {
h.logger.Debug(err)
return err
cli/migrate/database/hasuradb/schema_dump.go (new file, 36 lines)
@@ -0,0 +1,36 @@
package hasuradb

import (
"encoding/json"
"net/http"
)

func (h *HasuraDB) ExportSchemaDump(schemaNames []string) ([]byte, error) {
opts := []string{"-O", "-x", "--schema-only"}
for _, s := range schemaNames {
opts = append(opts, "--schema", s)
}
query := SchemaDump{
Opts: opts,
CleanOutput: true,
}

resp, body, err := h.sendSchemaDumpQuery(query)
if err != nil {
h.logger.Debug(err)
return nil, err
}
h.logger.Debug("response: ", string(body))

var horror HasuraError
if resp.StatusCode != http.StatusOK {
err = json.Unmarshal(body, &horror)
if err != nil {
h.logger.Debug(err)
return nil, err
}
return nil, horror.Error(h.config.isCMD)
}

return body, nil
}
@@ -19,7 +19,7 @@ func (h *HasuraDB) ensureSettingsTable() error {
},
}

resp, body, err := h.sendQuery(query)
resp, body, err := h.sendv1Query(query)
if err != nil {
h.logger.Debug(err)
return err
@@ -60,7 +60,7 @@ func (h *HasuraDB) ensureSettingsTable() error {
},
}

resp, body, err = h.sendQuery(query)
resp, body, err = h.sendv1Query(query)
if err != nil {
return err
}
@@ -105,7 +105,7 @@ func (h *HasuraDB) setDefaultSettings() error {
return nil
}

resp, body, err := h.sendQuery(query)
resp, body, err := h.sendv1Query(query)
if err != nil {
return err
}
@@ -132,7 +132,7 @@ func (h *HasuraDB) GetSetting(name string) (value string, err error) {
}

// Send Query
resp, body, err := h.sendQuery(query)
resp, body, err := h.sendv1Query(query)
if err != nil {
return value, err
}
@@ -181,7 +181,7 @@ func (h *HasuraDB) UpdateSetting(name string, value string) error {
}

// Send Query
resp, body, err := h.sendQuery(query)
resp, body, err := h.sendv1Query(query)
if err != nil {
return err
}
@@ -85,6 +85,11 @@ type PostgresError struct {
Hint string `json:"hint"`
}

type SchemaDump struct {
Opts []string `json:"opts"`
CleanOutput bool `json:"clean_output"`
}

func (h *HasuraError) CMDError() error {
var errorStrings []string
errorStrings = append(errorStrings, fmt.Sprintf("[%s] %s (%s)", h.Code, h.ErrorMessage, h.Path))
cli/migrate/database/schema_dump.go (new file, 5 lines)
@@ -0,0 +1,5 @@
package database

type SchemaDriver interface {
ExportSchemaDump(schemaName []string) ([]byte, error)
}
@@ -322,6 +322,10 @@ func (m *Migrate) ApplyMetadata(data interface{}) error {
return m.databaseDrv.ApplyMetadata(data)
}

func (m *Migrate) ExportSchemaDump(schemName []string) ([]byte, error) {
return m.databaseDrv.ExportSchemaDump(schemName)
}

func (m *Migrate) Query(data []interface{}) error {
mode, err := m.databaseDrv.GetSetting("migration_mode")
if err != nil {
@@ -261,4 +261,4 @@ watch:
inotifywait -q -m --recursive -e modify -e move -e create -e delete --exclude '($(BUILDDIR)|.git)' . | while read -r CHANGE; do $(MAKE) html; done

livehtml: html-images
sphinx-autobuild -b html -i "$(BUILDDIR)/*" $(ALLSPHINXOPTS) $(BUILDDIR)/html --ignore ".git/*"
sphinx-autobuild -b html --host 0.0.0.0 -i "$(BUILDDIR)/*" $(ALLSPHINXOPTS) $(BUILDDIR)/html --ignore ".git/*"
docs/graphql/manual/api-reference/graphql-api/index.rst (new file, 29 lines)
@@ -0,0 +1,29 @@
GraphQL API Reference
=====================

.. contents:: Table of contents
:backlinks: none
:depth: 1
:local:

All GraphQL requests for queries, subscriptions and mutations are made to the GraphQL API.

Endpoint
--------

All requests are ``POST`` requests to the ``/v1alpha1/graphql`` endpoint.
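
For example, a query can be issued from any HTTP client; the sketch below assumes a locally running instance, an admin secret, and a tracked ``author`` table (all illustrative):

.. code-block:: bash

curl -X POST http://localhost:8080/v1alpha1/graphql \
  -H 'Content-Type: application/json' \
  -H 'X-Hasura-Admin-Secret: <admin-secret>' \
  -d '{"query": "query { author { id name } }"}'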

Request types
-------------

The following types of requests can be made using the GraphQL API:

- :doc:`Query / Subscription <query>`
- :doc:`Mutation <mutation>`

.. toctree::
:maxdepth: 1
:hidden:

Query / Subscription <query>
Mutation <mutation>
@@ -6,37 +6,79 @@ API Reference
:depth: 1
:local:

Available APIs
--------------

+-----------------+----------------------------------------+------------------+
| API             | Endpoint                               | Access           |
+=================+========================================+==================+
| GraphQL         | :ref:`/v1alpha1/graphql <graphql_api>` | Permission rules |
+-----------------+----------------------------------------+------------------+
| Schema/Metadata | :ref:`/v1/query <schema_metadata_api>` | Admin only       |
+-----------------+----------------------------------------+------------------+
| Version         | :ref:`/v1/version <version_api>`       | Public           |
+-----------------+----------------------------------------+------------------+
| Health          | :ref:`/healthz <health_api>`           | Public           |
+-----------------+----------------------------------------+------------------+
| PG Dump         | :ref:`/v1alpha1/pg_dump <pg_dump_api>` | Admin only       |
+-----------------+----------------------------------------+------------------+

.. _graphql_api:

GraphQL API
-----------
^^^^^^^^^^^

All GraphQL requests for queries, subscriptions and mutations are made to the GraphQL API.

All requests are ``POST`` requests to the ``/v1alpha1/graphql`` endpoint.
See details at :doc:`graphql-api/index`

Request types
^^^^^^^^^^^^^

The following types of requests can be made using the GraphQL API:

- :doc:`Query / Subscription <query>`
- :doc:`Mutation <mutation>`
.. _schema_metadata_api:

Schema / Metadata API
---------------------
^^^^^^^^^^^^^^^^^^^^^

Hasura exposes a Schema / Metadata API for managing metadata for permissions/relationships or for directly
executing SQL on the underlying Postgres.

This is primarily intended to be used as an ``admin`` API to manage Hasura schema and metadata.

All requests are ``POST`` requests to the ``/v1/query`` endpoint.
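
For instance, the current metadata can be exported with a single query type (the endpoint host and secret below are assumptions):

.. code-block:: bash

# export the current Hasura metadata as JSON
curl -X POST http://localhost:8080/v1/query \
  -H 'Content-Type: application/json' \
  -H 'X-Hasura-Admin-Secret: <admin-secret>' \
  -d '{"type": "export_metadata", "args": {}}'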

Request types
^^^^^^^^^^^^^
See details at :doc:`schema-metadata-api/index`

The following lists all the types of requests that can be made using the Schema/Metadata API:
.. _version_api:

- :ref:`Schema / Metadata API query types <Query>`
Version API
^^^^^^^^^^^

A ``GET`` request to the public ``/v1/version`` endpoint responds with the current server version
in JSON format:

.. code-block:: js

{"version": "v1.0.0-alpha01"}

.. _health_api:

Health check API
^^^^^^^^^^^^^^^^

A ``GET`` request to the public ``/healthz`` endpoint will respond with ``200``
if GraphQL Engine is ready to serve requests and there are no inconsistencies
with the metadata. The response will be ``500`` if there are metadata
inconsistencies and you should use the console or check the server logs to find
out what the errors are.
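
Both endpoints can be probed with plain ``GET`` requests (a locally running instance is assumed here):

.. code-block:: bash

curl http://localhost:8080/v1/version   # => {"version": "..."}
curl -i http://localhost:8080/healthz   # 200 when healthy, 500 on metadata inconsistencies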

.. _pg_dump_api:

pg_dump API
^^^^^^^^^^^

The ``/v1alpha1/pg_dump`` is an admin-only endpoint that can be used to execute ``pg_dump`` on the
Postgres instance connected to Hasura. The ``pg_dump`` CLI tool's arguments can
be passed as the POST request body to the API and the response is sent back to the
client.

See details at :doc:`pgdump`

Supported PostgreSQL types
--------------------------
@@ -48,7 +90,7 @@ You can refer to the following to know about all PostgreSQL types supported by t
:maxdepth: 1
:hidden:

Query / Subscription <query>
Mutation <mutation>
GraphQL API <graphql-api/index>
Schema / Metadata APIs <schema-metadata-api/index>
PG Dump API <pgdump>
Supported PostgreSQL types <postgresql-types>
docs/graphql/manual/api-reference/pgdump.rst (new file, 97 lines)
@@ -0,0 +1,97 @@
.. _pg_dump_api_reference:

PG Dump API Reference
=====================

.. contents:: Table of contents
:backlinks: none
:depth: 1
:local:

The PG Dump API is an admin-only endpoint that can be used to execute ``pg_dump`` on the
Postgres instance that Hasura is configured with.

The primary motive of this API is to provide a convenient way to initialise migrations from an
existing Hasura instance. The functionality can later be expanded to other tasks,
such as taking data dumps.

Endpoint
--------

All requests are ``POST`` requests to the ``/v1alpha1/pg_dump`` endpoint.

API Spec
--------

Request
^^^^^^^

.. code-block:: http

POST /v1alpha1/pg_dump HTTP/1.1
Content-Type: application/json
X-Hasura-Role: admin

{
"opts": ["-O", "-x", "--schema-only", "--schema", "public"],
"clean_output": true
}

- ``opts``: Arguments to be passed to the ``pg_dump`` tool. Represented as an array
of strings. The underlying command that is executed is:

.. code-block:: bash

pg_dump $DATABASE_URL $OPTS -f $FILENAME

- ``clean_output``: When this optional argument is set to ``true``, the output SQL from
the command is cleaned to remove the following:

- SQL front matter, like SET statements.
- ``CREATE SCHEMA public``.
- ``COMMENT ON SCHEMA public IS 'standard public schema';``
- Comments (``--``) and empty newlines.
- Postgres triggers created by Hasura for event triggers.


Source code for the script that is executed can be found `here <https://github.com/hasura/graphql-engine/tree/master/server/src-rsr/run_pg_dump.sh>`_.
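
As a quick check, the same request can be issued with curl and the output saved to a file (the endpoint host and secret are assumptions):

.. code-block:: bash

curl -X POST http://localhost:8080/v1alpha1/pg_dump \
  -H 'Content-Type: application/json' \
  -H 'X-Hasura-Role: admin' \
  -H 'X-Hasura-Admin-Secret: <admin-secret>' \
  -d '{"opts": ["-O", "-x", "--schema-only", "--schema", "public"], "clean_output": true}' \
  > public-schema.sql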

Sample response
^^^^^^^^^^^^^^^

.. code-block:: http

HTTP/1.1 200 OK
Content-Type: application/sql

CREATE TABLE public.author (
id integer NOT NULL,
name text NOT NULL
);
CREATE SEQUENCE public.author_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE public.author_id_seq OWNED BY public.author.id;
ALTER TABLE ONLY public.author ALTER COLUMN id SET DEFAULT nextval('public.author_id_seq'::regclass);

Disabling PG Dump API
---------------------

Since this API can be used to dump all the Postgres data and schema, it can be
disabled, especially in production deployments.

The ``enabled-apis`` flag or the ``HASURA_GRAPHQL_ENABLED_APIS`` env var can be used to
enable/disable this API. By default, the PG Dump API is enabled. To disable it, you need to explicitly
state that this API is not enabled, i.e. remove it from the list of enabled APIs.

.. code-block:: bash

# enable only graphql & metadata apis, disable pgdump
--enabled-apis="graphql,metadata"
HASURA_GRAPHQL_ENABLED_APIS="graphql,metadata"

See :doc:`../deployment/graphql-engine-flags/reference` for info on setting the above flag/env var.
@@ -239,6 +239,24 @@ Error codes
:widths: 10, 20, 70
:header-rows: 1

Disabling Schema/Metadata API
-----------------------------

Since this API can be used to make changes to the GraphQL schema, it can be
disabled, especially in production deployments.

The ``enabled-apis`` flag or the ``HASURA_GRAPHQL_ENABLED_APIS`` env var can be used to
enable/disable this API. By default, the schema/metadata API is enabled. To disable it, you need
to explicitly state that this API is not enabled, i.e. remove it from the list of enabled APIs.

.. code-block:: bash

# enable only graphql api, disable metadata and pgdump
--enabled-apis="graphql"
HASURA_GRAPHQL_ENABLED_APIS="graphql"

See :doc:`../../deployment/graphql-engine-flags/reference` for info on setting the above flag/env var.

.. toctree::
:maxdepth: 1
:hidden:
@@ -154,8 +154,8 @@ For ``serve`` sub-command these are the flags and ENV variables available:

* - ``--enabled-apis <APIS>``
  - ``HASURA_GRAPHQL_ENABLED_APIS``
  - Comma separated list of APIs (options: ``metadata`` & ``graphql``) to be enabled.
    (default: ``metadata,graphql``)
  - Comma separated list of APIs (options: ``metadata``, ``graphql``, ``pgdump``) to be enabled.
    (default: ``metadata,graphql,pgdump``)

.. note::
@@ -66,60 +66,24 @@ Hasura migrations. You can commit this directory to version control.
Step 3: Initialize the migrations as per your current state
-----------------------------------------------------------

- Use ``pg_dump`` to export the database schema:
Create a migration called ``init`` by exporting the current Postgres schema and
metadata from the server:

If Postgres is running in docker, we can use the ``pg_dump``
command bundled within the ``postgres`` docker container. If you have
``pg_dump`` installed on your machine, you could use that as well.
.. code-block:: bash

.. code-block:: bash
# create migration files (note that this will only export the public schema from postgres)
hasura migrate create "init" --from-server

# get the container id for postgres
docker ps
# note down the version
# mark the migration as applied on this server
hasura migrate apply --version "<version>" --skip-execution

# dump the public schema into public-schema.sql (repeat for other schemas)
docker exec <postgres-container-id> pg_dump -O -x -U postgres --schema-only --schema public > public-schema.sql

If Postgres is on Heroku or elsewhere, install ``pg_dump`` on your machine and
use it. It comes with a standard Postgres installation which you can download
and install from `here <https://www.postgresql.org/download/>`__.

.. code-block:: bash

# Get the DATABASE_URL from Heroku Dashboard -> Settings -> Reveal Config Vars
# dump the public schema into public-schema.sql (repeat for other schemas)
pg_dump -O -x "<DATABASE_URL>" --schema-only --schema public > public-schema.sql

This command will create ``public-schema.sql`` which contains the SQL
definitions for the public schema.

- Clean up the SQL file to remove some unnecessary statements:

.. code-block:: bash

# POST the SQL to a serverless function and save the response
curl --data-binary @public-schema.sql https://hasura-edit-pg-dump.now.sh > public-schema-edited.sql

(The source code for this function can be found on `GitHub <https://github.com/hasura/graphql-engine/tree/master/scripts/edit-pg-dump>`__ along with a bash script if you'd prefer that.)

- Create a migration called ``init`` using this SQL file and the metadata that
is on the server right now:

.. code-block:: bash

# create migration files
hasura migrate create "init" --sql-from-file "public-schema-edited.sql" --metadata-from-server

# note down the version
# mark the migration as applied on this server
hasura migrate apply --version "<version>" --skip-execution

This command will create a new "migration" under the ``migrations`` directory
with the file name as ``<timestamp(version)>_init.up.yaml``. This file will
contain the required information to reproduce the current state of the server
including the Postgres schema and Hasura metadata. The apply command will mark
this migration as "applied" on the server. If you'd like to read more about
the format of migration files, check out the :ref:`migration_file_format`.
This command will create a new "migration" under the ``migrations`` directory
with the file name as ``<timestamp(version)>_init.up.yaml``. This file will
contain the required information to reproduce the current state of the server
including the Postgres (public) schema and Hasura metadata. The apply command
will mark this migration as "applied" on the server. If you'd like to read more
about the format of migration files, check out the :ref:`migration_file_format`.

.. note::
@@ -7,6 +7,7 @@ nproc := $(shell nproc)
# TODO: needs to be replaced with something like yq
stack_resolver := $(shell awk '/^resolver:/ {print $$2;}' stack.yaml)
packager_ver := 20190326
pg_dump_ver := 11
project_dir := $(shell pwd)
build_dir := $(project_dir)/$(shell stack path --dist-dir)/build

@@ -59,6 +60,7 @@ ci-image:
docker cp $(build_dir)/$(project)/$(project) dummy:/root/
docker run --rm --volumes-from dummy $(registry)/graphql-engine-packager:$(packager_ver) /build.sh $(project) | tar -x -C packaging/build/rootfs
strip --strip-unneeded packaging/build/rootfs/bin/$(project)
cp /usr/lib/postgresql/$(pg_dump_ver)/bin/pg_dump packaging/build/rootfs/bin/pg_dump
upx packaging/build/rootfs/bin/$(project)
docker build -t $(registry)/$(project):$(VERSION) packaging/build/
@@ -165,6 +165,7 @@ library
, Hasura.Server.CheckUpdates
, Hasura.Server.Telemetry
, Hasura.Server.SchemaUpdate
, Hasura.Server.PGDump
, Hasura.RQL.Types
, Hasura.RQL.Instances
, Hasura.RQL.Types.SchemaCache
@@ -144,7 +144,7 @@ main = do
prepareEvents logger ci

(app, cacheRef, cacheInitTime) <-
mkWaiApp isoL loggerCtx sqlGenCtx pool httpManager am
mkWaiApp isoL loggerCtx sqlGenCtx pool ci httpManager am
corsCfg enableConsole enableTelemetry instanceId enabledAPIs lqOpts

-- log inconsistent schema objects
@@ -52,6 +52,7 @@ import Hasura.Server.Cors
import Hasura.Server.Init
import Hasura.Server.Logging
import Hasura.Server.Middleware (corsMiddleware)
import qualified Hasura.Server.PGDump as PGD
import Hasura.Server.Query
import Hasura.Server.Utils
import Hasura.Server.Version
@@ -130,16 +131,17 @@ withSCUpdate scr logger action = do

data ServerCtx
= ServerCtx
{ scPGExecCtx :: PGExecCtx
, scLogger :: L.Logger
, scCacheRef :: SchemaCacheRef
, scAuthMode :: AuthMode
, scManager :: HTTP.Manager
, scSQLGenCtx :: SQLGenCtx
, scEnabledAPIs :: S.HashSet API
, scInstanceId :: InstanceId
, scPlanCache :: E.PlanCache
, scLQState :: EL.LiveQueriesState
{ scPGExecCtx :: PGExecCtx
, scConnInfo :: Q.ConnInfo
, scLogger :: L.Logger
, scCacheRef :: SchemaCacheRef
, scAuthMode :: AuthMode
, scManager :: HTTP.Manager
, scSQLGenCtx :: SQLGenCtx
, scEnabledAPIs :: S.HashSet API
, scInstanceId :: InstanceId
, scPlanCache :: E.PlanCache
, scLQState :: EL.LiveQueriesState
}

data HandlerCtx
@@ -152,12 +154,27 @@ data HandlerCtx

type Handler = ExceptT QErr (ReaderT HandlerCtx IO)

data APIResp
= JSONResp !EncJSON
| RawResp !T.Text !BL.ByteString -- content-type, body

apiRespToLBS :: APIResp -> BL.ByteString
apiRespToLBS = \case
JSONResp j -> encJToLBS j
RawResp _ b -> b

mkAPIRespHandler :: Handler EncJSON -> Handler APIResp
mkAPIRespHandler = fmap JSONResp

isMetadataEnabled :: ServerCtx -> Bool
isMetadataEnabled sc = S.member METADATA $ scEnabledAPIs sc

isGraphQLEnabled :: ServerCtx -> Bool
isGraphQLEnabled sc = S.member GRAPHQL $ scEnabledAPIs sc

isPGDumpEnabled :: ServerCtx -> Bool
isPGDumpEnabled sc = S.member PGDUMP $ scEnabledAPIs sc

isDeveloperAPIEnabled :: ServerCtx -> Bool
isDeveloperAPIEnabled sc = S.member DEVELOPER $ scEnabledAPIs sc

@@ -204,7 +221,7 @@ mkSpockAction
:: (MonadIO m)
=> (Bool -> QErr -> Value)
-> ServerCtx
-> Handler EncJSON
-> Handler APIResp
-> ActionT m ()
mkSpockAction qErrEncoder serverCtx handler = do
req <- request
@@ -219,14 +236,13 @@ mkSpockAction qErrEncoder serverCtx handler = do
let handlerState = HandlerCtx serverCtx reqBody userInfo headers

t1 <- liftIO getCurrentTime -- for measuring response time purposes
result <- liftIO $ runReaderT (runExceptT handler) handlerState
eResult <- liftIO $ runReaderT (runExceptT handler) handlerState
t2 <- liftIO getCurrentTime -- for measuring response time purposes

let resLBS = fmap encJToLBS result

-- log result
logResult (Just userInfo) req reqBody serverCtx resLBS $ Just (t1, t2)
either (qErrToResp $ userRole userInfo == adminRole) resToResp resLBS
logResult (Just userInfo) req reqBody serverCtx (apiRespToLBS <$> eResult) $ Just (t1, t2)
either (qErrToResp $ userRole userInfo == adminRole) resToResp eResult

where
logger = scLogger serverCtx
@@ -240,9 +256,14 @@ mkSpockAction qErrEncoder serverCtx handler = do
logError Nothing req reqBody serverCtx qErr
qErrToResp includeInternal qErr

resToResp resp = do
uncurry setHeader jsonHeader
lazyBytes resp
resToResp eResult = do
case eResult of
JSONResp j -> do
uncurry setHeader jsonHeader
lazyBytes $ encJToLBS j
RawResp ct b -> do
setHeader "content-type" ct
lazyBytes b

v1QueryHandler :: RQLQuery -> Handler EncJSON
v1QueryHandler query = do
@@ -293,6 +314,13 @@ gqlExplainHandler query = do
sqlGenCtx <- scSQLGenCtx . hcServerCtx <$> ask
GE.explainGQLQuery pgExecCtx sc sqlGenCtx query

v1Alpha1PGDumpHandler :: PGD.PGDumpReqBody -> Handler APIResp
v1Alpha1PGDumpHandler b = do
onlyAdmin
ci <- scConnInfo . hcServerCtx <$> ask
output <- PGD.execPGDump b ci
return $ RawResp "application/sql" output

newtype QueryParser
= QueryParser { getQueryParser :: QualifiedTable -> Handler RQLQuery }

@@ -330,12 +358,12 @@ initErrExit e = do

mkWaiApp
:: Q.TxIsolation -> L.LoggerCtx -> SQLGenCtx
-> Q.PGPool -> HTTP.Manager -> AuthMode
-> Q.PGPool -> Q.ConnInfo -> HTTP.Manager -> AuthMode
-> CorsConfig -> Bool -> Bool
-> InstanceId -> S.HashSet API
-> EL.LQOpts
-> IO (Wai.Application, SchemaCacheRef, Maybe UTCTime)
mkWaiApp isoLevel loggerCtx sqlGenCtx pool httpManager mode corsCfg
mkWaiApp isoLevel loggerCtx sqlGenCtx pool ci httpManager mode corsCfg
enableConsole enableTelemetry instanceId apis
lqOpts = do
let pgExecCtx = PGExecCtx pool isoLevel
@@ -361,7 +389,7 @@ mkWaiApp isoLevel loggerCtx sqlGenCtx pool httpManager mode corsCfg

let schemaCacheRef =
SchemaCacheRef cacheLock cacheRef (E.clearPlanCache planCache)
serverCtx = ServerCtx pgExecCtx logger
serverCtx = ServerCtx pgExecCtx ci logger
schemaCacheRef mode httpManager
sqlGenCtx apis instanceId planCache lqState

@@ -404,36 +432,46 @@ httpApp corsCfg serverCtx enableConsole enableTelemetry = do
put ("v1/template" <//> var) tmpltPutOrPostH
delete ("v1/template" <//> var) tmpltGetOrDeleteH

post "v1/query" $ mkSpockAction encodeQErr serverCtx $ do
post "v1/query" $ mkSpockAction encodeQErr serverCtx $ mkAPIRespHandler $ do
query <- parseBody
v1QueryHandler query

post ("api/1/table" <//> var <//> var) $ \tableName queryType ->
mkSpockAction encodeQErr serverCtx $
mkSpockAction encodeQErr serverCtx $ mkAPIRespHandler $
legacyQueryHandler (TableName tableName) queryType

when enableGraphQL $ do
post "v1alpha1/graphql/explain" $ mkSpockAction encodeQErr serverCtx $ do
expQuery <- parseBody
gqlExplainHandler expQuery

post "v1alpha1/graphql" $ mkSpockAction GH.encodeGQErr serverCtx $ do
when enablePGDump $
post "v1alpha1/pg_dump" $ mkSpockAction encodeQErr serverCtx $ do
query <- parseBody
v1Alpha1GQHandler query
v1Alpha1PGDumpHandler query

when enableGraphQL $ do
post "v1alpha1/graphql/explain" $ mkSpockAction encodeQErr serverCtx $
mkAPIRespHandler $ do
expQuery <- parseBody
gqlExplainHandler expQuery

post "v1alpha1/graphql" $ mkSpockAction GH.encodeGQErr serverCtx $
mkAPIRespHandler $ do
query <- parseBody
v1Alpha1GQHandler query

when (isDeveloperAPIEnabled serverCtx) $ do
get "dev/plan_cache" $ mkSpockAction encodeQErr serverCtx $ do
onlyAdmin
respJ <- liftIO $ E.dumpPlanCache $ scPlanCache serverCtx
return $ encJFromJValue respJ
get "dev/subscriptions" $ mkSpockAction encodeQErr serverCtx $ do
onlyAdmin
respJ <- liftIO $ EL.dumpLiveQueriesState False $ scLQState serverCtx
return $ encJFromJValue respJ
get "dev/subscriptions/extended" $ mkSpockAction encodeQErr serverCtx $ do
onlyAdmin
respJ <- liftIO $ EL.dumpLiveQueriesState True $ scLQState serverCtx
return $ encJFromJValue respJ
get "dev/plan_cache" $ mkSpockAction encodeQErr serverCtx $
mkAPIRespHandler $ do
onlyAdmin
respJ <- liftIO $ E.dumpPlanCache $ scPlanCache serverCtx
return $ encJFromJValue respJ
get "dev/subscriptions" $ mkSpockAction encodeQErr serverCtx $
mkAPIRespHandler $ do
onlyAdmin
respJ <- liftIO $ EL.dumpLiveQueriesState False $ scLQState serverCtx
return $ encJFromJValue respJ
get "dev/subscriptions/extended" $ mkSpockAction encodeQErr serverCtx $
mkAPIRespHandler $ do
onlyAdmin
respJ <- liftIO $ EL.dumpLiveQueriesState True $ scLQState serverCtx
return $ encJFromJValue respJ

forM_ [GET,POST] $ \m -> hookAny m $ \_ -> do
let qErr = err404 NotFound "resource does not exist"
@@ -442,13 +480,15 @@ httpApp corsCfg serverCtx enableConsole enableTelemetry = do
where
enableGraphQL = isGraphQLEnabled serverCtx
enableMetadata = isMetadataEnabled serverCtx
enablePGDump = isPGDumpEnabled serverCtx
tmpltGetOrDeleteH tmpltName = do
tmpltArgs <- tmpltArgsFromQueryParams
mkSpockAction encodeQErr serverCtx $ mkQTemplateAction tmpltName tmpltArgs
mkSpockAction encodeQErr serverCtx $ mkAPIRespHandler $
mkQTemplateAction tmpltName tmpltArgs

tmpltPutOrPostH tmpltName = do
tmpltArgs <- tmpltArgsFromQueryParams
mkSpockAction encodeQErr serverCtx $ do
mkSpockAction encodeQErr serverCtx $ mkAPIRespHandler $ do
bodyTmpltArgs <- parseBody
mkQTemplateAction tmpltName $ M.union bodyTmpltArgs tmpltArgs
@@ -103,6 +103,7 @@ data HGECommandG a
data API
= METADATA
| GRAPHQL
| PGDUMP
| DEVELOPER
deriving (Show, Eq, Read, Generic)

@@ -273,9 +274,9 @@ mkServeOptions rso = do
enableTelemetry strfyNum enabledAPIs lqOpts
where
#ifdef DeveloperAPIs
defaultAPIs = [METADATA,GRAPHQL,DEVELOPER]
defaultAPIs = [METADATA,GRAPHQL,PGDUMP,DEVELOPER]
#else
defaultAPIs = [METADATA,GRAPHQL]
defaultAPIs = [METADATA,GRAPHQL,PGDUMP]
#endif
mkConnParams (RawConnParams s c i p) = do
stripes <- fromMaybe 1 <$> withEnv s (fst pgStripesEnv)
@@ -535,7 +536,7 @@ stringifyNumEnv =
enabledAPIsEnv :: (String, String)
enabledAPIsEnv =
( "HASURA_GRAPHQL_ENABLED_APIS"
, "List of comma separated list of allowed APIs. (default: metadata,graphql)"
, "List of comma separated list of allowed APIs. (default: metadata,graphql,pgdump)"
)

parseRawConnInfo :: Parser RawConnInfo
@@ -693,8 +694,9 @@ readAPIs = mapM readAPI . T.splitOn "," . T.pack
where readAPI si = case T.toUpper $ T.strip si of
"METADATA" -> Right METADATA
"GRAPHQL" -> Right GRAPHQL
"PGDUMP" -> Right PGDUMP
"DEVELOPER" -> Right DEVELOPER
_ -> Left "Only expecting list of comma separated API types metadata / graphql"
_ -> Left "Only expecting list of comma separated API types metadata,graphql,pgdump,developer"

parseWebHook :: Parser RawAuthHook
parseWebHook =
server/src-lib/Hasura/Server/PGDump.hs (new file, 67 lines)
@@ -0,0 +1,67 @@
module Hasura.Server.PGDump
( PGDumpReqBody
, execPGDump
) where

import Control.Exception (IOException, try)
import Data.Aeson.Casing
import Data.Aeson.TH
import qualified Data.ByteString.Lazy as BL
import qualified Data.FileEmbed as FE
import qualified Data.List as L
import qualified Data.Text as T
import qualified Database.PG.Query as Q
import Hasura.Prelude
import qualified Hasura.RQL.Types.Error as RTE
import System.Exit
import System.Process

data PGDumpReqBody =
PGDumpReqBody
{ prbOpts :: ![String]
, prbCleanOutput :: !(Maybe Bool)
} deriving (Show, Eq)

$(deriveJSON (aesonDrop 3 snakeCase) ''PGDumpReqBody)

script :: IsString a => a
script = $(FE.embedStringFile "src-rsr/run_pg_dump.sh")

runScript
:: String
-> [String]
-> String
-> IO (Either String BL.ByteString)
runScript dbUrl opts clean = do
(exitCode, filename, stdErr) <- readProcessWithExitCode "/bin/sh"
["/dev/stdin", dbUrl, unwords opts, clean] script
case exitCode of
ExitSuccess -> do
contents <- BL.readFile $ L.dropWhileEnd (== '\n') filename
return $ Right contents
ExitFailure _ -> return $ Left stdErr

execPGDump
:: (MonadError RTE.QErr m, MonadIO m)
=> PGDumpReqBody
-> Q.ConnInfo
-> m BL.ByteString
execPGDump b ci = do
eOutput <- liftIO $ try $ runScript dbUrl opts clean
output <- either throwException return eOutput
case output of
Left err ->
RTE.throw500 $ "error while executing pg_dump: " <> T.pack err
Right dump -> return dump
where
throwException :: (MonadError RTE.QErr m) => IOException -> m a
throwException _ = RTE.throw500 "internal exception while executing pg_dump"

-- FIXME(shahidhk): need to add connection options (Q.connOptions) too?
dbUrl = "postgres://" <> Q.connUser ci <> ":" <> Q.connPassword ci
<> "@" <> Q.connHost ci <> ":" <> show (Q.connPort ci)
<> "/" <> Q.connDatabase ci
opts = prbOpts b
clean = case prbCleanOutput b of
Just v -> show v
Nothing -> show False
server/src-rsr/run_pg_dump.sh (new file, 47 lines)
@@ -0,0 +1,47 @@
#! /usr/bin/env sh

set -e

filename=/tmp/pg_dump-$(date +%s).sql
template_file=/tmp/hasura_del_lines_template.txt

# input args
DB_URL=$1
OPTS=$2
CLEAN=$3

pg_dump "$DB_URL" $OPTS -f "$filename"

# clean the file if the variable is True
if [ "$CLEAN" = "True" ]; then
# delete all comments
sed -i '/^--/d' "$filename"

# delete front matter
cat > $template_file << EOF
SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET client_min_messages = warning;
SET row_security = off;
SET default_tablespace = '';
SET default_with_oids = false;
CREATE SCHEMA public;
COMMENT ON SCHEMA public IS 'standard public schema';
EOF
while read -r line; do
sed -i '/^'"$line"'$/d' "$filename"
done < $template_file

# delete notify triggers
sed -i -E '/^CREATE TRIGGER "?notify_hasura_.+"? AFTER \w+ ON .+ FOR EACH ROW EXECUTE PROCEDURE "?hdb_views"?\."?notify_hasura_.+"?\(\);$/d' "$filename"

# delete empty lines
sed -i '/^[[:space:]]*$/d' "$filename"
fi

printf "%s" "$filename"
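
A sketch of invoking the script by hand; the connection string and options here are illustrative (the server passes the real values when it runs the embedded script):

# prints the path of the generated SQL dump, e.g. /tmp/pg_dump-1555000000.sql
sh run_pg_dump.sh "postgres://postgres:password@localhost:5432/postgres" "-O -x --schema-only --schema public" True
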
server/tests-py/pgdump/pg_dump_public.yaml (new file, 81 lines)
@@ -0,0 +1,81 @@
descriptions: Execute pg_dump on public schema
url: /v1alpha1/pg_dump
status: 200
query:
opts:
- -O
- -x
- --schema-only
- --schema
- public
clean_output: true
# response on postgres 9.4 and 9.5
response_9: |
CREATE TABLE public.articles (
id integer NOT NULL,
author_id integer NOT NULL,
title text NOT NULL,
body text NOT NULL
);
CREATE SEQUENCE public.articles_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE public.articles_id_seq OWNED BY public.articles.id;
CREATE TABLE public.authors (
id integer NOT NULL,
name text NOT NULL
);
CREATE SEQUENCE public.authors_id_seq
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE public.authors_id_seq OWNED BY public.authors.id;
ALTER TABLE ONLY public.articles ALTER COLUMN id SET DEFAULT nextval('public.articles_id_seq'::regclass);
ALTER TABLE ONLY public.authors ALTER COLUMN id SET DEFAULT nextval('public.authors_id_seq'::regclass);
ALTER TABLE ONLY public.articles
ADD CONSTRAINT articles_pkey PRIMARY KEY (id);
ALTER TABLE ONLY public.authors
ADD CONSTRAINT authors_pkey PRIMARY KEY (id);
ALTER TABLE ONLY public.articles
ADD CONSTRAINT articles_author_id_fkey FOREIGN KEY (author_id) REFERENCES public.authors(id);
# response on postgres 10 and 11
response_10_11: |
CREATE TABLE public.articles (
id integer NOT NULL,
author_id integer NOT NULL,
title text NOT NULL,
body text NOT NULL
);
CREATE SEQUENCE public.articles_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE public.articles_id_seq OWNED BY public.articles.id;
CREATE TABLE public.authors (
id integer NOT NULL,
name text NOT NULL
);
CREATE SEQUENCE public.authors_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE public.authors_id_seq OWNED BY public.authors.id;
ALTER TABLE ONLY public.articles ALTER COLUMN id SET DEFAULT nextval('public.articles_id_seq'::regclass);
ALTER TABLE ONLY public.authors ALTER COLUMN id SET DEFAULT nextval('public.authors_id_seq'::regclass);
ALTER TABLE ONLY public.articles
ADD CONSTRAINT articles_pkey PRIMARY KEY (id);
ALTER TABLE ONLY public.authors
ADD CONSTRAINT authors_pkey PRIMARY KEY (id);
ALTER TABLE ONLY public.articles
ADD CONSTRAINT articles_author_id_fkey FOREIGN KEY (author_id) REFERENCES public.authors(id);
server/tests-py/pgdump/setup.yaml (new file, 45 lines)
@@ -0,0 +1,45 @@
type: bulk
args:
- type: run_sql
args:
sql: |
CREATE TABLE public.authors (
id serial NOT NULL PRIMARY KEY,
name text NOT NULL
);
CREATE TABLE public.articles (
id serial NOT NULL PRIMARY KEY,
author_id integer NOT NULL REFERENCES public.authors(id),
title text NOT NULL,
body text NOT NULL
);
- args:
name: authors
schema: public
type: track_table
- args:
name: articles
schema: public
type: track_table
- args:
delete:
columns: '*'
headers: []
insert:
columns: '*'
name: articles
retry_conf:
interval_sec: 10
num_retries: 0
timeout_sec: 60
table:
name: articles
schema: public
update:
columns:
- author_id
- body
- id
- title
webhook: https://httpbin.org/post
type: create_event_trigger
server/tests-py/pgdump/teardown.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
type: bulk
args:
- args:
cascade: true
sql: |
DROP TABLE articles;
DROP TABLE authors;
type: run_sql
server/tests-py/test_pg_dump.py (new file, 30 lines)
@@ -0,0 +1,30 @@
import yaml
from super_classes import DefaultTestSelectQueries
import os

resp_pg_version_map = {
'9_5': 'response_9',
'9_6': 'response_9',
'10_6': 'response_10_11',
'11_1': 'response_10_11',
'latest': 'response_10_11'
}

class TestPGDump(DefaultTestSelectQueries):

def test_pg_dump_for_public_schema(self, hge_ctx):
query_file = self.dir() + '/pg_dump_public.yaml'
PG_VERSION = os.getenv('PG_VERSION', 'latest')
with open(query_file, 'r') as stream:
q = yaml.safe_load(stream)
headers = {}
if hge_ctx.hge_key is not None:
headers['x-hasura-admin-secret'] = hge_ctx.hge_key
resp = hge_ctx.http.post(hge_ctx.hge_url + q['url'], json=q['query'], headers=headers)
body = resp.text
assert resp.status_code == q['status']
assert body == q[resp_pg_version_map[PG_VERSION]]

@classmethod
def dir(cls):
return "pgdump"