Add downgrade command (close #1156) (#3760)

* Add downgrade command

* Add docs per @lexi-lambda's suggestions

* make tests pass

* Update hdb_version once, from Haskell

* more work based on feedback

* Improve the usage message

* Small docs changes

* Test downgrades exist for each tag

* Update downgrading.rst

* Use git-log to find tags which are ancestors of the current commit

Co-authored-by: Vamshi Surabhi <0x777@users.noreply.github.com>
Phil Freeman 2020-02-07 03:03:12 -08:00 committed by GitHub
parent e84f8b9b58
commit 94102c0460
25 changed files with 1121 additions and 1312 deletions
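In practice the new subcommand composes with the existing global connection options; assuming the usual --database-url flag, an invocation looks roughly like:

    graphql-engine --database-url postgres://... downgrade --to-v1.0.0
    graphql-engine --database-url postgres://... downgrade --to-catalog-version 28 --dryRun

The second form prints the downgrade SQL without running it (see the --dryRun switch added below).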

File diff suppressed because it is too large.

@@ -191,6 +191,8 @@ library
-- testing
, QuickCheck
, generic-arbitrary
, directory
exposed-modules: Control.Arrow.Extended
, Control.Arrow.Trans
@@ -401,7 +403,10 @@ test-suite graphql-engine-tests
, natural-transformation >=0.4 && <0.5
, optparse-applicative
, pg-client
, process
, QuickCheck
, safe
, split
, time
, transformers-base
, unordered-containers

@@ -11,7 +11,7 @@ import Hasura.RQL.DDL.Metadata (fetchMetadata)
import Hasura.RQL.DDL.Schema
import Hasura.RQL.Types
import Hasura.Server.Init
import Hasura.Server.Migrate (dropCatalog)
import Hasura.Server.Migrate (downgradeCatalog, dropCatalog)
import Hasura.Server.Version
import qualified Data.ByteString.Lazy as BL
@@ -49,6 +49,13 @@ runApp (HGEOptionsG rci hgeCmd) =
& fmap (\(res, _, _) -> res)
either printErrJExit (liftIO . BLC.putStrLn) res
HCDowngrade opts -> do
(InitCtx{..}, initTime) <- initialiseCtx hgeCmd rci
let sqlGenCtx = SQLGenCtx False
res <- downgradeCatalog opts initTime
& runAsAdmin _icPgPool sqlGenCtx _icHttpManager
either printErrJExit (liftIO . print) res
HCVersion -> liftIO $ putStrLn $ "Hasura GraphQL Engine: " ++ convertText currentVersion
where
runTx' initCtx tx =

@@ -71,6 +71,8 @@
( progDesc "Clean graphql-engine's metadata to start afresh" ))
<> command "execute" (info (pure HCExecute)
( progDesc "Execute a query" ))
<> command "downgrade" (info (HCDowngrade <$> downgradeOptionsParser)
(progDesc "Downgrade the GraphQL Engine schema to the specified version"))
<> command "version" (info (pure HCVersion)
(progDesc "Prints the version of GraphQL Engine"))
)
@@ -184,7 +186,6 @@ initialiseCtx hgeCmd rci = do
initRes <- runAsAdmin pool sqlGenCtx httpManager $ migrateCatalog currentTime
either printErrJExit (\(result, schemaCache) -> logger result $> schemaCache) initRes
runHGEServer
:: ( HasVersion
, MonadIO m

@@ -116,6 +116,12 @@ data ServeOptions impl
, soPlanCacheOptions :: !E.PlanCacheOptions
}
data DowngradeOptions
= DowngradeOptions
{ dgoTargetVersion :: !T.Text
, dgoDryRun :: !Bool
} deriving (Show, Eq)
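For orientation, the record produced by "downgrade --to-catalog-version 28 --dryRun" would be the following (a sketch; assumes OverloadedStrings for the Text literal):

    opts :: DowngradeOptions
    opts = DowngradeOptions { dgoTargetVersion = "28", dgoDryRun = True }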
data RawConnInfo =
RawConnInfo
{ connHost :: !(Maybe String)
@@ -134,6 +140,7 @@ data HGECommandG a
| HCClean
| HCExecute
| HCVersion
| HCDowngrade !DowngradeOptions
deriving (Show, Eq)
data API
@@ -286,11 +293,12 @@ mkHGEOptions (HGEOptionsG rawConnInfo rawCmd) =
where
connInfo = mkRawConnInfo rawConnInfo
cmd = case rawCmd of
HCServe rso -> HCServe <$> mkServeOptions rso
HCExport -> return HCExport
HCClean -> return HCClean
HCExecute -> return HCExecute
HCVersion -> return HCVersion
HCServe rso -> HCServe <$> mkServeOptions rso
HCExport -> return HCExport
HCClean -> return HCClean
HCExecute -> return HCExecute
HCVersion -> return HCVersion
HCDowngrade tgt -> return (HCDowngrade tgt)
mkRawConnInfo :: RawConnInfo -> WithEnv RawConnInfo
mkRawConnInfo rawConnInfo = do
@@ -1071,3 +1079,45 @@ serveOptionsParser =
<*> parseEnabledLogs
<*> parseLogLevel
<*> parsePlanCacheSize
-- | This implements the mapping between application versions
-- and catalog schema versions.
downgradeShortcuts :: [(String, String)]
downgradeShortcuts =
[ ("v1.0.0-beta.1", "16")
, ("v1.0.0-beta.2", "17")
, ("v1.0.0-beta.3", "17")
, ("v1.0.0-beta.4", "19")
, ("v1.0.0-beta.5", "19")
, ("v1.0.0-beta.6", "22")
, ("v1.0.0-beta.7", "24")
, ("v1.0.0-beta.8", "26")
, ("v1.0.0-beta.9", "26")
, ("v1.0.0-beta.10", "27")
, ("v1.0.0-rc.1", "28")
, ("v1.0.0", "28")
, ("v1.1.0-beta.1", "29")
, ("v1.1.0-beta.2", "30")
]
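Since this is a plain association list, the catalog version for a release can be recovered with an ordinary lookup; for example, lookup "v1.0.0" downgradeShortcuts evaluates to Just "28".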
downgradeOptionsParser :: Parser DowngradeOptions
downgradeOptionsParser =
DowngradeOptions
<$> choice
(strOption
( long "to-catalog-version" <>
metavar "<VERSION>" <>
help "The target catalog schema version (e.g. 31)"
)
: map (uncurry shortcut) downgradeShortcuts
)
<*> switch
( long "dryRun" <>
help "Don't run any migrations, just print out the SQL."
)
where
shortcut v catalogVersion =
flag' (DataString.fromString catalogVersion)
( long ("to-" <> v) <>
help ("Downgrade to graphql-engine version " <> v <> " (equivalent to --to-catalog-version " <> catalogVersion <> ")")
)
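The pattern above can be reproduced standalone using asum from Data.Foldable in place of choice (both fold a list of parsers with (<|>)); a minimal sketch with the shortcut table cut down to one entry:

    import Data.Foldable (asum)
    import Options.Applicative

    -- Each shortcut is a zero-argument flag' that yields a fixed catalog version,
    -- so "--to-v1.0.0" and "--to-catalog-version 28" parse to the same String.
    targetVersionParser :: Parser String
    targetVersionParser = asum
      ( strOption (long "to-catalog-version" <> metavar "<VERSION>")
      : [ flag' "28" (long "to-v1.0.0") ]
      )

    main :: IO ()
    main = execParser (info (targetVersionParser <**> helper) fullDesc) >>= putStrLn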

@@ -5,6 +5,10 @@
-- 1. Bump the catalog version number in "Hasura.Server.Migrate.Version".
-- 2. Add a migration script in the @src-rsr/migrations/@ directory with the name
-- @<old version>_to_<new version>.sql@.
-- 3. Create a downgrade script in the @src-rsr/migrations/@ directory with the name
-- @<new version>_to_<old version>.sql@.
-- 4. If making a new release, add the mapping from application version to catalog
-- schema version in @src-lib/Hasura/Server/Init.hs@.
--
-- The Template Haskell code in this module will automatically compile the new migration script into
-- the @graphql-engine@ executable.
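As a concrete (hypothetical) illustration of steps 2-4: bumping the catalog from version 33 to 34 would add src-rsr/migrations/33_to_34.sql, its inverse src-rsr/migrations/34_to_33.sql, and, at release time, one new entry in the downgradeShortcuts table in src-lib/Hasura/Server/Init.hs.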
@@ -14,6 +18,7 @@ module Hasura.Server.Migrate
, latestCatalogVersion
, recreateSystemMetadata
, dropCatalog
, downgradeCatalog
) where
import Hasura.Prelude
@@ -21,6 +26,7 @@ import Hasura.Prelude
import qualified Data.Aeson as A
import qualified Data.HashMap.Strict as HM
import qualified Data.Text as T
import qualified Data.Text.IO as TIO
import qualified Database.PG.Query as Q
import qualified Database.PG.Query.Connection as Q
import qualified Language.Haskell.TH.Lib as TH
@@ -33,11 +39,13 @@ import Data.Time.Clock (UTCTime)
import Hasura.Logging (Hasura, LogLevel (..), ToEngineLog (..))
import Hasura.RQL.DDL.Relationship
import Hasura.RQL.DDL.Schema
import Hasura.Server.Init (DowngradeOptions (..))
import Hasura.RQL.Types
import Hasura.Server.Logging (StartupLog (..))
import Hasura.Server.Migrate.Version (latestCatalogVersion, latestCatalogVersionString)
import Hasura.Server.Version (HasVersion)
import Hasura.SQL.Types
import System.Directory (doesFileExist)
dropCatalog :: (MonadTx m) => m ()
dropCatalog = liftTx $ Q.catchE defaultTxErrorHandler $ do
@@ -66,6 +74,14 @@ instance ToEngineLog MigrationResult Hasura where
<> latestCatalogVersionString <> "."
}
-- A migration and (hopefully) also its inverse, if we have it.
-- Polymorphic because `m` can be any monad satisfying `MonadTx` and
-- `MonadIO` when used in the `migrations` function below.
data MigrationPair m = MigrationPair
{ mpMigrate :: m ()
, mpDown :: Maybe (m ())
}
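A throwaway value shows the shape; IO stands in for m, the script name is hypothetical, and the actions are placeholders:

    examplePair :: MigrationPair IO
    examplePair = MigrationPair
      { mpMigrate = putStrLn "apply 31_to_32.sql" -- would run the upgrade SQL
      , mpDown    = Nothing                       -- no downgrade script for this step
      }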
migrateCatalog
:: forall m
. ( HasVersion
@@ -75,7 +91,8 @@ migrateCatalog
, HasHttpManager m
, HasSQLGenCtx m
)
=> UTCTime -> m (MigrationResult, RebuildableSchemaCache m)
=> UTCTime
-> m (MigrationResult, RebuildableSchemaCache m)
migrateCatalog migrationTime = do
doesSchemaExist (SchemaName "hdb_catalog") >>= \case
False -> initialize True
@@ -128,53 +145,25 @@ migrateCatalog migrationTime = do
| previousVersion == latestCatalogVersionString = do
schemaCache <- buildRebuildableSchemaCache
pure (MRNothingToDo, schemaCache)
| [] <- neededMigrations = throw400 NotSupported $
"Cannot use database previously used with a newer version of graphql-engine (expected"
<> " a catalog version <=" <> latestCatalogVersionString <> ", but the current version"
<> " is " <> previousVersion <> ")."
| [] <- neededMigrations =
throw400 NotSupported $
"Cannot use database previously used with a newer version of graphql-engine (expected"
<> " a catalog version <=" <> latestCatalogVersionString <> ", but the current version"
<> " is " <> previousVersion <> ")."
| otherwise = do
traverse_ snd neededMigrations
traverse_ (mpMigrate . snd) neededMigrations
schemaCache <- buildCacheAndRecreateSystemMetadata
updateCatalogVersion
pure (MRMigrated previousVersion, schemaCache)
where
neededMigrations = dropWhile ((/= previousVersion) . fst) migrations
migrations :: [(T.Text, m ())]
migrations =
-- We need to build the list of migrations at compile-time so that we can compile the SQL
-- directly into the executable using `Q.sqlFromFile`. The GHC stage restriction makes
-- doing this a little bit awkward (we can't use any definitions in this module at
-- compile-time), but putting a `let` inside the splice itself is allowed.
$(let migrationFromFile from to =
let path = "src-rsr/migrations/" <> from <> "_to_" <> to <> ".sql"
in [| runTx $(Q.sqlFromFile path) |]
migrationsFromFile = map $ \(to :: Integer) ->
let from = to - 1
in [| ( $(TH.lift $ T.pack (show from))
, $(migrationFromFile (show from) (show to))
) |]
in TH.listE
-- version 0.8 is the only non-integral catalog version
$ [| ("0.8", $(migrationFromFile "08" "1")) |]
: migrationsFromFile [2..3]
++ [| ("3", from3To4) |]
: migrationsFromFile [5..latestCatalogVersion])
neededMigrations = dropWhile ((/= previousVersion) . fst) (migrations False)
buildCacheAndRecreateSystemMetadata :: m (RebuildableSchemaCache m)
buildCacheAndRecreateSystemMetadata = do
schemaCache <- buildRebuildableSchemaCache
view _2 <$> runCacheRWT schemaCache recreateSystemMetadata
-- the old 0.8 catalog version is non-integral, so we store it in the database as a string
getCatalogVersion = liftTx $ runIdentity . Q.getRow <$> Q.withQE defaultTxErrorHandler
[Q.sql| SELECT version FROM hdb_catalog.hdb_version |] () False
updateCatalogVersion = liftTx $ Q.unitQE defaultTxErrorHandler [Q.sql|
INSERT INTO hdb_catalog.hdb_version (version, upgraded_on) VALUES ($1, $2)
ON CONFLICT ((version IS NOT NULL))
DO UPDATE SET version = $1, upgraded_on = $2
|] (latestCatalogVersionString, migrationTime) False
updateCatalogVersion = setCatalogVersion latestCatalogVersionString migrationTime
doesSchemaExist schemaName =
liftTx $ (runIdentity . Q.getRow) <$> Q.withQE defaultTxErrorHandler [Q.sql|
@@ -197,6 +186,109 @@ migrateCatalog migrationTime = do
WHERE name = $1
) |] (Identity schemaName) False
downgradeCatalog :: forall m. (MonadIO m, MonadTx m) => DowngradeOptions -> UTCTime -> m MigrationResult
downgradeCatalog opts time = do
downgradeFrom =<< getCatalogVersion
where
-- downgrades an existing catalog to the specified version
downgradeFrom :: T.Text -> m MigrationResult
downgradeFrom previousVersion
| previousVersion == dgoTargetVersion opts = do
pure MRNothingToDo
| otherwise =
case neededDownMigrations (dgoTargetVersion opts) of
Left reason ->
throw400 NotSupported $
"This downgrade path (from "
<> previousVersion <> " to "
<> dgoTargetVersion opts <>
") is not supported, because "
<> reason
Right path -> do
sequence_ path
setCatalogVersion (dgoTargetVersion opts) time
pure (MRMigrated previousVersion)
where
neededDownMigrations newVersion =
downgrade previousVersion newVersion
(reverse (migrations (dgoDryRun opts)))
downgrade
:: T.Text
-> T.Text
-> [(T.Text, MigrationPair m)]
-> Either T.Text [m ()]
downgrade lower upper = skipFutureDowngrades where
-- We find the list of downgrade scripts to run by first
-- dropping any downgrades which correspond to newer versions
-- of the schema than the one we're running currently.
-- Then we take migrations as needed until we reach the target
-- version, dropping any remaining migrations from the end of the
-- (reversed) list.
skipFutureDowngrades, dropOlderDowngrades :: [(T.Text, MigrationPair m)] -> Either T.Text [m ()]
skipFutureDowngrades xs | previousVersion == lower = dropOlderDowngrades xs
skipFutureDowngrades [] = Left "the starting version is unrecognized."
skipFutureDowngrades ((x, _):xs)
| x == lower = dropOlderDowngrades xs
| otherwise = skipFutureDowngrades xs
dropOlderDowngrades [] = Left "the target version is unrecognized."
dropOlderDowngrades ((x, MigrationPair{ mpDown = Nothing }):_) =
Left $ "there is no available migration back to version " <> x <> "."
dropOlderDowngrades ((x, MigrationPair{ mpDown = Just y }):xs)
| x == upper = Right [y]
| otherwise = (y:) <$> dropOlderDowngrades xs
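The second phase of that scan is easy to check in isolation with a toy version, where strings stand in for the down-migration actions and Maybe marks a missing script (version numbers are hypothetical):

    -- Reversed migration list: keyed by the *older* version, newest step first.
    downs :: [(String, Maybe String)]
    downs = [("31", Just "32->31"), ("30", Just "31->30"), ("29", Just "30->29")]

    -- Collect down-migrations from the head until the target key is reached,
    -- mirroring dropOlderDowngrades above.
    pathTo :: String -> Either String [String]
    pathTo target = go downs
      where
        go [] = Left "the target version is unrecognized."
        go ((x, Nothing) : _) = Left ("there is no available migration back to version " <> x)
        go ((x, Just y) : xs)
          | x == target = Right [y]
          | otherwise   = (y :) <$> go xs

    main :: IO ()
    main = print (pathTo "29") -- Right ["32->31","31->30","30->29"]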
-- | The old 0.8 catalog version is non-integral, so we store it in the database as a
-- string.
getCatalogVersion :: MonadTx m => m Text
getCatalogVersion = liftTx $ runIdentity . Q.getRow <$> Q.withQE defaultTxErrorHandler
[Q.sql| SELECT version FROM hdb_catalog.hdb_version |] () False
setCatalogVersion :: MonadTx m => Text -> UTCTime -> m ()
setCatalogVersion ver time = liftTx $ Q.unitQE defaultTxErrorHandler [Q.sql|
INSERT INTO hdb_catalog.hdb_version (version, upgraded_on) VALUES ($1, $2)
ON CONFLICT ((version IS NOT NULL))
DO UPDATE SET version = $1, upgraded_on = $2
|] (ver, time) False
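The ON CONFLICT ((version IS NOT NULL)) target works because the catalog keeps hdb_version to a single row via a unique expression index over (version IS NOT NULL); every insert after the first therefore conflicts and falls through to the UPDATE, overwriting the lone row. (The index itself is created by the catalog bootstrap, not shown in this diff.)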
migrations :: forall m. (MonadIO m, MonadTx m) => Bool -> [(T.Text, MigrationPair m)]
migrations dryRun =
-- We need to build the list of migrations at compile-time so that we can compile the SQL
-- directly into the executable using `Q.sqlFromFile`. The GHC stage restriction makes
-- doing this a little bit awkward (we can't use any definitions in this module at
-- compile-time), but putting a `let` inside the splice itself is allowed.
$(let migrationFromFile from to =
let path = "src-rsr/migrations/" <> from <> "_to_" <> to <> ".sql"
in [| runTxOrPrint $(Q.sqlFromFile path) |]
migrationFromFileMaybe from to = do
let path = "src-rsr/migrations/" <> from <> "_to_" <> to <> ".sql"
exists <- TH.runIO (doesFileExist path)
if exists
then [| Just (runTxOrPrint $(Q.sqlFromFile path)) |]
else [| Nothing |]
migrationsFromFile = map $ \(to :: Integer) ->
let from = to - 1
in [| ( $(TH.lift $ T.pack (show from))
, MigrationPair
$(migrationFromFile (show from) (show to))
$(migrationFromFileMaybe (show to) (show from))
) |]
in TH.listE
-- version 0.8 is the only non-integral catalog version
$ [| ("0.8", (MigrationPair $(migrationFromFile "08" "1") Nothing)) |]
: migrationsFromFile [2..3]
++ [| ("3", (MigrationPair from3To4 Nothing)) |]
: migrationsFromFile [5..latestCatalogVersion])
where
runTxOrPrint :: Q.Query -> m ()
runTxOrPrint
| dryRun =
liftIO . TIO.putStrLn . Q.getQueryText
| otherwise = runTx
from3To4 = liftTx $ Q.catchE defaultTxErrorHandler $ do
Q.unitQ [Q.sql|
ALTER TABLE hdb_catalog.event_triggers
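Two Template Haskell details above are worth seeing in miniature: let-bound helpers inside the splice sidestep the GHC stage restriction, and runIO lets compilation probe the filesystem so that a missing downgrade script becomes Nothing rather than a build failure. A reduced, self-contained sketch (hypothetical file layout; only template-haskell and directory assumed):

    {-# LANGUAGE TemplateHaskell #-}
    module Embed where

    import qualified Language.Haskell.TH as TH
    import Language.Haskell.TH.Syntax (lift, runIO)
    import System.Directory (doesFileExist)

    -- Built at compile time: each entry records whether the downgrade script
    -- for that step existed when this module was compiled.
    downgradeAvailability :: [(String, Bool)]
    downgradeAvailability =
      $(let entry to = do
              let path = "migrations/" <> show to <> "_to_" <> show (to - 1) <> ".sql"
              exists <- runIO (doesFileExist path)
              [| ($(lift (show to)), $(lift exists)) |]
        in TH.listE (map entry [2 .. 5 :: Integer]))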

@@ -0,0 +1,68 @@
CREATE OR REPLACE VIEW hdb_catalog.hdb_function_agg AS
(
SELECT
p.proname::text AS function_name,
pn.nspname::text AS function_schema,
CASE
WHEN (p.provariadic = (0) :: oid) THEN false
ELSE true
END AS has_variadic,
CASE
WHEN (
(p.provolatile) :: text = ('i' :: character(1)) :: text
) THEN 'IMMUTABLE' :: text
WHEN (
(p.provolatile) :: text = ('s' :: character(1)) :: text
) THEN 'STABLE' :: text
WHEN (
(p.provolatile) :: text = ('v' :: character(1)) :: text
) THEN 'VOLATILE' :: text
ELSE NULL :: text
END AS function_type,
pg_get_functiondef(p.oid) AS function_definition,
rtn.nspname::text AS return_type_schema,
rt.typname::text AS return_type_name,
CASE
WHEN ((rt.typtype) :: text = ('b' :: character(1)) :: text) THEN 'BASE' :: text
WHEN ((rt.typtype) :: text = ('c' :: character(1)) :: text) THEN 'COMPOSITE' :: text
WHEN ((rt.typtype) :: text = ('d' :: character(1)) :: text) THEN 'DOMAIN' :: text
WHEN ((rt.typtype) :: text = ('e' :: character(1)) :: text) THEN 'ENUM' :: text
WHEN ((rt.typtype) :: text = ('r' :: character(1)) :: text) THEN 'RANGE' :: text
WHEN ((rt.typtype) :: text = ('p' :: character(1)) :: text) THEN 'PSUEDO' :: text
ELSE NULL :: text
END AS return_type_type,
p.proretset AS returns_set,
( SELECT
COALESCE(json_agg(pt.typname), '[]')
FROM
(
unnest(
COALESCE(p.proallargtypes, (p.proargtypes) :: oid [])
) WITH ORDINALITY pat(oid, ordinality)
LEFT JOIN pg_type pt ON ((pt.oid = pat.oid))
)
) AS input_arg_types,
to_json(COALESCE(p.proargnames, ARRAY [] :: text [])) AS input_arg_names
FROM
pg_proc p
JOIN pg_namespace pn ON (pn.oid = p.pronamespace)
JOIN pg_type rt ON (rt.oid = p.prorettype)
JOIN pg_namespace rtn ON (rtn.oid = rt.typnamespace)
WHERE
pn.nspname :: text NOT LIKE 'pg_%'
AND pn.nspname :: text NOT IN ('information_schema', 'hdb_catalog', 'hdb_views')
AND (NOT EXISTS (
SELECT
1
FROM
pg_aggregate
WHERE
((pg_aggregate.aggfnoid) :: oid = p.oid)
)
)
);
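Note that the downgrade scripts reproduce the older catalog definitions verbatim, historical quirks included: the 'PSUEDO' spelling above is the typo that actually shipped in the old version of this view (later catalog versions use the corrected 'PSEUDO', as one of the scripts further down shows), so the downgraded schema matches what the older binary expects byte for byte.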

@@ -0,0 +1,2 @@
DROP VIEW IF EXISTS hdb_catalog.hdb_table_info_agg;
DROP VIEW IF EXISTS hdb_catalog.hdb_function_info_agg;

@@ -0,0 +1 @@
DROP FUNCTION IF EXISTS hdb_catalog.insert_event_log(text, text, text, text, json);

@@ -0,0 +1,5 @@
DELETE FROM hdb_catalog.hdb_table WHERE (table_schema, table_name)
IN (('hdb_catalog', 'hdb_query_collection'), ('hdb_catalog', 'hdb_allowlist'));
DROP TABLE IF EXISTS hdb_catalog.hdb_allowlist;
DROP TABLE IF EXISTS hdb_catalog.hdb_query_collection;

@@ -0,0 +1,4 @@
UPDATE hdb_catalog.hdb_table
SET is_system_defined = 'false'
WHERE table_schema = 'hdb_catalog'
AND table_name = 'hdb_allowlist';

@@ -0,0 +1,10 @@
CREATE TABLE hdb_catalog.hdb_query_template
(
template_name TEXT PRIMARY KEY,
template_defn JSONB NOT NULL,
comment TEXT NULL,
is_system_defined boolean default false
);
INSERT INTO hdb_catalog.hdb_table (table_schema, table_name)
VALUES ('hdb_catalog', 'hdb_query_template');

@@ -0,0 +1,9 @@
DROP TRIGGER hdb_schema_update_event_notifier ON hdb_catalog.hdb_schema_update_event;
CREATE TRIGGER hdb_schema_update_event_notifier AFTER INSERT ON
hdb_catalog.hdb_schema_update_event FOR EACH ROW EXECUTE PROCEDURE
hdb_catalog.hdb_schema_update_event_notifier();
DROP INDEX hdb_catalog."hdb_schema_update_event_one_row";
ALTER TABLE hdb_catalog.hdb_schema_update_event ADD COLUMN id SERIAL PRIMARY KEY;

@@ -0,0 +1,144 @@
ALTER TABLE hdb_catalog.hdb_table DROP COLUMN is_enum;
CREATE FUNCTION hdb_catalog.hdb_table_oid_check() RETURNS trigger AS
$function$
BEGIN
IF (EXISTS (SELECT 1 FROM information_schema.tables st WHERE st.table_schema = NEW.table_schema AND st.table_name = NEW.table_name)) THEN
return NEW;
ELSE
RAISE foreign_key_violation using message = 'table_schema, table_name not in information_schema.tables';
return NULL;
END IF;
END;
$function$
LANGUAGE plpgsql;
CREATE TRIGGER hdb_table_oid_check BEFORE INSERT OR UPDATE ON hdb_catalog.hdb_table
FOR EACH ROW EXECUTE PROCEDURE hdb_catalog.hdb_table_oid_check();
DROP VIEW hdb_catalog.hdb_table_info_agg;
DROP VIEW hdb_catalog.hdb_column;
DROP VIEW hdb_catalog.hdb_foreign_key_constraint;
CREATE VIEW hdb_catalog.hdb_foreign_key_constraint AS
SELECT
q.table_schema :: text,
q.table_name :: text,
q.constraint_name :: text,
min(q.constraint_oid) :: integer as constraint_oid,
min(q.ref_table_table_schema) :: text as ref_table_table_schema,
min(q.ref_table) :: text as ref_table,
json_object_agg(ac.attname, afc.attname) as column_mapping,
min(q.confupdtype) :: text as on_update,
min(q.confdeltype) :: text as on_delete
FROM
(SELECT
ctn.nspname AS table_schema,
ct.relname AS table_name,
r.conrelid AS table_id,
r.conname as constraint_name,
r.oid as constraint_oid,
cftn.nspname AS ref_table_table_schema,
cft.relname as ref_table,
r.confrelid as ref_table_id,
r.confupdtype,
r.confdeltype,
UNNEST (r.conkey) AS column_id,
UNNEST (r.confkey) AS ref_column_id
FROM
pg_catalog.pg_constraint r
JOIN pg_catalog.pg_class ct
ON r.conrelid = ct.oid
JOIN pg_catalog.pg_namespace ctn
ON ct.relnamespace = ctn.oid
JOIN pg_catalog.pg_class cft
ON r.confrelid = cft.oid
JOIN pg_catalog.pg_namespace cftn
ON cft.relnamespace = cftn.oid
WHERE
r.contype = 'f'
) q
JOIN pg_catalog.pg_attribute ac
ON q.column_id = ac.attnum
AND q.table_id = ac.attrelid
JOIN pg_catalog.pg_attribute afc
ON q.ref_column_id = afc.attnum
AND q.ref_table_id = afc.attrelid
GROUP BY q.table_schema, q.table_name, q.constraint_name;
CREATE VIEW hdb_catalog.hdb_table_info_agg AS (
select
tables.table_name as table_name,
tables.table_schema as table_schema,
coalesce(columns.columns, '[]') as columns,
coalesce(pk.columns, '[]') as primary_key_columns,
coalesce(constraints.constraints, '[]') as constraints,
coalesce(views.view_info, 'null') as view_info
from
information_schema.tables as tables
left outer join (
select
c.table_name,
c.table_schema,
json_agg(
json_build_object(
'name',
column_name,
'type',
udt_name,
'is_nullable',
is_nullable :: boolean
)
) as columns
from
information_schema.columns c
group by
c.table_schema,
c.table_name
) columns on (
tables.table_schema = columns.table_schema
AND tables.table_name = columns.table_name
)
left outer join (
select * from hdb_catalog.hdb_primary_key
) pk on (
tables.table_schema = pk.table_schema
AND tables.table_name = pk.table_name
)
left outer join (
select
c.table_schema,
c.table_name,
json_agg(constraint_name) as constraints
from
information_schema.table_constraints c
where
c.constraint_type = 'UNIQUE'
or c.constraint_type = 'PRIMARY KEY'
group by
c.table_schema,
c.table_name
) constraints on (
tables.table_schema = constraints.table_schema
AND tables.table_name = constraints.table_name
)
left outer join (
select
table_schema,
table_name,
json_build_object(
'is_updatable',
(is_updatable::boolean OR is_trigger_updatable::boolean),
'is_deletable',
(is_updatable::boolean OR is_trigger_deletable::boolean),
'is_insertable',
(is_insertable_into::boolean OR is_trigger_insertable_into::boolean)
) as view_info
from
information_schema.views v
) views on (
tables.table_schema = views.table_schema
AND tables.table_name = views.table_name
)
);

@@ -0,0 +1 @@
DROP INDEX hdb_catalog."event_log_locked_idx";

@@ -0,0 +1,111 @@
DROP VIEW IF EXISTS hdb_catalog.hdb_function_info_agg;
DROP VIEW IF EXISTS hdb_catalog.hdb_function_agg;
CREATE VIEW hdb_catalog.hdb_function_agg AS
(
SELECT
p.proname::text AS function_name,
pn.nspname::text AS function_schema,
CASE
WHEN (p.provariadic = (0) :: oid) THEN false
ELSE true
END AS has_variadic,
CASE
WHEN (
(p.provolatile) :: text = ('i' :: character(1)) :: text
) THEN 'IMMUTABLE' :: text
WHEN (
(p.provolatile) :: text = ('s' :: character(1)) :: text
) THEN 'STABLE' :: text
WHEN (
(p.provolatile) :: text = ('v' :: character(1)) :: text
) THEN 'VOLATILE' :: text
ELSE NULL :: text
END AS function_type,
pg_get_functiondef(p.oid) AS function_definition,
rtn.nspname::text AS return_type_schema,
rt.typname::text AS return_type_name,
CASE
WHEN ((rt.typtype) :: text = ('b' :: character(1)) :: text) THEN 'BASE' :: text
WHEN ((rt.typtype) :: text = ('c' :: character(1)) :: text) THEN 'COMPOSITE' :: text
WHEN ((rt.typtype) :: text = ('d' :: character(1)) :: text) THEN 'DOMAIN' :: text
WHEN ((rt.typtype) :: text = ('e' :: character(1)) :: text) THEN 'ENUM' :: text
WHEN ((rt.typtype) :: text = ('r' :: character(1)) :: text) THEN 'RANGE' :: text
WHEN ((rt.typtype) :: text = ('p' :: character(1)) :: text) THEN 'PSUEDO' :: text
ELSE NULL :: text
END AS return_type_type,
p.proretset AS returns_set,
( SELECT
COALESCE(json_agg(q.type_name), '[]')
FROM
(
SELECT
pt.typname AS type_name,
pat.ordinality
FROM
unnest(
COALESCE(p.proallargtypes, (p.proargtypes) :: oid [])
) WITH ORDINALITY pat(oid, ordinality)
LEFT JOIN pg_type pt ON ((pt.oid = pat.oid))
ORDER BY pat.ordinality ASC
) q
) AS input_arg_types,
to_json(COALESCE(p.proargnames, ARRAY [] :: text [])) AS input_arg_names
FROM
pg_proc p
JOIN pg_namespace pn ON (pn.oid = p.pronamespace)
JOIN pg_type rt ON (rt.oid = p.prorettype)
JOIN pg_namespace rtn ON (rtn.oid = rt.typnamespace)
WHERE
pn.nspname :: text NOT LIKE 'pg_%'
AND pn.nspname :: text NOT IN ('information_schema', 'hdb_catalog', 'hdb_views')
AND (NOT EXISTS (
SELECT
1
FROM
pg_aggregate
WHERE
((pg_aggregate.aggfnoid) :: oid = p.oid)
)
)
);
CREATE VIEW hdb_catalog.hdb_function_info_agg AS (
SELECT
function_name,
function_schema,
row_to_json (
(
SELECT
e
FROM
(
SELECT
has_variadic,
function_type,
return_type_schema,
return_type_name,
return_type_type,
returns_set,
input_arg_types,
input_arg_names,
exists(
SELECT
1
FROM
information_schema.tables
WHERE
table_schema = return_type_schema
AND table_name = return_type_name
) AS returns_table
) AS e
)
) AS "function_info"
FROM
hdb_catalog.hdb_function_agg
);

@@ -0,0 +1,221 @@
DROP VIEW IF EXISTS hdb_catalog.hdb_table_info_agg;
DROP VIEW IF EXISTS hdb_catalog.hdb_column;
CREATE VIEW hdb_catalog.hdb_column AS
WITH primary_key_references AS (
SELECT fkey.table_schema AS src_table_schema
, fkey.table_name AS src_table_name
, fkey.columns->>0 AS src_column_name
, json_agg(json_build_object(
'schema', fkey.ref_table_table_schema,
'name', fkey.ref_table
)) AS ref_tables
FROM hdb_catalog.hdb_foreign_key_constraint AS fkey
JOIN hdb_catalog.hdb_primary_key AS pkey
ON pkey.table_schema = fkey.ref_table_table_schema
AND pkey.table_name = fkey.ref_table
AND pkey.columns::jsonb = fkey.ref_columns::jsonb
WHERE json_array_length(fkey.columns) = 1
GROUP BY fkey.table_schema
, fkey.table_name
, fkey.columns->>0)
SELECT columns.table_schema
, columns.table_name
, columns.column_name AS name
, columns.udt_name AS type
, columns.is_nullable
, columns.ordinal_position
, coalesce(pkey_refs.ref_tables, '[]') AS primary_key_references
FROM information_schema.columns
LEFT JOIN primary_key_references AS pkey_refs
ON columns.table_schema = pkey_refs.src_table_schema
AND columns.table_name = pkey_refs.src_table_name
AND columns.column_name = pkey_refs.src_column_name;
CREATE VIEW hdb_catalog.hdb_table_info_agg AS (
select
tables.table_name as table_name,
tables.table_schema as table_schema,
coalesce(columns.columns, '[]') as columns,
coalesce(pk.columns, '[]') as primary_key_columns,
coalesce(constraints.constraints, '[]') as constraints,
coalesce(views.view_info, 'null') as view_info
from
information_schema.tables as tables
left outer join (
select
c.table_name,
c.table_schema,
json_agg(
json_build_object(
'name', name,
'type', type,
'is_nullable', is_nullable :: boolean,
'references', primary_key_references
)
) as columns
from
hdb_catalog.hdb_column c
group by
c.table_schema,
c.table_name
) columns on (
tables.table_schema = columns.table_schema
AND tables.table_name = columns.table_name
)
left outer join (
select * from hdb_catalog.hdb_primary_key
) pk on (
tables.table_schema = pk.table_schema
AND tables.table_name = pk.table_name
)
left outer join (
select
c.table_schema,
c.table_name,
json_agg(constraint_name) as constraints
from
information_schema.table_constraints c
where
c.constraint_type = 'UNIQUE'
or c.constraint_type = 'PRIMARY KEY'
group by
c.table_schema,
c.table_name
) constraints on (
tables.table_schema = constraints.table_schema
AND tables.table_name = constraints.table_name
)
left outer join (
select
table_schema,
table_name,
json_build_object(
'is_updatable',
(is_updatable::boolean OR is_trigger_updatable::boolean),
'is_deletable',
(is_updatable::boolean OR is_trigger_deletable::boolean),
'is_insertable',
(is_insertable_into::boolean OR is_trigger_insertable_into::boolean)
) as view_info
from
information_schema.views v
) views on (
tables.table_schema = views.table_schema
AND tables.table_name = views.table_name
)
);
DROP VIEW IF EXISTS hdb_catalog.hdb_function_info_agg;
DROP VIEW IF EXISTS hdb_catalog.hdb_function_agg;
CREATE VIEW hdb_catalog.hdb_function_agg AS
(
SELECT
p.proname::text AS function_name,
pn.nspname::text AS function_schema,
CASE
WHEN (p.provariadic = (0) :: oid) THEN false
ELSE true
END AS has_variadic,
CASE
WHEN (
(p.provolatile) :: text = ('i' :: character(1)) :: text
) THEN 'IMMUTABLE' :: text
WHEN (
(p.provolatile) :: text = ('s' :: character(1)) :: text
) THEN 'STABLE' :: text
WHEN (
(p.provolatile) :: text = ('v' :: character(1)) :: text
) THEN 'VOLATILE' :: text
ELSE NULL :: text
END AS function_type,
pg_get_functiondef(p.oid) AS function_definition,
rtn.nspname::text AS return_type_schema,
rt.typname::text AS return_type_name,
CASE
WHEN ((rt.typtype) :: text = ('b' :: character(1)) :: text) THEN 'BASE' :: text
WHEN ((rt.typtype) :: text = ('c' :: character(1)) :: text) THEN 'COMPOSITE' :: text
WHEN ((rt.typtype) :: text = ('d' :: character(1)) :: text) THEN 'DOMAIN' :: text
WHEN ((rt.typtype) :: text = ('e' :: character(1)) :: text) THEN 'ENUM' :: text
WHEN ((rt.typtype) :: text = ('r' :: character(1)) :: text) THEN 'RANGE' :: text
WHEN ((rt.typtype) :: text = ('p' :: character(1)) :: text) THEN 'PSUEDO' :: text
ELSE NULL :: text
END AS return_type_type,
p.proretset AS returns_set,
( SELECT
COALESCE(json_agg(q.type_name), '[]')
FROM
(
SELECT
pt.typname AS type_name,
pat.ordinality
FROM
unnest(
COALESCE(p.proallargtypes, (p.proargtypes) :: oid [])
) WITH ORDINALITY pat(oid, ordinality)
LEFT JOIN pg_type pt ON ((pt.oid = pat.oid))
ORDER BY pat.ordinality ASC
) q
) AS input_arg_types,
to_json(COALESCE(p.proargnames, ARRAY [] :: text [])) AS input_arg_names,
p.pronargdefaults AS default_args
FROM
pg_proc p
JOIN pg_namespace pn ON (pn.oid = p.pronamespace)
JOIN pg_type rt ON (rt.oid = p.prorettype)
JOIN pg_namespace rtn ON (rtn.oid = rt.typnamespace)
WHERE
pn.nspname :: text NOT LIKE 'pg_%'
AND pn.nspname :: text NOT IN ('information_schema', 'hdb_catalog', 'hdb_views')
AND (NOT EXISTS (
SELECT
1
FROM
pg_aggregate
WHERE
((pg_aggregate.aggfnoid) :: oid = p.oid)
)
)
);
CREATE VIEW hdb_catalog.hdb_function_info_agg AS (
SELECT
function_name,
function_schema,
row_to_json (
(
SELECT
e
FROM
(
SELECT
has_variadic,
function_type,
return_type_schema,
return_type_name,
return_type_type,
returns_set,
input_arg_types,
input_arg_names,
default_args,
exists(
SELECT
1
FROM
information_schema.tables
WHERE
table_schema = return_type_schema
AND table_name = return_type_name
) AS returns_table
) AS e
)
) AS "function_info"
FROM
hdb_catalog.hdb_function_agg
);

@@ -0,0 +1 @@
ALTER TABLE hdb_catalog.hdb_table DROP COLUMN configuration;

@@ -0,0 +1,34 @@
CREATE OR REPLACE VIEW hdb_catalog.hdb_column AS
WITH primary_key_references AS (
SELECT fkey.table_schema AS src_table_schema
, fkey.table_name AS src_table_name
, fkey.columns->>0 AS src_column_name
, json_agg(json_build_object(
'schema', fkey.ref_table_table_schema,
'name', fkey.ref_table
)) AS ref_tables
FROM hdb_catalog.hdb_foreign_key_constraint AS fkey
JOIN hdb_catalog.hdb_primary_key AS pkey
ON pkey.table_schema = fkey.ref_table_table_schema
AND pkey.table_name = fkey.ref_table
AND pkey.columns::jsonb = fkey.ref_columns::jsonb
WHERE json_array_length(fkey.columns) = 1
GROUP BY fkey.table_schema
, fkey.table_name
, fkey.columns->>0)
SELECT columns.table_schema
, columns.table_name
, columns.column_name AS name
, columns.udt_name AS type
, columns.is_nullable
, columns.ordinal_position
, coalesce(pkey_refs.ref_tables, '[]') AS primary_key_references
, col_description(pg_class.oid, columns.ordinal_position) AS description
FROM information_schema.columns
LEFT JOIN primary_key_references AS pkey_refs
ON columns.table_schema = pkey_refs.src_table_schema
AND columns.table_name = pkey_refs.src_table_name
AND columns.column_name = pkey_refs.src_column_name
LEFT JOIN pg_class ON pg_class.relname = columns.table_name
LEFT JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace
AND pg_namespace.nspname = columns.table_schema;

@@ -0,0 +1,120 @@
DROP VIEW hdb_catalog.hdb_function_info_agg;
DROP VIEW hdb_catalog.hdb_function_agg;
CREATE VIEW hdb_catalog.hdb_function_agg AS
(
SELECT
p.proname::text AS function_name,
pn.nspname::text AS function_schema,
pd.description,
CASE
WHEN (p.provariadic = (0) :: oid) THEN false
ELSE true
END AS has_variadic,
CASE
WHEN (
(p.provolatile) :: text = ('i' :: character(1)) :: text
) THEN 'IMMUTABLE' :: text
WHEN (
(p.provolatile) :: text = ('s' :: character(1)) :: text
) THEN 'STABLE' :: text
WHEN (
(p.provolatile) :: text = ('v' :: character(1)) :: text
) THEN 'VOLATILE' :: text
ELSE NULL :: text
END AS function_type,
pg_get_functiondef(p.oid) AS function_definition,
rtn.nspname::text AS return_type_schema,
rt.typname::text AS return_type_name,
CASE
WHEN ((rt.typtype) :: text = ('b' :: character(1)) :: text) THEN 'BASE' :: text
WHEN ((rt.typtype) :: text = ('c' :: character(1)) :: text) THEN 'COMPOSITE' :: text
WHEN ((rt.typtype) :: text = ('d' :: character(1)) :: text) THEN 'DOMAIN' :: text
WHEN ((rt.typtype) :: text = ('e' :: character(1)) :: text) THEN 'ENUM' :: text
WHEN ((rt.typtype) :: text = ('r' :: character(1)) :: text) THEN 'RANGE' :: text
WHEN ((rt.typtype) :: text = ('p' :: character(1)) :: text) THEN 'PSEUDO' :: text
ELSE NULL :: text
END AS return_type_type,
p.proretset AS returns_set,
( SELECT
COALESCE(json_agg(q.type_name), '[]')
FROM
(
SELECT
pt.typname AS type_name,
pat.ordinality
FROM
unnest(
COALESCE(p.proallargtypes, (p.proargtypes) :: oid [])
) WITH ORDINALITY pat(oid, ordinality)
LEFT JOIN pg_type pt ON ((pt.oid = pat.oid))
ORDER BY pat.ordinality ASC
) q
) AS input_arg_types,
to_json(COALESCE(p.proargnames, ARRAY [] :: text [])) AS input_arg_names,
p.pronargdefaults AS default_args
FROM
pg_proc p
JOIN pg_namespace pn ON (pn.oid = p.pronamespace)
JOIN pg_type rt ON (rt.oid = p.prorettype)
JOIN pg_namespace rtn ON (rtn.oid = rt.typnamespace)
LEFT JOIN pg_description pd ON p.oid = pd.objoid
WHERE
pn.nspname :: text NOT LIKE 'pg_%'
AND pn.nspname :: text NOT IN ('information_schema', 'hdb_catalog', 'hdb_views')
AND (NOT EXISTS (
SELECT
1
FROM
pg_aggregate
WHERE
((pg_aggregate.aggfnoid) :: oid = p.oid)
)
)
);
CREATE VIEW hdb_catalog.hdb_function_info_agg AS (
SELECT
function_name,
function_schema,
row_to_json (
(
SELECT
e
FROM
(
SELECT
description,
has_variadic,
function_type,
return_type_schema,
return_type_name,
return_type_type,
returns_set,
input_arg_types,
input_arg_names,
default_args,
exists(
SELECT
1
FROM
information_schema.tables
WHERE
table_schema = return_type_schema
AND table_name = return_type_name
) AS returns_table
) AS e
)
) AS "function_info"
FROM
hdb_catalog.hdb_function_agg
);
DROP VIEW hdb_catalog.hdb_computed_field_function;
DROP TABLE hdb_catalog.hdb_computed_field;

@@ -0,0 +1,2 @@
ALTER TABLE hdb_catalog.event_log DROP COLUMN archived;
DROP INDEX hdb_catalog.event_log_delivered_idx;

@@ -0,0 +1,2 @@
ALTER TABLE hdb_catalog.hdb_function
DROP COLUMN configuration;

@@ -0,0 +1,145 @@
DROP VIEW hdb_catalog.hdb_table_info_agg;
DROP VIEW hdb_catalog.hdb_permission_agg;
ALTER TABLE hdb_catalog.hdb_table
ALTER COLUMN table_schema TYPE text,
ALTER COLUMN table_name TYPE text;
ALTER TABLE hdb_catalog.hdb_relationship
ALTER COLUMN table_schema TYPE text,
ALTER COLUMN table_name TYPE text;
ALTER TABLE hdb_catalog.hdb_permission
ALTER COLUMN table_schema TYPE text,
ALTER COLUMN table_name TYPE text;
CREATE VIEW hdb_catalog.hdb_permission_agg AS
SELECT
table_schema,
table_name,
role_name,
json_object_agg(perm_type, perm_def) as permissions
FROM hdb_catalog.hdb_permission
GROUP BY table_schema, table_name, role_name;
CREATE VIEW hdb_catalog.hdb_column AS
WITH primary_key_references AS (
SELECT fkey.table_schema AS src_table_schema
, fkey.table_name AS src_table_name
, fkey.columns->>0 AS src_column_name
, json_agg(json_build_object(
'schema', fkey.ref_table_table_schema,
'name', fkey.ref_table
)) AS ref_tables
FROM hdb_catalog.hdb_foreign_key_constraint AS fkey
JOIN hdb_catalog.hdb_primary_key AS pkey
ON pkey.table_schema = fkey.ref_table_table_schema
AND pkey.table_name = fkey.ref_table
AND pkey.columns::jsonb = fkey.ref_columns::jsonb
WHERE json_array_length(fkey.columns) = 1
GROUP BY fkey.table_schema
, fkey.table_name
, fkey.columns->>0)
SELECT columns.table_schema
, columns.table_name
, columns.column_name AS name
, columns.udt_name AS type
, columns.is_nullable
, columns.ordinal_position
, coalesce(pkey_refs.ref_tables, '[]') AS primary_key_references
, col_description(pg_class.oid, columns.ordinal_position) AS description
FROM information_schema.columns
JOIN pg_class ON pg_class.relname = columns.table_name
JOIN pg_namespace ON pg_namespace.oid = pg_class.relnamespace
AND pg_namespace.nspname = columns.table_schema
LEFT JOIN primary_key_references AS pkey_refs
ON columns.table_schema = pkey_refs.src_table_schema
AND columns.table_name = pkey_refs.src_table_name
AND columns.column_name = pkey_refs.src_column_name;
CREATE VIEW hdb_catalog.hdb_table_info_agg AS (
select
tables.table_name as table_name,
tables.table_schema as table_schema,
descriptions.description,
coalesce(columns.columns, '[]') as columns,
coalesce(pk.columns, '[]') as primary_key_columns,
coalesce(constraints.constraints, '[]') as constraints,
coalesce(views.view_info, 'null') as view_info
from
information_schema.tables as tables
left outer join (
select
c.table_name,
c.table_schema,
json_agg(
json_build_object(
'name', name,
'type', type,
'is_nullable', is_nullable :: boolean,
'references', primary_key_references,
'description', description
)
) as columns
from
hdb_catalog.hdb_column c
group by
c.table_schema,
c.table_name
) columns on (
tables.table_schema = columns.table_schema
AND tables.table_name = columns.table_name
)
left outer join (
select * from hdb_catalog.hdb_primary_key
) pk on (
tables.table_schema = pk.table_schema
AND tables.table_name = pk.table_name
)
left outer join (
select
c.table_schema,
c.table_name,
json_agg(constraint_name) as constraints
from
information_schema.table_constraints c
where
c.constraint_type = 'UNIQUE'
or c.constraint_type = 'PRIMARY KEY'
group by
c.table_schema,
c.table_name
) constraints on (
tables.table_schema = constraints.table_schema
AND tables.table_name = constraints.table_name
)
left outer join (
select
table_schema,
table_name,
json_build_object(
'is_updatable',
(is_updatable::boolean OR is_trigger_updatable::boolean),
'is_deletable',
(is_updatable::boolean OR is_trigger_deletable::boolean),
'is_insertable',
(is_insertable_into::boolean OR is_trigger_insertable_into::boolean)
) as view_info
from
information_schema.views v
) views on (
tables.table_schema = views.table_schema
AND tables.table_name = views.table_name
)
left outer join (
select
pc.relname as table_name,
pn.nspname as table_schema,
pd.description
from pg_class pc
left join pg_namespace pn on pn.oid = pc.relnamespace
left join pg_description pd on pd.objoid = pc.oid
where pd.objsubid = 0
) descriptions on (
tables.table_schema = descriptions.table_schema
AND tables.table_name = descriptions.table_name
)
);

@@ -0,0 +1 @@
DROP FUNCTION hdb_catalog.check_violation(text);

@@ -8,16 +8,21 @@ import Control.Concurrent.MVar.Lifted
import Control.Monad.Trans.Control (MonadBaseControl)
import Control.Monad.Unique
import Control.Natural ((:~>) (..))
import Data.List (isPrefixOf, stripPrefix)
import Data.List.Split (splitOn)
import Data.Time.Clock (getCurrentTime)
import Data.Tuple (swap)
import System.Process (readProcess)
import Test.Hspec.Core.Spec
import Test.Hspec.Expectations.Lifted
import qualified Database.PG.Query as Q
import qualified Safe
import Hasura.RQL.DDL.Metadata (ClearMetadata (..), runClearMetadata)
import Hasura.RQL.DDL.Schema
import Hasura.RQL.Types
import Hasura.Server.Init (DowngradeOptions (..), downgradeShortcuts)
import Hasura.Server.Migrate
import Hasura.Server.PGDump
import Hasura.Server.Version (HasVersion)
@@ -63,20 +68,40 @@ spec
spec pgConnInfo = do
let dropAndInit time = CacheRefT $ flip modifyMVar \_ ->
dropCatalog *> (swap <$> migrateCatalog time)
describe "migrateCatalog" $ do
it "initializes the catalog" $ singleTransaction do
(dropAndInit =<< liftIO getCurrentTime) `shouldReturn` MRInitialized
it "is idempotent" \(NT transact) -> do
let dumpSchema = transact $
execPGDump (PGDumpReqBody ["--schema-only"] (Just False)) pgConnInfo
let dumpSchema = execPGDump (PGDumpReqBody ["--schema-only"] (Just False)) pgConnInfo
time <- getCurrentTime
transact (dropAndInit time) `shouldReturn` MRInitialized
firstDump <- dumpSchema
firstDump <- transact dumpSchema
transact (dropAndInit time) `shouldReturn` MRInitialized
secondDump <- dumpSchema
secondDump <- transact dumpSchema
secondDump `shouldBe` firstDump
it "supports upgrades after downgrade to version 12" \(NT transact) -> do
let downgradeTo v = downgradeCatalog DowngradeOptions{ dgoDryRun = False, dgoTargetVersion = v }
upgradeToLatest time = CacheRefT $ flip modifyMVar \_ ->
swap <$> migrateCatalog time
time <- getCurrentTime
transact (dropAndInit time) `shouldReturn` MRInitialized
downgradeResult <- (transact . lift) (downgradeTo "12" time)
downgradeResult `shouldSatisfy` \case
MRMigrated{} -> True
_ -> False
transact (upgradeToLatest time) `shouldReturn` MRMigrated "12"
it "supports downgrades for every Git tag" $ singleTransaction do
gitOutput <- liftIO $ readProcess "git" ["log", "--no-walk", "--tags", "--pretty=%D"] ""
let filterOldest = filter (not . isPrefixOf "v1.0.0-alpha")
extractTagName = Safe.headMay . splitOn ", " <=< stripPrefix "tag: "
supportedDowngrades = sort (map fst downgradeShortcuts)
gitTags = (sort . filterOldest . mapMaybe extractTagName . tail . lines) gitOutput
for_ gitTags \t ->
t `shouldSatisfy` (`elem` supportedDowngrades)
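The decoration parsing can be sanity-checked standalone; git log --pretty=%D emits comma-separated ref decorations such as "tag: v1.0.0, origin/stable", and the extraction keeps only the first tag name (sample strings are hypothetical):

    import Control.Monad ((<=<))
    import Data.List (stripPrefix)
    import Data.List.Split (splitOn)
    import Safe (headMay)

    -- Same pipeline as extractTagName above: require the "tag: " prefix,
    -- then keep everything before the first ", " separator.
    extract :: String -> Maybe String
    extract = headMay . splitOn ", " <=< stripPrefix "tag: "

    main :: IO ()
    main = print (map extract ["tag: v1.0.0, origin/stable", "HEAD -> master"])
    -- [Just "v1.0.0", Nothing]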
describe "recreateSystemMetadata" $ do
let dumpMetadata = execPGDump (PGDumpReqBody ["--schema=hdb_catalog"] (Just False)) pgConnInfo