graphql-engine/server/src-lib/Hasura/Server/Migrate.hs


-- | Migrations for the Hasura catalog.
--
-- To add a new migration:
--
--  1. Bump the catalog version number in @src-rsr/catalog_version.txt@.
--  2. Add a migration script in the @src-rsr/migrations/@ directory with the name
--     @<old version>_to_<new version>.sql@.
--  3. Create a downgrade script in the @src-rsr/migrations/@ directory with the name
--     @<new version>_to_<old version>.sql@.
--  4. If making a new release, add the mapping from application version to catalog
--     schema version in @src-rsr/catalog_versions.txt@.
--  5. If appropriate, add the change to @server/src-rsr/initialise.sql@ for fresh installations
--     of hasura.
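--
-- For instance (a sketch, with purely illustrative version numbers), bumping the
-- catalog from version 42 to 43 would add the files
--
-- > src-rsr/migrations/42_to_43.sql   -- upgrade script
-- > src-rsr/migrations/43_to_42.sql   -- downgrade script
--
-- and update @src-rsr/catalog_version.txt@ to contain @43@.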
--
-- The Template Haskell code in this module will automatically compile the new migration script into
-- the @graphql-engine@ executable.
module Hasura.Server.Migrate
( MigrationResult(..)
, migrateCatalog
, latestCatalogVersion
, recreateSystemMetadata
, dropCatalog
, downgradeCatalog
) where
import Hasura.Prelude
import qualified Data.Aeson as A
import qualified Data.Environment as Env
import qualified Data.HashMap.Strict as HM
import qualified Data.Text as T
import qualified Data.Text.IO as TIO
import qualified Database.PG.Query as Q
import qualified Database.PG.Query.Connection as Q
import qualified Language.Haskell.TH.Lib as TH
import qualified Language.Haskell.TH.Syntax as TH
import Control.Lens (_2, view)
import Control.Monad.Unique
import Data.Text.NonEmpty
import Data.Time.Clock (UTCTime)
import System.Directory (doesFileExist)
import Hasura.Backends.Postgres.SQL.Types
import Hasura.Logging (Hasura, LogLevel (..), ToEngineLog (..))
import Hasura.RQL.DDL.Relationship
import Hasura.RQL.DDL.Schema
import Hasura.RQL.Types
import Hasura.Server.Init (DowngradeOptions (..))
import Hasura.Server.Logging (StartupLog (..))
import Hasura.Server.Migrate.Version (latestCatalogVersion,
latestCatalogVersionString)
import Hasura.Server.Version
dropCatalog :: (MonadTx m) => m ()
dropCatalog = liftTx $ Q.catchE defaultTxErrorHandler $ do
-- This is where the generated views and triggers are stored
Q.unitQ "DROP SCHEMA IF EXISTS hdb_views CASCADE" () False
Q.unitQ "DROP SCHEMA IF EXISTS hdb_catalog CASCADE" () False
data MigrationResult
= MRNothingToDo
| MRInitialized
| MRMigrated Text -- ^ old catalog version
deriving (Show, Eq)
instance ToEngineLog MigrationResult Hasura where
toEngineLog result = toEngineLog $ StartupLog
{ slLogLevel = LevelInfo
, slKind = "catalog_migrate"
, slInfo = A.toJSON $ case result of
MRNothingToDo ->
"Already at the latest catalog version (" <> latestCatalogVersionString
<> "); nothing to do."
MRInitialized ->
"Successfully initialized the catalog (at version " <> latestCatalogVersionString <> ")."
MRMigrated oldVersion ->
"Successfully migrated from catalog version " <> oldVersion <> " to version "
<> latestCatalogVersionString <> "."
}
-- A migration and, when available, its inverse.
-- Polymorphic because `m` can be any monad satisfying `MonadTx` and `MonadIO` when
-- used in the `migrations` function below.
data MigrationPair m = MigrationPair
{ mpMigrate :: m ()
, mpDown :: Maybe (m ())
}
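-- For illustration (a sketch, not an actual entry of 'migrations'; the version
-- numbers are made up), a pair whose upgrade script exists but whose downgrade
-- script is missing would be built as
--
-- > MigrationPair
-- >   { mpMigrate = runTx $(Q.sqlFromFile "src-rsr/migrations/41_to_42.sql")
-- >   , mpDown    = Nothing
-- >   }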
migrateCatalog
:: forall m
. ( HasVersion
, MonadIO m
, MonadTx m
, MonadUnique m
, HasHttpManager m
, HasSQLGenCtx m
)
=> Env.Environment
-> UTCTime
-> m (MigrationResult, RebuildableSchemaCache m)
migrateCatalog env migrationTime = do
doesSchemaExist (SchemaName "hdb_catalog") >>= \case
False -> initialize True
True -> doesTableExist (SchemaName "hdb_catalog") (TableName "hdb_version") >>= \case
False -> initialize False
True -> migrateFrom =<< getCatalogVersion
where
-- initializes the catalog, creating the schema if necessary
initialize :: Bool -> m (MigrationResult, RebuildableSchemaCache m)
initialize createSchema = do
liftTx $ Q.catchE defaultTxErrorHandler $
when createSchema $ do
Q.unitQ "CREATE SCHEMA hdb_catalog" () False
-- This is where the generated views and triggers are stored
Q.unitQ "CREATE SCHEMA hdb_views" () False
isExtensionAvailable "pgcrypto" >>= \case
-- only if we created the schema, create the extension
True -> when createSchema $ liftTx $ Q.unitQE needsPGCryptoError
"CREATE EXTENSION IF NOT EXISTS pgcrypto SCHEMA public" () False
False -> throw500 $
"pgcrypto extension is required, but could not find the extension in the "
<> "PostgreSQL server. Please make sure this extension is available."
runTx $(Q.sqlFromFile "src-rsr/initialise.sql")
schemaCache <- buildCacheAndRecreateSystemMetadata
updateCatalogVersion
pure (MRInitialized, schemaCache)
where
needsPGCryptoError e@(Q.PGTxErr _ _ _ err) =
case err of
Q.PGIUnexpected _ -> requiredError
Q.PGIStatement pgErr -> case Q.edStatusCode pgErr of
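-- SQLSTATE 42501 is Postgres's insufficient_privilege error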
Just "42501" -> err500 PostgresError permissionsMessage
_ -> requiredError
where
requiredError =
(err500 PostgresError requiredMessage) { qeInternal = Just $ A.toJSON e }
requiredMessage =
"pgcrypto extension is required, but it could not be created;"
<> " encountered unknown postgres error"
permissionsMessage =
"pgcrypto extension is required, but the current user doesnt have permission to"
<> " create it. Please grant superuser permission, or setup the initial schema via"
<> " https://hasura.io/docs/1.0/graphql/manual/deployment/postgres-permissions.html"
-- migrates an existing catalog from the given version to the latest version
migrateFrom :: Text -> m (MigrationResult, RebuildableSchemaCache m)
migrateFrom previousVersion
| previousVersion == latestCatalogVersionString = do
schemaCache <- buildRebuildableSchemaCache env
pure (MRNothingToDo, schemaCache)
| [] <- neededMigrations =
throw400 NotSupported $
"Cannot use database previously used with a newer version of graphql-engine (expected"
<> " a catalog version <=" <> latestCatalogVersionString <> ", but the current version"
<> " is " <> previousVersion <> ")."
| otherwise = do
traverse_ (mpMigrate . snd) neededMigrations
schemaCache <- buildCacheAndRecreateSystemMetadata
updateCatalogVersion
pure (MRMigrated previousVersion, schemaCache)
where
neededMigrations = dropWhile ((/= previousVersion) . fst) (migrations False)
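-- A hedged worked example (version numbers illustrative): with the catalog at
-- version "40" and "43" as the latest version,
--
-- > dropWhile ((/= "40") . fst) (migrations False)
--
-- keeps the entries keyed "40", "41" and "42", whose 'mpMigrate' actions perform
-- 40 -> 41, 41 -> 42 and 42 -> 43, in that order. An unrecognized (newer) version
-- leaves the list empty, which triggers the "newer version" error above.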
updateCatalogVersion = setCatalogVersion latestCatalogVersionString migrationTime
buildCacheAndRecreateSystemMetadata :: m (RebuildableSchemaCache m)
buildCacheAndRecreateSystemMetadata = do
schemaCache <- buildRebuildableSchemaCache env
view _2 <$> runCacheRWT schemaCache recreateSystemMetadata
downgradeCatalog :: forall m. (MonadIO m, MonadTx m) => DowngradeOptions -> UTCTime -> m MigrationResult
downgradeCatalog opts time = do
downgradeFrom =<< getCatalogVersion
where
-- downgrades an existing catalog to the specified version
downgradeFrom :: Text -> m MigrationResult
downgradeFrom previousVersion
| previousVersion == dgoTargetVersion opts = do
pure MRNothingToDo
| otherwise =
case neededDownMigrations (dgoTargetVersion opts) of
Left reason ->
throw400 NotSupported $
"This downgrade path (from "
<> previousVersion <> " to "
<> dgoTargetVersion opts <>
") is not supported, because "
<> reason
Right path -> do
sequence_ path
unless (dgoDryRun opts) do
setCatalogVersion (dgoTargetVersion opts) time
pure (MRMigrated previousVersion)
where
neededDownMigrations newVersion =
downgrade previousVersion newVersion
(reverse (migrations (dgoDryRun opts)))
downgrade
:: Text
-> Text
-> [(Text, MigrationPair m)]
-> Either Text [m ()]
downgrade lower upper = skipFutureDowngrades where
-- We find the list of downgrade scripts to run by first
-- dropping any downgrades which correspond to newer versions
-- of the schema than the one we're running currently.
-- Then we take migrations as needed until we reach the target
-- version, dropping any remaining migrations from the end of the
-- (reversed) list.
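--
-- A hedged worked example (version numbers illustrative): with the catalog at
-- version "40", a target of "38", and "43" as the latest version,
--
-- > downgrade "40" "38" (reverse (migrations (dgoDryRun opts)))
--
-- skips the entries keyed "42", "41" and "40" (their down-migrations concern
-- versions newer than "40") and returns the 'mpDown' actions of the entries
-- keyed "39" and "38", i.e. the steps 40 -> 39 and 39 -> 38, in that order.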
skipFutureDowngrades, dropOlderDowngrades :: [(Text, MigrationPair m)] -> Either Text [m ()]
-- when we're already at the latest version, there is nothing to skip
skipFutureDowngrades xs | previousVersion == latestCatalogVersionString = dropOlderDowngrades xs
skipFutureDowngrades [] = Left "the starting version is unrecognized."
skipFutureDowngrades ((x, _):xs)
| x == lower = dropOlderDowngrades xs
| otherwise = skipFutureDowngrades xs
dropOlderDowngrades [] = Left "the target version is unrecognized."
dropOlderDowngrades ((x, MigrationPair{ mpDown = Nothing }):_) =
Left $ "there is no available migration back to version " <> x <> "."
dropOlderDowngrades ((x, MigrationPair{ mpDown = Just y }):xs)
| x == upper = Right [y]
| otherwise = (y:) <$> dropOlderDowngrades xs
-- | The old 0.8 catalog version is non-integral, so we store it in the database as a
-- string.
getCatalogVersion :: MonadTx m => m Text
getCatalogVersion = liftTx $ runIdentity . Q.getRow <$> Q.withQE defaultTxErrorHandler
[Q.sql| SELECT version FROM hdb_catalog.hdb_version |] () False
setCatalogVersion :: MonadTx m => Text -> UTCTime -> m ()
setCatalogVersion ver time = liftTx $ Q.unitQE defaultTxErrorHandler [Q.sql|
INSERT INTO hdb_catalog.hdb_version (version, upgraded_on) VALUES ($1, $2)
ON CONFLICT ((version IS NOT NULL))
DO UPDATE SET version = $1, upgraded_on = $2
|] (ver, time) False
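-- Note: the single-row upsert above relies on @hdb_catalog.hdb_version@ containing
-- at most one row; presumably this is enforced by a unique expression index created
-- in @initialise.sql@, along these lines (a sketch only, index name illustrative):
--
-- > CREATE UNIQUE INDEX hdb_version_one_row
-- >   ON hdb_catalog.hdb_version ((version IS NOT NULL));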
migrations :: forall m. (MonadIO m, MonadTx m) => Bool -> [(Text, MigrationPair m)]
migrations dryRun =
-- We need to build the list of migrations at compile-time so that we can compile the SQL
-- directly into the executable using `Q.sqlFromFile`. The GHC stage restriction makes
-- doing this a little bit awkward (we can't use any definitions in this module at
-- compile-time), but putting a `let` inside the splice itself is allowed.
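--
-- For illustration (a hedged sketch; the actual splice output differs in detail),
-- each generated element of the list corresponds to something like
--
-- > ( "41"
-- > , MigrationPair
-- >     (runTxOrPrint $(Q.sqlFromFile "src-rsr/migrations/41_to_42.sql"))
-- >     (Just (runTxOrPrint $(Q.sqlFromFile "src-rsr/migrations/42_to_41.sql")))
-- > )
--
-- with the second field collapsing to 'Nothing' when no downgrade script exists on disk.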
$(let migrationFromFile from to =
let path = "src-rsr/migrations/" <> from <> "_to_" <> to <> ".sql"
in [| runTxOrPrint $(Q.sqlFromFile path) |]
migrationFromFileMaybe from to = do
let path = "src-rsr/migrations/" <> from <> "_to_" <> to <> ".sql"
exists <- TH.runIO (doesFileExist path)
if exists
then [| Just (runTxOrPrint $(Q.sqlFromFile path)) |]
else [| Nothing |]
migrationsFromFile = map $ \(to :: Integer) ->
let from = to - 1
in [| ( $(TH.lift $ T.pack (show from))
, MigrationPair
$(migrationFromFile (show from) (show to))
$(migrationFromFileMaybe (show to) (show from))
) |]
in TH.listE
-- version 0.8 is the only non-integral catalog version
$ [| ("0.8", MigrationPair $(migrationFromFile "08" "1") Nothing) |]
: migrationsFromFile [2..3]
++ [| ("3", MigrationPair from3To4 Nothing) |]
: migrationsFromFile [5..latestCatalogVersion])
where
runTxOrPrint :: Q.Query -> m ()
runTxOrPrint
| dryRun =
liftIO . TIO.putStrLn . Q.getQueryText
| otherwise = runTx
from3To4 = liftTx $ Q.catchE defaultTxErrorHandler $ do
Q.unitQ [Q.sql|
ALTER TABLE hdb_catalog.event_triggers
ADD COLUMN configuration JSON |] () False
eventTriggers <- map uncurryEventTrigger <$> Q.listQ [Q.sql|
SELECT e.name, e.definition::json, e.webhook, e.num_retries, e.retry_interval, e.headers::json
FROM hdb_catalog.event_triggers e |] () False
forM_ eventTriggers updateEventTrigger3To4
Q.unitQ [Q.sql|
ALTER TABLE hdb_catalog.event_triggers
DROP COLUMN definition,
DROP COLUMN query,
DROP COLUMN webhook,
DROP COLUMN num_retries,
DROP COLUMN retry_interval,
DROP COLUMN headers |] () False
where
uncurryEventTrigger (trn, Q.AltJ tDef, w, nr, rint, Q.AltJ headers) =
EventTriggerConf trn tDef (Just w) Nothing (RetryConf nr rint Nothing) headers
updateEventTrigger3To4 etc@(EventTriggerConf name _ _ _ _ _) = Q.unitQ [Q.sql|
UPDATE hdb_catalog.event_triggers
SET
configuration = $1
WHERE name = $2
|] (Q.AltJ $ A.toJSON etc, name) True
-- | Drops and recreates all “system-defined” metadata, aka metadata for tables and views in the
-- @information_schema@ and @hdb_catalog@ schemas. These tables and views are tracked to expose them
-- to the console, which allows us to reuse the same functionality we use to implement user-defined
-- APIs to expose the catalog.
--
-- This process has a long and storied history.
--
-- In the past, we reused the same machinery we use for CLI migrations to define our own internal
-- metadata migrations. This caused trouble, however, as we'd have to run those migrations in
-- lockstep with our SQL migrations to ensure the two didn't get out of sync. This in turn caused
-- trouble because those migrations would hit code paths inside @graphql-engine@ to add or remove
-- things from the @pg_catalog@ tables, and /that/ in turn would fail because we hadn't finished
-- running the SQL migrations, so we were running a new version of the code against an old version
-- of the schema! That caused #2826.
--
-- To fix that, #2379 switched to the approach of just dropping and recreating all system metadata
-- every time we run any SQL migrations. But /that/ in turn caused trouble due to the way we were
-- constantly rebuilding the schema cache (#3354), causing us to switch to incremental schema cache
-- construction (#3394). However, although that mostly resolved the problem, we still weren't
-- totally out of the woods, as the incremental construction was still too slow on slow Postgres
-- instances (#3654).
--
-- To sidestep the whole issue, as of #3686 we now just create all the system metadata in code here,
-- and we only rebuild the schema cache once, at the very end. This is a little unsatisfying, since
-- it means our internal migrations are “blessed” compared to user-defined CLI migrations. If we
-- improve CLI migrations further in the future, maybe we can switch back to using that approach,
-- instead.
recreateSystemMetadata :: (MonadTx m, CacheRWM m) => m ()
recreateSystemMetadata = do
runTx $(Q.sqlFromFile "src-rsr/clear_system_metadata.sql")
runHasSystemDefinedT (SystemDefined True) $ for_ systemMetadata \(tableName, tableRels) -> do
saveTableToCatalog tableName False emptyTableConfig
for_ tableRels \case
Left relDef -> insertRelationshipToCatalog tableName ObjRel relDef
Right relDef -> insertRelationshipToCatalog tableName ArrRel relDef
buildSchemaCacheStrict
where
systemMetadata :: [(QualifiedTable, [Either ObjRelDef ArrRelDef])]
systemMetadata =
[ table "information_schema" "tables" []
, table "information_schema" "schemata" []
, table "information_schema" "views" []
, table "information_schema" "columns" []
, table "hdb_catalog" "hdb_table"
[ objectRel $$(nonEmptyText "detail") $
manualConfig "information_schema" "tables" tableNameMapping
, objectRel $$(nonEmptyText "primary_key") $
manualConfig "hdb_catalog" "hdb_primary_key" tableNameMapping
, arrayRel $$(nonEmptyText "columns") $
manualConfig "information_schema" "columns" tableNameMapping
, arrayRel $$(nonEmptyText "foreign_key_constraints") $
manualConfig "hdb_catalog" "hdb_foreign_key_constraint" tableNameMapping
, arrayRel $$(nonEmptyText "relationships") $
manualConfig "hdb_catalog" "hdb_relationship" tableNameMapping
, arrayRel $$(nonEmptyText "permissions") $
manualConfig "hdb_catalog" "hdb_permission_agg" tableNameMapping
, arrayRel $$(nonEmptyText "computed_fields") $
manualConfig "hdb_catalog" "hdb_computed_field" tableNameMapping
, arrayRel $$(nonEmptyText "check_constraints") $
manualConfig "hdb_catalog" "hdb_check_constraint" tableNameMapping
, arrayRel $$(nonEmptyText "unique_constraints") $
manualConfig "hdb_catalog" "hdb_unique_constraint" tableNameMapping
]
, table "hdb_catalog" "hdb_primary_key" []
, table "hdb_catalog" "hdb_foreign_key_constraint" []
, table "hdb_catalog" "hdb_relationship" []
, table "hdb_catalog" "hdb_permission_agg" []
, table "hdb_catalog" "hdb_computed_field" []
, table "hdb_catalog" "hdb_check_constraint" []
, table "hdb_catalog" "hdb_unique_constraint" []
, table "hdb_catalog" "hdb_remote_relationship" []
, table "hdb_catalog" "event_triggers"
[ arrayRel $$(nonEmptyText "events") $
manualConfig "hdb_catalog" "event_log" [("name", "trigger_name")] ]
, table "hdb_catalog" "event_log"
[ objectRel $$(nonEmptyText "trigger") $
manualConfig "hdb_catalog" "event_triggers" [("trigger_name", "name")]
, arrayRel $$(nonEmptyText "logs") $ RUFKeyOn $
ArrRelUsingFKeyOn (QualifiedObject "hdb_catalog" "event_invocation_logs") "event_id" ]
, table "hdb_catalog" "event_invocation_logs"
[ objectRel $$(nonEmptyText "event") $ RUFKeyOn "event_id" ]
, table "hdb_catalog" "hdb_function" []
, table "hdb_catalog" "hdb_function_agg"
[ objectRel $$(nonEmptyText "return_table_info") $ manualConfig "hdb_catalog" "hdb_table"
[ ("return_type_schema", "table_schema")
, ("return_type_name", "table_name") ] ]
, table "hdb_catalog" "remote_schemas" []
, table "hdb_catalog" "hdb_version" []
, table "hdb_catalog" "hdb_query_collection" []
, table "hdb_catalog" "hdb_allowlist" []
, table "hdb_catalog" "hdb_custom_types" []
, table "hdb_catalog" "hdb_action_permission" []
, table "hdb_catalog" "hdb_action"
[ arrayRel $$(nonEmptyText "permissions") $ manualConfig "hdb_catalog" "hdb_action_permission"
[("action_name", "action_name")]
]
, table "hdb_catalog" "hdb_action_log" []
, table "hdb_catalog" "hdb_role"
[ arrayRel $$(nonEmptyText "action_permissions") $ manualConfig "hdb_catalog" "hdb_action_permission"
[("role_name", "role_name")]
, arrayRel $$(nonEmptyText "permissions") $ manualConfig "hdb_catalog" "hdb_permission_agg"
[("role_name", "role_name")]
]
, table "hdb_catalog" "hdb_cron_triggers"
[ arrayRel $$(nonEmptyText "cron_events") $ RUFKeyOn $
ArrRelUsingFKeyOn (QualifiedObject "hdb_catalog" "hdb_cron_events") "trigger_name"
]
, table "hdb_catalog" "hdb_cron_events"
[ objectRel $$(nonEmptyText "cron_trigger") $ RUFKeyOn "trigger_name"
, arrayRel $$(nonEmptyText "cron_event_logs") $ RUFKeyOn $
ArrRelUsingFKeyOn (QualifiedObject "hdb_catalog" "hdb_cron_event_invocation_logs") "event_id"
]
, table "hdb_catalog" "hdb_cron_event_invocation_logs"
[ objectRel $$(nonEmptyText "cron_event") $ RUFKeyOn "event_id"
]
, table "hdb_catalog" "hdb_scheduled_events"
[ arrayRel $$(nonEmptyText "scheduled_event_logs") $ RUFKeyOn $
ArrRelUsingFKeyOn (QualifiedObject "hdb_catalog" "hdb_scheduled_event_invocation_logs") "event_id"
]
, table "hdb_catalog" "hdb_scheduled_event_invocation_logs"
[ objectRel $$(nonEmptyText "scheduled_event") $ RUFKeyOn "event_id"
]
]
tableNameMapping =
[ ("table_schema", "table_schema")
, ("table_name", "table_name") ]
table schemaName tableName relationships = (QualifiedObject schemaName tableName, relationships)
objectRel name using = Left $ RelDef (RelName name) using Nothing
arrayRel name using = Right $ RelDef (RelName name) using Nothing
manualConfig schemaName tableName columns =
RUManual $ RelManualConfig (QualifiedObject schemaName tableName) (HM.fromList columns)
runTx :: (MonadTx m) => Q.Query -> m ()
runTx = liftTx . Q.multiQE defaultTxErrorHandler