mirror of
https://github.com/hasura/graphql-engine.git
synced 2024-12-17 12:31:52 +03:00
92026b769f
fixes #3868 docker image - `hasura/graphql-engine:inherited-roles-preview-48b73a2de` Note: To be able to use the inherited roles feature, the graphql-engine should be started with the env variable `HASURA_GRAPHQL_EXPERIMENTAL_FEATURES` set to `inherited_roles`. Introduction ------------ This PR implements the idea of multiple roles as presented in this [paper](https://www.microsoft.com/en-us/research/wp-content/uploads/2016/02/FGALanguageICDE07.pdf). The multiple roles feature in this PR can be used via inherited roles. An inherited role is a role which can be created by combining multiple singular roles. For example, if there are two roles `author` and `editor` configured in the graphql-engine, then we can create a inherited role with the name of `combined_author_editor` role which will combine the select permissions of the `author` and `editor` roles and then make GraphQL queries using the `combined_author_editor`. How are select permissions of different roles are combined? ------------------------------------------------------------ A select permission includes 5 things: 1. Columns accessible to the role 2. Row selection filter 3. Limit 4. Allow aggregation 5. Scalar computed fields accessible to the role Suppose there are two roles, `role1` gives access to the `address` column with row filter `P1` and `role2` gives access to both the `address` and the `phone` column with row filter `P2` and we create a new role `combined_roles` which combines `role1` and `role2`. Let's say the following GraphQL query is queried with the `combined_roles` role. ```graphql query { employees { address phone } } ``` This will translate to the following SQL query: ```sql select (case when (P1 or P2) then address else null end) as address, (case when P2 then phone else null end) as phone from employee where (P1 or P2) ``` The other parameters of the select permission will be combined in the following manner: 1. Limit - Minimum of the limits will be the limit of the inherited role 2. 
Allow aggregations - If any of the role allows aggregation, then the inherited role will allow aggregation 3. Scalar computed fields - same as table column fields, as in the above example APIs for inherited roles: ---------------------- 1. `add_inherited_role` `add_inherited_role` is the [metadata API](https://hasura.io/docs/1.0/graphql/core/api-reference/index.html#schema-metadata-api) to create a new inherited role. It accepts two arguments `role_name`: the name of the inherited role to be added (String) `role_set`: list of roles that need to be combined (Array of Strings) Example: ```json { "type": "add_inherited_role", "args": { "role_name":"combined_user", "role_set":[ "user", "user1" ] } } ``` After adding the inherited role, the inherited role can be used like single roles like earlier Note: An inherited role can only be created with non-inherited/singular roles. 2. `drop_inherited_role` The `drop_inherited_role` API accepts the name of the inherited role and drops it from the metadata. It accepts a single argument: `role_name`: name of the inherited role to be dropped Example: ```json { "type": "drop_inherited_role", "args": { "role_name":"combined_user" } } ``` Metadata --------- The derived roles metadata will be included under the `experimental_features` key while exporting the metadata. ```json { "experimental_features": { "derived_roles": [ { "role_name": "manager_is_employee_too", "role_set": [ "employee", "manager" ] } ] } } ``` Scope ------ Only postgres queries and subscriptions are supported in this PR. Important points: ----------------- 1. All columns exposed to an inherited role will be marked as `nullable`, this is done so that cell value nullification can be done. TODOs ------- - [ ] Tests - [ ] Test a GraphQL query running with a inherited role without enabling inherited roles in experimental features - [] Tests for aggregate queries, limit, computed fields, functions, subscriptions (?) 
- [ ] Introspection test with an inherited role (nullability changes in an inherited role) - [ ] Docs - [ ] Changelog Co-authored-by: Vamshi Surabhi <6562944+0x777@users.noreply.github.com> GitOrigin-RevId: 3b8ee1e11f5ceca80fe294f8c074d42fbccfec63
306 lines
14 KiB
Haskell
306 lines
14 KiB
Haskell
-- | Migrations for the Hasura catalog.
--
-- To add a new migration:
--
-- 1. Bump the catalog version number in @src-rsr/catalog_version.txt@.
-- 2. Add a migration script in the @src-rsr/migrations/@ directory with the name
--    @<old version>_to_<new version>.sql@.
-- 3. Create a downgrade script in the @src-rsr/migrations/@ directory with the name
--    @<new version>_to_<old version>.sql@.
-- 4. If making a new release, add the mapping from application version to catalog
--    schema version in @src-rsr/catalog_versions.txt@.
-- 5. If appropriate, add the change to @server/src-rsr/initialise.sql@ for fresh installations
--    of hasura.
--
-- The Template Haskell code in this module will automatically compile the new migration script into
-- the @graphql-engine@ executable.
module Hasura.Server.Migrate
  ( MigrationResult(..)
  , getMigratedFrom
  , migrateCatalog
  , latestCatalogVersion
  , downgradeCatalog
  ) where
|
||
|
||
import Hasura.Prelude
|
||
|
||
import qualified Data.Aeson as A
|
||
import qualified Data.HashMap.Strict.InsOrd as OMap
|
||
import qualified Data.Text as T
|
||
import qualified Data.Text.IO as TIO
|
||
import qualified Database.PG.Query as Q
|
||
import qualified Language.Haskell.TH.Lib as TH
|
||
import qualified Language.Haskell.TH.Syntax as TH
|
||
|
||
import Control.Monad.Trans.Control (MonadBaseControl)
|
||
import Data.Time.Clock (UTCTime)
|
||
import Data.Typeable (cast)
|
||
import System.Directory (doesFileExist)
|
||
|
||
import Hasura.Backends.Postgres.SQL.Types
|
||
import Hasura.Logging (Hasura, LogLevel (..), ToEngineLog (..))
|
||
import Hasura.RQL.DDL.Schema
|
||
import Hasura.RQL.DDL.Schema.LegacyCatalog
|
||
import Hasura.RQL.Types
|
||
import Hasura.Server.Init (DowngradeOptions (..), databaseUrlEnv)
|
||
import Hasura.Server.Logging (StartupLog (..))
|
||
import Hasura.Server.Migrate.Internal
|
||
import Hasura.Server.Migrate.Version (latestCatalogVersion,
|
||
latestCatalogVersionString)
|
||
import Hasura.Server.Types (MaintenanceMode (..))
|
||
|
||
-- | Outcome of attempting to migrate the catalog to the latest version.
data MigrationResult
  = MRNothingToDo
  -- ^ The catalog was already at the latest version.
  | MRInitialized
  -- ^ A fresh catalog was created from scratch.
  | MRMigrated Text -- ^ old catalog version
  -- NOTE(review): constructor name is misspelled ("Maintanence"), but it is
  -- part of the exported interface, so it is kept as-is.
  | MRMaintanenceMode
  -- ^ Migrations were skipped because the server is in maintenance mode.
  deriving (Show, Eq)
|
||
|
||
-- | Render a 'MigrationResult' as a startup log entry at info level under the
-- @catalog_migrate@ kind.
instance ToEngineLog MigrationResult Hasura where
  toEngineLog result = toEngineLog $ StartupLog
    { slLogLevel = LevelInfo
    , slKind = "catalog_migrate"
    , slInfo = A.toJSON $ case result of
        MRNothingToDo ->
          "Already at the latest catalog version (" <> latestCatalogVersionString
          <> "); nothing to do."
        MRInitialized ->
          "Successfully initialized the catalog (at version " <> latestCatalogVersionString <> ")."
        MRMigrated oldVersion ->
          "Successfully migrated from catalog version " <> oldVersion <> " to version "
          <> latestCatalogVersionString <> "."
        MRMaintanenceMode ->
          "Catalog migrations are skipped because the graphql-engine is in maintenance mode"
    }
|
||
|
||
-- | Extract the version a successful migration started from, if any.
-- Returns 'Nothing' when no migration ran (already latest, fresh install, or
-- maintenance mode), or when the stored version text does not parse.
getMigratedFrom
  :: MigrationResult
  -> Maybe Float -- ^ We have version 0.8 as non integral catalog version
getMigratedFrom = \case
  MRNothingToDo -> Nothing
  MRInitialized -> Nothing
  MRMigrated t -> readMaybe (T.unpack t)
  MRMaintanenceMode -> Nothing
|
||
|
||
-- | A migration and (hopefully) also its inverse if we have it.
--
-- Polymorphic because @m@ can be any 'MonadTx', 'MonadIO' when
-- used in the 'migrations' function below.
data MigrationPair m = MigrationPair
  { mpMigrate :: m ()
    -- ^ Forward migration action.
  , mpDown :: Maybe (m ())
    -- ^ Inverse (downgrade) action, when a down-migration script exists.
  }
|
||
|
||
-- | Bring the @hdb_catalog@ schema up to the latest catalog version, creating
-- it from scratch if necessary, and return the resulting metadata.
--
-- In maintenance mode no migration is performed; the catalog is only checked
-- for existence. Throws a 400 error if the stored catalog version is newer
-- than this executable supports.
migrateCatalog
  :: forall m
   . ( MonadTx m
     , MonadIO m
     , MonadBaseControl IO m
     )
  => Maybe (SourceConnConfiguration 'Postgres)
  -> MaintenanceMode
  -> UTCTime
  -> m (MigrationResult, Metadata)
migrateCatalog maybeDefaultSourceConfig maintenanceMode migrationTime = do
  catalogSchemaExists <- doesSchemaExist (SchemaName "hdb_catalog")
  versionTableExists <- doesTableExist (SchemaName "hdb_catalog") (TableName "hdb_version")
  migrationResult <-
    if | maintenanceMode == MaintenanceModeEnabled -> do
           -- In maintenance mode the catalog must already exist; we never
           -- create or migrate it here.
           if | not catalogSchemaExists ->
                  throw500 "unexpected: hdb_catalog schema not found in maintenance mode"
              | not versionTableExists ->
                  throw500 "unexpected: hdb_catalog.hdb_version table not found in maintenance mode"
              -- TODO: should we also have a check for the catalog version?
              | otherwise -> pure MRMaintanenceMode
       | otherwise ->
           if | not catalogSchemaExists -> initialize True
              | not versionTableExists  -> initialize False
              | otherwise               -> migrateFrom =<< getCatalogVersion
  metadata <- liftTx fetchMetadataFromCatalog
  pure (migrationResult, metadata)
  where
    -- initializes the catalog, creating the schema if necessary
    initialize :: Bool -> m MigrationResult
    initialize createSchema = do
      liftTx $ Q.catchE defaultTxErrorHandler $
        when createSchema $ Q.unitQ "CREATE SCHEMA hdb_catalog" () False
      enablePgcryptoExtension
      runTx $(Q.sqlFromFile "src-rsr/initialise.sql")
      updateCatalogVersion

      let emptyMetadata' = case maybeDefaultSourceConfig of
            Nothing -> emptyMetadata
            Just defaultSourceConfig ->
              -- insert metadata with default source
              let defaultSourceMetadata = BackendSourceMetadata $
                    SourceMetadata defaultSource mempty mempty defaultSourceConfig
                  sources = OMap.singleton defaultSource defaultSourceMetadata
              in emptyMetadata{_metaSources = sources}

      liftTx $ insertMetadataInCatalog emptyMetadata'
      pure MRInitialized

    -- migrates an existing catalog to the latest version from an existing version
    migrateFrom :: Text -> m MigrationResult
    migrateFrom previousVersion
      | previousVersion == latestCatalogVersionString = pure MRNothingToDo
      -- An empty migration list means the stored version is not among the
      -- known "from" versions, i.e. the database was used by a newer release.
      | [] <- neededMigrations =
          throw400 NotSupported $
            "Cannot use database previously used with a newer version of graphql-engine (expected"
            <> " a catalog version <=" <> latestCatalogVersionString <> ", but the current version"
            <> " is " <> previousVersion <> ")."
      | otherwise = do
          traverse_ (mpMigrate . snd) neededMigrations
          updateCatalogVersion
          pure $ MRMigrated previousVersion
      where
        neededMigrations =
          dropWhile ((/= previousVersion) . fst) (migrations maybeDefaultSourceConfig False maintenanceMode)

    updateCatalogVersion = setCatalogVersion latestCatalogVersionString migrationTime
|
||
|
||
-- | Downgrade the catalog to the version given in 'DowngradeOptions',
-- running the recorded down-migrations in reverse order. Fails with a 400
-- error when no down-migration path exists between the current and target
-- versions. In dry-run mode the catalog version is not updated.
downgradeCatalog
  :: forall m. (MonadIO m, MonadTx m)
  => Maybe (SourceConnConfiguration 'Postgres)
  -> DowngradeOptions -> UTCTime -> m MigrationResult
downgradeCatalog defaultSourceConfig opts time = do
    downgradeFrom =<< getCatalogVersion
  where
    -- downgrades an existing catalog to the specified version
    downgradeFrom :: Text -> m MigrationResult
    downgradeFrom previousVersion
      | previousVersion == dgoTargetVersion opts = pure MRNothingToDo
      | otherwise =
          case neededDownMigrations (dgoTargetVersion opts) of
            Left reason ->
              throw400 NotSupported $
                "This downgrade path (from "
                <> previousVersion <> " to "
                <> dgoTargetVersion opts <>
                ") is not supported, because "
                <> reason
            Right path -> do
              sequence_ path
              unless (dgoDryRun opts) do
                setCatalogVersion (dgoTargetVersion opts) time
              pure (MRMigrated previousVersion)

      where
        neededDownMigrations newVersion =
          downgrade previousVersion newVersion
            (reverse (migrations defaultSourceConfig (dgoDryRun opts) MaintenanceModeDisabled))

        downgrade
          :: Text
          -> Text
          -> [(Text, MigrationPair m)]
          -> Either Text [m ()]
        downgrade lower upper = skipFutureDowngrades where
          -- We find the list of downgrade scripts to run by first
          -- dropping any downgrades which correspond to newer versions
          -- of the schema than the one we're running currently.
          -- Then we take migrations as needed until we reach the target
          -- version, dropping any remaining migrations from the end of the
          -- (reversed) list.
          skipFutureDowngrades, dropOlderDowngrades :: [(Text, MigrationPair m)] -> Either Text [m ()]
          skipFutureDowngrades xs | previousVersion == lower = dropOlderDowngrades xs
          skipFutureDowngrades [] = Left "the starting version is unrecognized."
          skipFutureDowngrades ((x, _):xs)
            | x == lower = dropOlderDowngrades xs
            | otherwise = skipFutureDowngrades xs

          dropOlderDowngrades [] = Left "the target version is unrecognized."
          dropOlderDowngrades ((x, MigrationPair{ mpDown = Nothing }):_) =
            Left $ "there is no available migration back to version " <> x <> "."
          dropOlderDowngrades ((x, MigrationPair{ mpDown = Just y }):xs)
            | x == upper = Right [y]
            | otherwise = (y:) <$> dropOlderDowngrades xs
|
||
|
||
-- | Record the catalog version (and the time it was set) in
-- @hdb_catalog.hdb_version@, upserting the single row of that table.
setCatalogVersion :: MonadTx m => Text -> UTCTime -> m ()
setCatalogVersion ver time = liftTx $ Q.unitQE defaultTxErrorHandler [Q.sql|
    INSERT INTO hdb_catalog.hdb_version (version, upgraded_on) VALUES ($1, $2)
    ON CONFLICT ((version IS NOT NULL))
    DO UPDATE SET version = $1, upgraded_on = $2
  |] (ver, time) False
|
||
|
||
-- | The ordered list of all catalog migrations, keyed by the version they
-- migrate /from/. Most entries are generated at compile time from the SQL
-- files in @src-rsr/migrations/@; a few (3→4, 42→43 and its inverse) are
-- hand-written because they must transform metadata, not just the schema.
migrations
  :: forall m. (MonadIO m, MonadTx m)
  => Maybe (SourceConnConfiguration 'Postgres) -> Bool -> MaintenanceMode -> [(Text, MigrationPair m)]
migrations maybeDefaultSourceConfig dryRun maintenanceMode =
    -- We need to build the list of migrations at compile-time so that we can compile the SQL
    -- directly into the executable using `Q.sqlFromFile`. The GHC stage restriction makes
    -- doing this a little bit awkward (we can’t use any definitions in this module at
    -- compile-time), but putting a `let` inside the splice itself is allowed.
    $(let migrationFromFile from to =
            let path = "src-rsr/migrations/" <> from <> "_to_" <> to <> ".sql"
            in [| runTxOrPrint $(Q.sqlFromFile path) |]
          migrationFromFileMaybe from to = do
            let path = "src-rsr/migrations/" <> from <> "_to_" <> to <> ".sql"
            exists <- TH.runIO (doesFileExist path)
            if exists
              then [| Just (runTxOrPrint $(Q.sqlFromFile path)) |]
              else [| Nothing |]

          -- Build one (fromVersion, MigrationPair) entry per target version,
          -- with a down-migration only when the reverse SQL file exists.
          migrationsFromFile = map $ \(to :: Integer) ->
            let from = to - 1
            in [| ( $(TH.lift $ tshow from)
                  , MigrationPair
                      $(migrationFromFile (show from) (show to))
                      $(migrationFromFileMaybe (show to) (show from))
                  ) |]
      in TH.listE
        -- version 0.8 is the only non-integral catalog version
        $ [| ("0.8", MigrationPair $(migrationFromFile "08" "1") Nothing) |]
        : migrationsFromFile [2..3]
        ++ [| ("3", MigrationPair from3To4 Nothing) |]
        : migrationsFromFile [5..42]
        ++ [| ("42", MigrationPair from42To43 (Just from43To42)) |]
        : migrationsFromFile [44]
      )
  where
    -- In dry-run mode print the SQL instead of executing it.
    runTxOrPrint :: Q.Query -> m ()
    runTxOrPrint
      | dryRun =
          liftIO . TIO.putStrLn . Q.getQueryText
      | otherwise = runTx

    -- 42→43 moves metadata out of the hdb_tables-style catalog into the
    -- sources-based Metadata, which requires a default source configuration.
    from42To43 = do
      when (maintenanceMode == MaintenanceModeEnabled) $
        throw500 "cannot migrate to catalog version 43 in maintenance mode"
      let query = $(Q.sqlFromFile "src-rsr/migrations/42_to_43.sql")
      if dryRun then (liftIO . TIO.putStrLn . Q.getQueryText) query
      else do
        metadataV2 <- fetchMetadataFromHdbTables
        runTx query
        defaultSourceConfig <- onNothing maybeDefaultSourceConfig $ throw400 NotSupported $
          "cannot migrate to catalog version 43 without --database-url or env var " <> tshow (fst databaseUrlEnv)
        let metadataV3 =
              let MetadataNoSources{..} = metadataV2
                  defaultSourceMetadata = BackendSourceMetadata $
                    SourceMetadata defaultSource _mnsTables _mnsFunctions defaultSourceConfig
              in Metadata (OMap.singleton defaultSource defaultSourceMetadata)
                   _mnsRemoteSchemas _mnsQueryCollections _mnsAllowlist _mnsCustomTypes _mnsActions _mnsCronTriggers mempty
                   emptyApiLimit emptyMetricsConfig mempty
        liftTx $ insertMetadataInCatalog metadataV3

    -- 43→42 is only possible when at most one (postgres) source is configured,
    -- since the old catalog format cannot represent multiple sources.
    from43To42 = do
      let query = $(Q.sqlFromFile "src-rsr/migrations/43_to_42.sql")
      if dryRun then (liftIO . TIO.putStrLn . Q.getQueryText) query
      else do
        Metadata{..} <- liftTx fetchMetadataFromCatalog
        runTx query
        let emptyMetadataNoSources =
              MetadataNoSources mempty mempty mempty mempty mempty emptyCustomTypes mempty mempty
        metadataV2 <- case OMap.toList _metaSources of
          [] -> pure emptyMetadataNoSources
          [(_, BackendSourceMetadata sm)] ->
            pure $ case cast sm of
              -- A non-postgres source cannot be represented in the old format;
              -- fall back to empty metadata.
              Nothing -> emptyMetadataNoSources
              Just SourceMetadata{..} ->
                MetadataNoSources _smTables _smFunctions _metaRemoteSchemas _metaQueryCollections
                  _metaAllowlist _metaCustomTypes _metaActions _metaCronTriggers
          _ -> throw400 NotSupported "Cannot downgrade since there are more than one source"
        liftTx $ runHasSystemDefinedT (SystemDefined False) $ saveMetadataToHdbTables metadataV2
        recreateSystemMetadata
|