-- graphql-engine/server/src-lib/Hasura/Server/Init/Config.hs
-- | Types and classes related to configuration when the server is initialised
module Hasura.Server.Init.Config
( API (..),
DowngradeOptions (..),
Env,
FromEnv (..),
HGECommand,
HGECommandG (..),
HGEOptions,
HGEOptionsG (..),
KeepAliveDelay (..),
OptionalInterval (..),
PostgresConnInfo (..),
PostgresRawConnDetails (..),
PostgresRawConnInfo (..),
RawAuthHook,
RawConnParams (..),
RawHGECommand,
RawHGEOptions,
RawServeOptions (..),
ResponseInternalErrorsConfig (..),
ServeOptions (..),
WSConnectionInitTimeout (..),
WithEnv,
defaultKeepAliveDelay,
defaultWSConnectionInitTimeout,
msToOptionalInterval,
parseStrAsBool,
rawConnDetailsToUrl,
rawConnDetailsToUrlText,
readAPIs,
readExperimentalFeatures,
readHookType,
readJson,
readLogLevel,
readNonNegativeInt,
runWithEnv,
shouldIncludeInternal,
)
where
import Data.Aeson
import Data.Aeson qualified as J
import Data.Aeson.Casing qualified as J
import Data.Aeson.TH qualified as J
import Data.Char (toLower)
import Data.HashSet qualified as Set
import Data.String qualified as DataString
import Data.Text qualified as T
import Data.Time
import Data.URL.Template
import Database.PG.Query qualified as Q
import Hasura.GraphQL.Execute.LiveQuery.Options qualified as LQ
import Hasura.Logging qualified as L
import Hasura.Prelude
import Hasura.RQL.Types
import Hasura.Server.Auth
import Hasura.Server.Cors
import Hasura.Server.Types
import Hasura.Server.Utils
import Hasura.Session
import Network.Wai.Handler.Warp (HostPreference)
import Network.WebSockets qualified as WS
data RawConnParams = RawConnParams
{ rcpStripes :: !(Maybe Int),
rcpConns :: !(Maybe Int),
rcpIdleTime :: !(Maybe Int),
-- | Time from connection creation after which to destroy a connection and
-- choose a different/new one.
rcpConnLifetime :: !(Maybe NominalDiffTime),
rcpAllowPrepare :: !(Maybe Bool),
-- | See @HASURA_GRAPHQL_PG_POOL_TIMEOUT@
rcpPoolTimeout :: !(Maybe NominalDiffTime)
}
deriving (Show, Eq)
type RawAuthHook = AuthHookG (Maybe Text) (Maybe AuthHookType)
-- | Sleep time interval for recurring activities such as 'asyncActionsProcessor'.
-- Presently, 'msToOptionalInterval' interprets @0@ as 'Skip'.
data OptionalInterval
= -- | No polling
Skip
| -- | Interval time
Interval !Milliseconds
deriving (Show, Eq)
msToOptionalInterval :: Milliseconds -> OptionalInterval
msToOptionalInterval = \case
0 -> Skip
s -> Interval s
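-- Illustrative examples (not part of the original source; they rely on the
-- 'Num' and 'Eq' instances of 'Milliseconds' already used by the pattern
-- match above):
--
-- >>> msToOptionalInterval 0 == Skip
-- True
-- >>> msToOptionalInterval 500 == Interval 500
-- True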
instance FromJSON OptionalInterval where
parseJSON v = msToOptionalInterval <$> parseJSON v
instance ToJSON OptionalInterval where
toJSON = \case
Skip -> toJSON @Milliseconds 0
Interval s -> toJSON s
data API
= METADATA
| GRAPHQL
| PGDUMP
| DEVELOPER
| CONFIG
deriving (Show, Eq, Read, Generic)
$( J.deriveJSON
(J.defaultOptions {J.constructorTagModifier = map toLower})
''API
)
instance Hashable API
{- Note: [Experimental features]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The graphql-engine accepts a list of experimental features that can be
enabled at startup. Experimental features are a way to introduce new, but
not yet stable, features to our users in a manner that lets them choose
whether to enable or disable each feature.

The objective of an experimental feature is that, when the feature is
disabled, the graphql-engine should work the same way as it did before the
feature was added.

The experimental features flag is `--experimental-features` and the
corresponding environment variable is `HASURA_GRAPHQL_EXPERIMENTAL_FEATURES`,
which expects a comma-separated value.

When an experimental feature is stable enough, i.e. it has remained stable
through multiple non-beta releases, we make the feature non-experimental,
i.e. it is always enabled. Note that when we do this we still have to
support parsing the experimental feature, because users of the previous
version will have it enabled, and an error should not be thrown at startup
when they upgrade. For example: inherited roles were introduced as an
experimental feature, enabled by setting `--experimental-features` to
`inherited_roles`. When it was later decided to make inherited roles a
stable feature, they were removed from the experimental features, but the
code was modified so that `--experimental-features inherited_roles` does not
throw an error.
-}
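-- A hypothetical invocation sketch (the flag and environment variable names
-- come from the note above; the exact command lines are illustrative only):
--
-- > HASURA_GRAPHQL_EXPERIMENTAL_FEATURES=inherited_roles graphql-engine serve
-- > graphql-engine serve --experimental-features inherited_roles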
data RawServeOptions impl = RawServeOptions
{ rsoPort :: Maybe Int,
rsoHost :: Maybe HostPreference,
rsoConnParams :: RawConnParams,
rsoTxIso :: Maybe Q.TxIsolation,
rsoAdminSecret :: Maybe AdminSecretHash,
rsoAuthHook :: RawAuthHook,
rsoJwtSecret :: Maybe JWTConfig,
rsoUnAuthRole :: Maybe RoleName,
rsoCorsConfig :: Maybe CorsConfig,
rsoEnableConsole :: Bool,
rsoConsoleAssetsDir :: Maybe Text,
rsoEnableTelemetry :: Maybe Bool,
rsoWsReadCookie :: Bool,
rsoStringifyNum :: Bool,
rsoDangerousBooleanCollapse :: Maybe Bool,
rsoEnabledAPIs :: Maybe [API],
rsoMxRefetchInt :: Maybe LQ.RefetchInterval,
rsoMxBatchSize :: Maybe LQ.BatchSize,
rsoEnableAllowlist :: Bool,
rsoEnabledLogTypes :: Maybe [L.EngineLogType impl],
rsoLogLevel :: Maybe L.LogLevel,
rsoDevMode :: Bool,
rsoAdminInternalErrors :: Maybe Bool,
rsoEventsHttpPoolSize :: Maybe Int,
rsoEventsFetchInterval :: Maybe Milliseconds,
rsoAsyncActionsFetchInterval :: Maybe Milliseconds,
rsoLogHeadersFromEnv :: Bool,
rsoEnableRemoteSchemaPermissions :: Bool,
rsoWebSocketCompression :: Bool,
rsoWebSocketKeepAlive :: Maybe Int,
rsoInferFunctionPermissions :: Maybe Bool,
rsoEnableMaintenanceMode :: Bool,
rsoSchemaPollInterval :: Maybe Milliseconds,
-- see Note [Experimental features]
rsoExperimentalFeatures :: Maybe [ExperimentalFeature],
rsoEventsFetchBatchSize :: Maybe NonNegativeInt,
rsoGracefulShutdownTimeout :: Maybe Seconds,
rsoWebSocketConnectionInitTimeout :: Maybe Int
}
-- | 'ResponseInternalErrorsConfig' represents the encoding of internal errors
-- in the response to the client.
-- See https://github.com/hasura/graphql-engine/issues/4031#issuecomment-609747705 for more details.
data ResponseInternalErrorsConfig
= InternalErrorsAllRequests
| InternalErrorsAdminOnly
| InternalErrorsDisabled
deriving (Show, Eq)
shouldIncludeInternal :: RoleName -> ResponseInternalErrorsConfig -> Bool
shouldIncludeInternal role = \case
InternalErrorsAllRequests -> True
InternalErrorsAdminOnly -> role == adminRoleName
InternalErrorsDisabled -> False
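-- Illustrative examples ('adminRoleName' is imported above; @userRole@ stands
-- for any hypothetical non-admin role):
--
-- >>> shouldIncludeInternal adminRoleName InternalErrorsAdminOnly
-- True
-- >>> shouldIncludeInternal userRole InternalErrorsDisabled
-- False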
newtype KeepAliveDelay = KeepAliveDelay {unKeepAliveDelay :: Seconds}
deriving (Eq, Show)
$(J.deriveJSON hasuraJSON ''KeepAliveDelay)
defaultKeepAliveDelay :: KeepAliveDelay
defaultKeepAliveDelay = KeepAliveDelay $ fromIntegral (5 :: Int)
newtype WSConnectionInitTimeout = WSConnectionInitTimeout {unWSConnectionInitTimeout :: Seconds}
deriving (Eq, Show)
$(J.deriveJSON hasuraJSON ''WSConnectionInitTimeout)
defaultWSConnectionInitTimeout :: WSConnectionInitTimeout
defaultWSConnectionInitTimeout = WSConnectionInitTimeout $ fromIntegral (3 :: Int)
data ServeOptions impl = ServeOptions
{ soPort :: Int,
soHost :: HostPreference,
soConnParams :: Q.ConnParams,
soTxIso :: Q.TxIsolation,
soAdminSecret :: Set.HashSet AdminSecretHash,
soAuthHook :: Maybe AuthHook,
soJwtSecret :: [JWTConfig],
soUnAuthRole :: Maybe RoleName,
soCorsConfig :: CorsConfig,
soEnableConsole :: Bool,
soConsoleAssetsDir :: Maybe Text,
soEnableTelemetry :: Bool,
soStringifyNum :: Bool,
soDangerousBooleanCollapse :: Bool,
soEnabledAPIs :: Set.HashSet API,
soLiveQueryOpts :: LQ.LiveQueriesOptions,
soEnableAllowlist :: Bool,
soEnabledLogTypes :: Set.HashSet (L.EngineLogType impl),
soLogLevel :: L.LogLevel,
soResponseInternalErrorsConfig :: ResponseInternalErrorsConfig,
soEventsHttpPoolSize :: Maybe Int,
soEventsFetchInterval :: Maybe Milliseconds,
soAsyncActionsFetchInterval :: OptionalInterval,
soLogHeadersFromEnv :: Bool,
soEnableRemoteSchemaPermissions :: RemoteSchemaPermsCtx,
soConnectionOptions :: WS.ConnectionOptions,
soWebsocketKeepAlive :: KeepAliveDelay,
soInferFunctionPermissions :: FunctionPermissionsCtx,
soEnableMaintenanceMode :: MaintenanceMode,
soSchemaPollInterval :: OptionalInterval,
soExperimentalFeatures :: Set.HashSet ExperimentalFeature,
soEventsFetchBatchSize :: NonNegativeInt,
soDevMode :: Bool,
soGracefulShutdownTimeout :: Seconds,
soWebsocketConnectionInitTimeout :: WSConnectionInitTimeout,
soEventingMode :: EventingMode,
soReadOnlyMode :: ReadOnlyMode
}
data DowngradeOptions = DowngradeOptions
{ dgoTargetVersion :: !Text,
dgoDryRun :: !Bool
}
deriving (Show, Eq)
data PostgresConnInfo a = PostgresConnInfo
{ _pciDatabaseConn :: !a,
_pciRetries :: !(Maybe Int)
}
deriving (Show, Eq, Functor, Foldable, Traversable)
data PostgresRawConnDetails = PostgresRawConnDetails
{ connHost :: !String,
connPort :: !Int,
connUser :: !String,
connPassword :: !String,
connDatabase :: !String,
connOptions :: !(Maybe String)
}
deriving (Eq, Read, Show)
data PostgresRawConnInfo
= PGConnDatabaseUrl !URLTemplate
| PGConnDetails !PostgresRawConnDetails
deriving (Show, Eq)
rawConnDetailsToUrl :: PostgresRawConnDetails -> URLTemplate
rawConnDetailsToUrl =
mkPlainURLTemplate . rawConnDetailsToUrlText
rawConnDetailsToUrlText :: PostgresRawConnDetails -> Text
rawConnDetailsToUrlText PostgresRawConnDetails {..} =
T.pack $
"postgresql://" <> connUser
<> ":"
<> connPassword
<> "@"
<> connHost
<> ":"
<> show connPort
<> "/"
<> connDatabase
<> maybe "" ("?options=" <>) connOptions
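-- A rendering sketch with made-up connection details:
--
-- >>> rawConnDetailsToUrlText (PostgresRawConnDetails "localhost" 5432 "postgres" "secret" "mydb" Nothing)
-- "postgresql://postgres:secret@localhost:5432/mydb"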
data HGECommandG a
= HCServe !a
| HCExport
| HCClean
| HCVersion
| HCDowngrade !DowngradeOptions
deriving (Show, Eq)
$(J.deriveJSON (J.aesonPrefix J.camelCase) {J.omitNothingFields = True} ''PostgresRawConnDetails)
type HGECommand impl = HGECommandG (ServeOptions impl)
type RawHGECommand impl = HGECommandG (RawServeOptions impl)
data HGEOptionsG a b = HGEOptionsG
{ hoDatabaseUrl :: !(PostgresConnInfo a),
hoMetadataDbUrl :: !(Maybe String),
hoCommand :: !(HGECommandG b)
}
deriving (Show, Eq)
type RawHGEOptions impl = HGEOptionsG (Maybe PostgresRawConnInfo) (RawServeOptions impl)
type HGEOptions impl = HGEOptionsG (Maybe UrlConf) (ServeOptions impl)
type Env = [(String, String)]
readHookType :: String -> Either String AuthHookType
readHookType tyS =
case tyS of
"GET" -> Right AHTGet
"POST" -> Right AHTPost
_ -> Left "Only expecting GET / POST"
parseStrAsBool :: String -> Either String Bool
parseStrAsBool t
| map toLower t `elem` truthVals = Right True
| map toLower t `elem` falseVals = Right False
| otherwise = Left errMsg
where
truthVals = ["true", "t", "yes", "y"]
falseVals = ["false", "f", "no", "n"]
errMsg =
" Not a valid boolean text. " ++ "True values are "
++ show truthVals
++ " and False values are "
++ show falseVals
++ ". All values are case insensitive"
readNonNegativeInt :: String -> Either String NonNegativeInt
readNonNegativeInt s =
onNothing (mkNonNegativeInt =<< readMaybe s) $ Left "Only expecting a non negative integer"
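-- Illustrative behaviour (only the error case is shown verbatim, since the
-- 'Show' output of 'NonNegativeInt' is defined elsewhere; 'isRight' is
-- assumed to be in scope from Data.Either):
--
-- >>> readNonNegativeInt "-1"
-- Left "Only expecting a non negative integer"
-- >>> isRight (readNonNegativeInt "10")
-- True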
readAPIs :: String -> Either String [API]
readAPIs = mapM readAPI . T.splitOn "," . T.pack
where
readAPI si = case T.toUpper $ T.strip si of
"METADATA" -> Right METADATA
"GRAPHQL" -> Right GRAPHQL
"PGDUMP" -> Right PGDUMP
"DEVELOPER" -> Right DEVELOPER
"CONFIG" -> Right CONFIG
_ -> Left "Only expecting list of comma separated API types metadata,graphql,pgdump,developer,config"
readExperimentalFeatures :: String -> Either String [ExperimentalFeature]
readExperimentalFeatures = mapM readAPI . T.splitOn "," . T.pack
where
readAPI si = case T.toLower $ T.strip si of
"inherited_roles" -> Right EFInheritedRoles
_ -> Left "Only expecting list of comma separated experimental features"
readLogLevel :: String -> Either String L.LogLevel
readLogLevel s = case T.toLower $ T.strip $ T.pack s of
"debug" -> Right L.LevelDebug
"info" -> Right L.LevelInfo
"warn" -> Right L.LevelWarn
"error" -> Right L.LevelError
_ -> Left "Valid log levels: debug, info, warn or error"
class FromEnv a where
fromEnv :: String -> Either String a
-- | Deserialize from seconds, in the usual way.
instance FromEnv NominalDiffTime where
fromEnv s =
maybe (Left "could not parse as a Double") (Right . realToFrac) $
(readMaybe s :: Maybe Double)
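-- Illustrative failure case (grounded in the parser above; the success case
-- yields a 'NominalDiffTime' whose 'Show' output is version-dependent, so it
-- is omitted):
--
-- >>> fromEnv "not-a-number" :: Either String NominalDiffTime
-- Left "could not parse as a Double"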
instance FromEnv String where
fromEnv = Right
instance FromEnv HostPreference where
fromEnv = Right . DataString.fromString
instance FromEnv Text where
fromEnv = Right . T.pack
instance FromEnv AuthHookType where
fromEnv = readHookType
instance FromEnv Int where
fromEnv = maybe (Left "Expecting Int value") Right . readMaybe
instance FromEnv AdminSecretHash where
fromEnv = Right . hashAdminSecret . T.pack
instance FromEnv RoleName where
fromEnv string = case mkRoleName (T.pack string) of
Nothing -> Left "empty string not allowed"
Just roleName -> Right roleName
instance FromEnv Bool where
fromEnv = parseStrAsBool
instance FromEnv Q.TxIsolation where
fromEnv = readIsoLevel
instance FromEnv CorsConfig where
fromEnv = readCorsDomains
instance FromEnv [API] where
fromEnv = readAPIs
instance FromEnv [ExperimentalFeature] where
fromEnv = readExperimentalFeatures
instance FromEnv LQ.BatchSize where
fromEnv s = do
val <- readEither s
maybe (Left "batch size should be a non negative integer") Right $ LQ.mkBatchSize val
instance FromEnv LQ.RefetchInterval where
fromEnv x = do
val <- fmap (milliseconds . fromInteger) . readEither $ x
maybe (Left "refetch interval should be a non negative integer") Right $ LQ.mkRefetchInterval val
instance FromEnv Milliseconds where
fromEnv = fmap fromInteger . readEither
instance FromEnv Seconds where
fromEnv = fmap fromInteger . readEither
instance FromEnv JWTConfig where
fromEnv = readJson
instance FromEnv [JWTConfig] where
fromEnv = readJson
instance L.EnabledLogTypes impl => FromEnv [L.EngineLogType impl] where
fromEnv = L.parseEnabledLogTypes
instance FromEnv L.LogLevel where
fromEnv = readLogLevel
instance FromEnv URLTemplate where
fromEnv = parseURLTemplate . T.pack
instance FromEnv NonNegativeInt where
fromEnv = readNonNegativeInt
type WithEnv a = ReaderT Env (ExceptT String Identity) a
runWithEnv :: Env -> WithEnv a -> Either String a
runWithEnv env m = runIdentity $ runExceptT $ runReaderT m env
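-- A minimal usage sketch with a hypothetical environment ('asks' comes from
-- the 'MonadReader' instance provided by 'ReaderT'):
--
-- >>> runWithEnv [("HASURA_GRAPHQL_ADMIN_SECRET", "top-secret")] (asks (lookup "HASURA_GRAPHQL_ADMIN_SECRET"))
-- Right (Just "top-secret")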