{-# LANGUAGE NamedFieldPuns  #-}
{-# LANGUAGE RecordWildCards #-}

module Main where

import Control.Applicative
import Control.Exception
import Control.Monad.Trans.Managed (ManagedT (..), lowerManagedT)
import Data.Int (Int64)
import Data.Text.Conversions (convertText)
import Data.Time.Clock (getCurrentTime)
import Data.Time.Clock.POSIX (getPOSIXTime)

import Hasura.App
import Hasura.Logging (Hasura, LogLevel (..),
                       defaultEnabledEngineLogTypes)
import Hasura.Metadata.Class
import Hasura.Prelude
import Hasura.RQL.DDL.Schema
import Hasura.RQL.DDL.Schema.Cache.Common
import Hasura.RQL.DDL.Schema.Source
import Hasura.RQL.Types
import Hasura.Server.Init
import Hasura.Server.Migrate (downgradeCatalog)
import Hasura.Server.Types (MaintenanceMode (..))
import Hasura.Server.Version

import qualified Control.Concurrent.Extended as C
import qualified Data.ByteString.Char8 as BC
import qualified Data.ByteString.Lazy as BL
import qualified Data.ByteString.Lazy.Char8 as BLC
import qualified Data.Environment as Env
import qualified Database.PG.Query as Q
import qualified Hasura.GC as GC
import qualified Hasura.Tracing as Tracing
import qualified System.Exit as Sys
import qualified System.Metrics as EKG
import qualified System.Posix.Signals as Signals

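-- | Entry point: parse the command-line options and the process environment,
-- then hand everything over to 'runApp'. An 'ExitException' raised anywhere
-- below is printed and converted into a non-zero exit code.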
main :: IO ()
main = do
  tryExit $ do
    args <- parseArgs
    env <- Env.getEnvironment
    runApp env args
  where
    tryExit io = try io >>= \case
      Left (ExitException _code msg) -> BC.putStrLn msg >> Sys.exitFailure
      Right r -> return r

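-- | Dispatch on the parsed command: serve the API ('HCServe'), export or
-- clean metadata, execute a query against the stored metadata, downgrade the
-- catalog, or print the version.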
runApp :: Env.Environment -> HGEOptions Hasura -> IO ()
runApp env (HGEOptionsG rci metadataDbUrl hgeCmd) = do
  initTime <- liftIO getCurrentTime
  globalCtx@GlobalCtx{..} <- initGlobalCtx env metadataDbUrl rci
  let (maybeDefaultPgConnInfo, maybeRetries) = _gcDefaultPostgresConnInfo

  withVersion $$(getVersionFromEnvironment) $ case hgeCmd of
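    -- Serve the GraphQL Engine API. First set up an EKG store for run-time
    -- metrics: GHC GC/RTS metrics plus an "ekg.server_timestamp_ms" counter
    -- (the (* 1000) below converts POSIX seconds to milliseconds).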
    HCServe serveOptions -> do
      ekgStore <- liftIO do
        s <- EKG.newStore
        EKG.registerGcMetrics s

        let getTimeMs :: IO Int64
            getTimeMs = (round . (* 1000)) `fmap` getPOSIXTime

        EKG.registerCounter "ekg.server_timestamp_ms" getTimeMs s
        pure s

      -- It'd be nice if we didn't have to call runManagedT twice here, but
      -- there is a data dependency problem since the call to runPGMetadataStorageApp
      -- below depends on serveCtx.
      runManagedT (initialiseServeCtx env globalCtx serveOptions) $ \serveCtx -> do
        -- Catch SIGTERM and SIGINT and initiate a graceful shutdown.
        -- Graceful shutdown for regular HTTP requests is already implemented
        -- in Warp, and is triggered by invoking the 'closeSocket' callback.
        -- The handlers are installed with 'CatchOnce', so each signal is only
        -- caught once: if the user hits CTRL-C a second time, the process
        -- terminates immediately.

        -- The handler is written in this style to avoid retaining a reference
        -- to the entire serveCtx (see #344). If you modify this code, check
        -- the generated GHC Core to confirm that serveCtx is not retained.
        _ <- case serveCtx of
          ServeCtx{_scShutdownLatch} ->
            liftIO $ do
              void $ Signals.installHandler Signals.sigTERM (Signals.CatchOnce (shutdownGracefully _scShutdownLatch)) Nothing
              void $ Signals.installHandler Signals.sigINT (Signals.CatchOnce (shutdownGracefully _scShutdownLatch)) Nothing

        let Loggers _ logger pgLogger = _scLoggers serveCtx

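        -- Fork a long-lived thread that runs the custom idle GC (see
        -- 'Hasura.GC'); the three durations below tune how often it forces a
        -- major GC.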
        _idleGCThread <- C.forkImmortal "ourIdleGC" logger $
          GC.ourIdleGC logger (seconds 0.3) (seconds 10) (seconds 60)

        serverMetrics <- liftIO $ createServerMetrics ekgStore
        flip runPGMetadataStorageApp (_scMetadataDbPool serveCtx, pgLogger) . lowerManagedT $ do
          runHGEServer (const $ pure ()) env serveOptions serveCtx initTime Nothing serverMetrics ekgStore

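    -- Export the current metadata from the metadata database to stdout as
    -- JSON.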
    HCExport -> do
      res <- runTxWithMinimalPool _gcMetadataDbConnInfo fetchMetadataFromCatalog
      either (printErrJExit MetadataExportError) printJSON res

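    -- Drop the hdb_catalog schema, i.e. all graphql-engine state, from the
    -- metadata database.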
    HCClean -> do
      res <- runTxWithMinimalPool _gcMetadataDbConnInfo dropHdbCatalogSchema
      let cleanSuccessMsg = "successfully cleaned graphql-engine related data"
      either (printErrJExit MetadataCleanError) (const $ liftIO $ putStrLn cleanSuccessMsg) res

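    -- Read a query from stdin, build a schema cache from the stored metadata,
    -- and execute the query as the admin role.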
    HCExecute -> do
      queryBs <- liftIO BL.getContents
      let sqlGenCtx = SQLGenCtx False False
          remoteSchemaPermsCtx = RemoteSchemaPermsDisabled
          pgLogger = print
          pgSourceResolver = mkPgSourceResolver pgLogger
          functionPermsCtx = FunctionPermissionsInferred
          maintenanceMode = MaintenanceModeDisabled
          serverConfigCtx =
            ServerConfigCtx functionPermsCtx remoteSchemaPermsCtx sqlGenCtx maintenanceMode mempty
          cacheBuildParams =
            CacheBuildParams _gcHttpManager pgSourceResolver serverConfigCtx
      runManagedT (mkMinimalPool _gcMetadataDbConnInfo) $ \metadataDbPool -> do
        res <- flip runPGMetadataStorageApp (metadataDbPool, pgLogger) $
          runMetadataStorageT $ liftEitherM do
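            -- 'fetchMetadata' returns a pair; only the metadata itself is
            -- needed here, so the second component is ignored.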
            (metadata, _) <- fetchMetadata
            runAsAdmin _gcHttpManager serverConfigCtx $ do
              schemaCache <- runCacheBuild cacheBuildParams $
                buildRebuildableSchemaCache env metadata
              execQuery env queryBs
                & Tracing.runTraceTWithReporter Tracing.noReporter "execute"
                & runMetadataT metadata
                & runCacheRWT schemaCache
                & fmap (\((res, _), _, _) -> res)
        either (printErrJExit ExecuteProcessError) (liftIO . BLC.putStrLn) res

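    -- Downgrade the catalog in the metadata database, building the default
    -- Postgres source configuration from the raw connection info when one was
    -- provided.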
    HCDowngrade opts -> do
      let defaultSourceConfig = maybeDefaultPgConnInfo <&> \(dbUrlConf, _) ->
            let pgSourceConnInfo = PostgresSourceConnInfo dbUrlConf
                                     (Just setPostgresPoolSettings{_ppsRetries = maybeRetries <|> Just 1})
                                     False
                                     Q.ReadCommitted
                                     Nothing
            in PostgresConnConfiguration pgSourceConnInfo Nothing
      res <- runTxWithMinimalPool _gcMetadataDbConnInfo $ downgradeCatalog defaultSourceConfig opts initTime
      either (printErrJExit DowngradeProcessError) (liftIO . print) res

    HCVersion -> liftIO $ putStrLn $ "Hasura GraphQL Engine: " ++ convertText currentVersion
  where
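    -- Run a single transaction against the metadata database using a
    -- one-connection pool (see 'mkMinimalPool' below).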
    runTxWithMinimalPool connInfo tx = lowerManagedT $ do
      minimalPool <- mkMinimalPool connInfo
      liftIO $ runExceptT $ Q.runTx minimalPool (Q.ReadCommitted, Nothing) tx

    -- | Generate a Postgres connection pool with a single connection.
    -- It is useful when graphql-engine executes a transaction against the
    -- database and then exits, i.e. for commands other than 'serve'.
    mkMinimalPool connInfo = do
      pgLogger <- _lsPgLogger <$> mkLoggers defaultEnabledEngineLogTypes LevelInfo
      let connParams = Q.defaultConnParams { Q.cpConns = 1 }
      liftIO $ Q.initPGPool connInfo connParams pgLogger