graphql-engine/server/src-lib/Hasura/RQL/DDL/Metadata.hs

module Hasura.RQL.DDL.Metadata
( runReplaceMetadata,
runReplaceMetadataV2,
runExportMetadata,
runExportMetadataV2,
runClearMetadata,
runReloadMetadata,
runDumpInternalState,
runGetInconsistentMetadata,
runDropInconsistentMetadata,
runGetCatalogState,
runSetCatalogState,
runTestWebhookTransform,
runSetMetricsConfig,
runRemoveMetricsConfig,
module Hasura.RQL.DDL.Metadata.Types,
)
where
import Control.Lens (to, (.~), (^.), (^?))
import Control.Monad.Trans.Control (MonadBaseControl)
import Data.Aeson qualified as J
import Data.Aeson.Ordered qualified as AO
import Data.Attoparsec.Text qualified as AT
import Data.Bifunctor (first)
import Data.Bitraversable
import Data.ByteString.Lazy qualified as BL
import Data.CaseInsensitive qualified as CI
import Data.Environment qualified as Env
import Data.Has (Has, getter)
import Data.HashMap.Strict qualified as Map
import Data.HashMap.Strict.InsOrd.Extended qualified as OMap
import Data.HashSet qualified as HS
import Data.HashSet qualified as Set
import Data.List qualified as L
import Data.List.Extended qualified as L
import Data.SerializableBlob qualified as SB
import Data.Text qualified as T
import Data.Text.Encoding qualified as TE
import Data.Text.Extended (dquoteList, (<<>))
import Hasura.Base.Error
import Hasura.EncJSON
import Hasura.Logging qualified as HL
import Hasura.Metadata.Class
import Hasura.NativeQuery.API
import Hasura.Prelude hiding (first)
import Hasura.RQL.DDL.Action
import Hasura.RQL.DDL.ComputedField
import Hasura.RQL.DDL.CustomTypes
import Hasura.RQL.DDL.Endpoint
import Hasura.RQL.DDL.EventTrigger
import Hasura.RQL.DDL.InheritedRoles
import Hasura.RQL.DDL.Metadata.Types
import Hasura.RQL.DDL.Permission
import Hasura.RQL.DDL.Relationship
import Hasura.RQL.DDL.RemoteRelationship
import Hasura.RQL.DDL.ScheduledTrigger
import Hasura.RQL.DDL.Schema
import Hasura.RQL.DDL.Schema.Source
import Hasura.RQL.DDL.Webhook.Transform
import Hasura.RQL.Types.Allowlist
import Hasura.RQL.Types.ApiLimit
import Hasura.RQL.Types.Backend
import Hasura.RQL.Types.Common
import Hasura.RQL.Types.Endpoint
import Hasura.RQL.Types.EventTrigger
import Hasura.RQL.Types.EventTrigger qualified as ET
import Hasura.RQL.Types.Eventing.Backend (BackendEventTrigger (..))
import Hasura.RQL.Types.Metadata
import Hasura.RQL.Types.Metadata.Backend
import Hasura.RQL.Types.Metadata.Object
import Hasura.RQL.Types.Network
import Hasura.RQL.Types.OpenTelemetry
import Hasura.RQL.Types.QueryCollection
import Hasura.RQL.Types.ScheduledTrigger
import Hasura.RQL.Types.SchemaCache
import Hasura.RQL.Types.SchemaCache.Build
import Hasura.RQL.Types.Source (unsafeSourceInfo)
import Hasura.RQL.Types.SourceCustomization
import Hasura.SQL.AnyBackend qualified as AB
import Hasura.SQL.Backend (BackendType (..))
import Hasura.SQL.BackendMap qualified as BackendMap
import Hasura.Server.Logging (MetadataLog (..))
import Network.HTTP.Client.Transformable qualified as HTTP
-- | Helper function to run the post drop source hook
postDropSourceHookHelper ::
( MonadError QErr m,
MonadIO m,
MonadBaseControl IO m,
MonadReader r m,
Has (HL.Logger HL.Hasura) r
) =>
SchemaCache ->
SourceName ->
AB.AnyBackend SourceMetadata ->
m ()
postDropSourceHookHelper oldSchemaCache sourceName sourceMetadataBackend = do
logger :: (HL.Logger HL.Hasura) <- asks getter
AB.dispatchAnyBackend @BackendMetadata sourceMetadataBackend \(_ :: SourceMetadata b) -> do
let sourceInfoMaybe = unsafeSourceInfo @b =<< Map.lookup sourceName (scSources oldSchemaCache)
case sourceInfoMaybe of
Nothing ->
HL.unLogger logger $
MetadataLog
HL.LevelWarn
( "Could not cleanup the source '"
<> sourceName
<<> "' while dropping it from the graphql-engine as it is inconsistent."
<> " Please consider cleaning the resources created by the graphql engine,"
<> " refer https://hasura.io/docs/latest/graphql/core/event-triggers/remove-event-triggers/#clean-footprints-manually "
)
J.Null
Just sourceInfo -> runPostDropSourceHook defaultSource sourceInfo
runClearMetadata ::
forall m r.
( MonadIO m,
CacheRWM m,
MetadataM m,
MonadMetadataStorageQueryAPI m,
MonadBaseControl IO m,
MonadReader r m,
MonadError QErr m,
Has (HL.Logger HL.Hasura) r,
MonadEventLogCleanup m
) =>
ClearMetadata ->
m EncJSON
runClearMetadata _ = do
metadata <- getMetadata
oldSchemaCache <- askSchemaCache
-- We can infer whether the server was started with the `--database-url` option
-- (or the corresponding env variable) by checking for the existence of @'defaultSource'
-- in the current metadata.
let maybeDefaultSourceMetadata = metadata ^? metaSources . ix defaultSource . to unBackendSourceMetadata
emptyMetadata' = case maybeDefaultSourceMetadata of
Nothing -> emptyMetadata
Just exists ->
-- If the default postgres source is defined, we need to set metadata
-- that contains only the default source, without any tables or functions.
let emptyDefaultSource =
AB.dispatchAnyBackend @Backend exists \(s :: SourceMetadata b) ->
BackendSourceMetadata $
AB.mkAnyBackend @b $
SourceMetadata
@b
defaultSource
(_smKind @b s)
mempty
mempty
mempty
(_smConfiguration @b s)
Nothing
emptySourceCustomization
Nothing
in emptyMetadata
& metaSources %~ OMap.insert defaultSource emptyDefaultSource
resp <- runReplaceMetadataV1 $ RMWithSources emptyMetadata'
-- Clean up the default source explicitly: in the `runReplaceMetadataV1` call above
-- it is not considered a dropped source, because we artificially add an empty
-- source metadata entry for it. Since `runReplaceMetadataV1` therefore performs no
-- post-drop-source action on the default source, we explicitly run that action
-- here, if the default source existed in the metadata.
for_ maybeDefaultSourceMetadata $ postDropSourceHookHelper oldSchemaCache defaultSource
pure resp
{- Note [Cleanup for dropped triggers]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
There was an issue (https://github.com/hasura/graphql-engine/issues/5461),
fixed via https://github.com/hasura/graphql-engine/pull/6137, related to
event triggers while replacing metadata in the catalog prior to metadata
separation. The metadata separation solves the issue naturally, since the
'hdb_catalog.event_triggers' table is no longer in use and new/updated event
triggers are processed while building the schema cache. But we still need to
drop the database trigger and archive events for dropped event triggers. This
is handled explicitly in the @'runReplaceMetadata' function.
-}
-- | Replace the 'current metadata' with the 'new metadata'.
-- The 'new metadata' might come via 'Import Metadata' in the console.
runReplaceMetadata ::
( CacheRWM m,
MetadataM m,
MonadIO m,
MonadBaseControl IO m,
MonadMetadataStorageQueryAPI m,
MonadReader r m,
MonadError QErr m,
Has (HL.Logger HL.Hasura) r,
MonadEventLogCleanup m
) =>
ReplaceMetadata ->
m EncJSON
runReplaceMetadata = \case
RMReplaceMetadataV1 v1args -> runReplaceMetadataV1 v1args
RMReplaceMetadataV2 v2args -> runReplaceMetadataV2 v2args
runReplaceMetadataV1 ::
( CacheRWM m,
MetadataM m,
MonadIO m,
MonadBaseControl IO m,
MonadMetadataStorageQueryAPI m,
MonadReader r m,
MonadError QErr m,
Has (HL.Logger HL.Hasura) r,
MonadEventLogCleanup m
) =>
ReplaceMetadataV1 ->
m EncJSON
runReplaceMetadataV1 =
(successMsg <$) . runReplaceMetadataV2 . ReplaceMetadataV2 NoAllowInconsistentMetadata
runReplaceMetadataV2 ::
forall m r.
( CacheRWM m,
MetadataM m,
MonadIO m,
MonadBaseControl IO m,
MonadMetadataStorageQueryAPI m,
MonadReader r m,
MonadError QErr m,
Has (HL.Logger HL.Hasura) r,
MonadEventLogCleanup m
) =>
ReplaceMetadataV2 ->
m EncJSON
runReplaceMetadataV2 ReplaceMetadataV2 {..} = do
logger :: (HL.Logger HL.Hasura) <- asks getter
-- we drop all the future cron trigger events before inserting the new metadata
-- and re-populating future cron events below
let introspectionDisabledRoles =
case _rmv2Metadata of
RMWithSources m -> _metaSetGraphqlIntrospectionOptions m
RMWithoutSources _ -> mempty
oldMetadata <- getMetadata
oldSchemaCache <- askSchemaCache
(cronTriggersMetadata, cronTriggersToBeAdded) <- processCronTriggers oldMetadata
metadata <- case _rmv2Metadata of
RMWithSources m -> pure $ m {_metaCronTriggers = cronTriggersMetadata}
RMWithoutSources MetadataNoSources {..} -> do
let maybeDefaultSourceMetadata = oldMetadata ^? metaSources . ix defaultSource . toSourceMetadata
defaultSourceMetadata <-
onNothing maybeDefaultSourceMetadata $
throw400 NotSupported "cannot import metadata without sources since no default source is defined"
let newDefaultSourceMetadata =
BackendSourceMetadata $
AB.mkAnyBackend
defaultSourceMetadata
{ _smTables = _mnsTables,
_smFunctions = _mnsFunctions
}
pure $
Metadata
(OMap.singleton defaultSource newDefaultSourceMetadata)
_mnsRemoteSchemas
_mnsQueryCollections
_mnsAllowlist
_mnsCustomTypes
_mnsActions
cronTriggersMetadata
(_metaRestEndpoints oldMetadata)
emptyApiLimit
emptyMetricsConfig
mempty
introspectionDisabledRoles
emptyNetwork
mempty
emptyOpenTelemetryConfig
let (oldSources, newSources) = (_metaSources oldMetadata, _metaSources metadata)
-- Check for duplicate trigger names in the new source metadata
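-- (a name appears in the duplicates list below when removing one occurrence of
-- each distinct name, via `L.uniques`, still leaves it behind in `newTriggerNames`)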
for_ (OMap.toList newSources) $ \(source, newBackendSourceMetadata) -> do
for_ (OMap.lookup source oldSources) $ \_oldBackendSourceMetadata ->
dispatch newBackendSourceMetadata \(newSourceMetadata :: SourceMetadata b) -> do
let newTriggerNames = concatMap (OMap.keys . _tmEventTriggers) (OMap.elems $ _smTables newSourceMetadata)
duplicateTriggerNamesInNewMetadata = newTriggerNames \\ (L.uniques newTriggerNames)
unless (null duplicateTriggerNamesInNewMetadata) $ do
throw400 NotSupported ("Event trigger with duplicate names not allowed: " <> dquoteList (map triggerNameToTxt duplicateTriggerNamesInNewMetadata))
let cacheInvalidations =
CacheInvalidations
{ ciMetadata = False,
ciRemoteSchemas = mempty,
ciSources = Set.fromList $ OMap.keys newSources,
ciDataConnectors = mempty
}
-- put the new metadata in the state managed by the `MetadataT`
putMetadata metadata
-- build the schema cache with the new metadata
buildSchemaCacheWithInvalidations cacheInvalidations mempty
case _rmv2AllowInconsistentMetadata of
AllowInconsistentMetadata -> pure ()
NoAllowInconsistentMetadata -> throwOnInconsistencies
-- populate future cron events for all the new cron triggers that are imported
for_ cronTriggersToBeAdded $ \CronTriggerMetadata {..} ->
populateInitialCronTriggerEvents ctSchedule ctName
-- See Note [Cleanup for dropped triggers]
dropSourceSQLTriggers logger oldSchemaCache (_metaSources oldMetadata) (_metaSources metadata)
newSchemaCache <- askSchemaCache
updateTriggerCleanupSchedules logger (_metaSources oldMetadata) (_metaSources metadata) newSchemaCache
>>= (`onLeft` throwError)
let droppedSources = OMap.difference oldSources newSources
-- Clean up the sources that are not present in the new metadata
for_ (OMap.toList droppedSources) $ \(oldSource, oldSourceBackendMetadata) ->
postDropSourceHookHelper oldSchemaCache oldSource (unBackendSourceMetadata oldSourceBackendMetadata)
pure . encJFromJValue . formatInconsistentObjs . scInconsistentObjs $ newSchemaCache
where
{- Note [Cron triggers behaviour with replace metadata]
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When the metadata is replaced, we delete only the cron triggers that were
removed, instead of deleting all the old cron triggers (which existed in the
metadata before it was replaced) and inserting all the new ones. We do it this
way because dropping a cron trigger also drops its associated cron events from
the DB, and adding a new cron trigger makes the graphql-engine generate new cron
events; so we only delete and insert the data that has actually changed.
The cron triggers to be deleted are calculated by diffing the old cron triggers
against the new ones. Note that the diff doesn't just compare trigger names:
the whole cron trigger definition is considered in the calculation.
Note: Only cron triggers with `include_in_metadata` set to `true` can be
updated/deleted via the replace metadata API. Cron triggers with
`include_in_metadata` set to `false` can only be modified via the
`create_cron_trigger` and `delete_cron_trigger` APIs.
-}
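{- A rough sketch of the diff described above, using hypothetical triggers (not
   taken from this module):

     old = Map.fromList [("t1", cron1), ("t2", cron2)]
     new = Map.fromList [("t1", cron1), ("t2", cron2'), ("t3", cron3)]

     Map.differenceWith leftIfDifferent new old
       == Map.fromList [("t2", cron2'), ("t3", cron3)]  -- to be added
     Map.differenceWith leftIfDifferent old new
       == Map.fromList [("t2", cron2)]                   -- to be dropped

   An entry survives the difference only when it is absent on the other side or
   its whole definition differs, which is how `cronTriggersToBeAdded` and
   `cronTriggersToBeDropped` are computed below.
-}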
processCronTriggers oldMetadata = do
let (oldCronTriggersIncludedInMetadata, oldCronTriggersNotIncludedInMetadata) =
OMap.partition ctIncludeInMetadata (_metaCronTriggers oldMetadata)
allNewCronTriggers =
case _rmv2Metadata of
RMWithoutSources m -> _mnsCronTriggers m
RMWithSources m -> _metaCronTriggers m
-- this function is intended to be used with `Map.differenceWith`: when the same
-- key is present on both sides, the two values are compared to calculate the diff.
-- see https://hackage.haskell.org/package/unordered-containers-0.2.14.0/docs/Data-HashMap-Internal.html#v:differenceWith
leftIfDifferent l r
| l == r = Nothing
| otherwise = Just l
cronTriggersToBeAdded =
Map.differenceWith
leftIfDifferent
(OMap.toHashMap allNewCronTriggers)
(OMap.toHashMap oldCronTriggersIncludedInMetadata)
cronTriggersToBeDropped =
Map.differenceWith
leftIfDifferent
(OMap.toHashMap oldCronTriggersIncludedInMetadata)
(OMap.toHashMap allNewCronTriggers)
liftEitherM $ dropFutureCronEvents $ MetadataCronTriggers $ Map.keys cronTriggersToBeDropped
cronTriggers <- do
-- traverse over the new cron triggers and check if any of them
-- already exists as a cron trigger with "included_in_metadata: false"
for_ allNewCronTriggers $ \ct ->
when (ctName ct `OMap.member` oldCronTriggersNotIncludedInMetadata) $
throw400 AlreadyExists $
"cron trigger with name "
<> ctName ct
<<> " already exists as a cron trigger with \"included_in_metadata\" as false"
-- we merge the old cron triggers that have included_in_metadata set to false
-- with the newly added cron triggers
pure $ allNewCronTriggers <> oldCronTriggersNotIncludedInMetadata
pure $ (cronTriggers, cronTriggersToBeAdded)
dropSourceSQLTriggers ::
HL.Logger HL.Hasura ->
SchemaCache ->
InsOrdHashMap SourceName BackendSourceMetadata ->
InsOrdHashMap SourceName BackendSourceMetadata ->
m ()
dropSourceSQLTriggers (HL.Logger logger) oldSchemaCache oldSources newSources = do
-- NOTE: the current implementation of this function has an edge case:
-- when a source (say `SourceA`) which contained some event triggers is modified
-- to point to a new database, this function will try to drop the SQL triggers of
-- the dropped event triggers on the new database, where they don't exist.
-- This doesn't throw an error, because the triggers are dropped using
-- `DROP IF EXISTS..`, so the drop silently does nothing.
for_ (OMap.toList newSources) $ \(source, newBackendSourceMetadata) -> do
for_ (OMap.lookup source oldSources) $ \oldBackendSourceMetadata ->
compose source (unBackendSourceMetadata newBackendSourceMetadata) (unBackendSourceMetadata oldBackendSourceMetadata) \(newSourceMetadata :: SourceMetadata b) -> do
dispatch oldBackendSourceMetadata \oldSourceMetadata -> do
let oldTriggersMap = getTriggersMap oldSourceMetadata
newTriggersMap = getTriggersMap newSourceMetadata
droppedEventTriggers = OMap.keys $ oldTriggersMap `OMap.difference` newTriggersMap
retainedNewTriggers = newTriggersMap `OMap.intersection` oldTriggersMap
catcher e@QErr {qeCode}
| qeCode == Unexpected = pure () -- NOTE: This information should be returned by the inconsistent_metadata response, so doesn't need additional logging.
| otherwise = throwError e -- rethrow other errors
-- This will swallow Unexpected exceptions for sources if allow_inconsistent_metadata is enabled
-- This should be ok since if the sources are already missing from the cache then they should
-- not need to be removed.
--
-- TODO: Determine if any errors should be thrown from askSourceConfig at all if the errors are just being discarded
return $
flip catchError catcher do
sourceConfigMaybe <- askSourceConfigMaybe @b source
case sourceConfigMaybe of
Nothing ->
-- TODO: Add user facing docs on how to drop triggers manually. Issue #7104
logger $
MetadataLog
HL.LevelWarn
( "Could not drop SQL triggers present in the source '"
<> source
<<> "' as it is inconsistent."
<> " While creating an event trigger, Hasura creates SQL triggers on the table."
<> " Please refer https://hasura.io/docs/latest/graphql/core/event-triggers/remove-event-triggers/#clean-up-event-trigger-footprints-manually "
<> " to delete the sql triggers from the database manually."
<> " For more details, please refer https://hasura.io/docs/latest/graphql/core/event-triggers/index.html "
)
J.Null
Just sourceConfig -> do
for_ droppedEventTriggers $
\triggerName -> do
-- TODO: The `tableName` parameter could be computed while building
-- the triggers map and avoid the cache lookup.
tableNameMaybe <- getTableNameFromTrigger @b oldSchemaCache source triggerName
case tableNameMaybe of
Nothing ->
logger $
MetadataLog
HL.LevelWarn
(sqlTriggerError triggerName)
J.Null
Just tableName ->
dropTriggerAndArchiveEvents @b sourceConfig triggerName tableName
for_ (OMap.toList retainedNewTriggers) $ \(retainedNewTriggerName, retainedNewTriggerConf) ->
case OMap.lookup retainedNewTriggerName oldTriggersMap of
Nothing ->
logger $
MetadataLog
HL.LevelWarn
(sqlTriggerError retainedNewTriggerName)
J.Null
Just oldTriggerConf -> do
let newTriggerOps = etcDefinition retainedNewTriggerConf
oldTriggerOps = etcDefinition oldTriggerConf
isDroppedOp old new = isJust old && isNothing new
droppedOps =
[ (bool Nothing (Just INSERT) (isDroppedOp (tdInsert oldTriggerOps) (tdInsert newTriggerOps))),
(bool Nothing (Just UPDATE) (isDroppedOp (tdUpdate oldTriggerOps) (tdUpdate newTriggerOps))),
(bool Nothing (Just ET.DELETE) (isDroppedOp (tdDelete oldTriggerOps) (tdDelete newTriggerOps)))
]
tableNameMaybe <- getTableNameFromTrigger @b oldSchemaCache source retainedNewTriggerName
case tableNameMaybe of
Nothing ->
logger $
MetadataLog
HL.LevelWarn
(sqlTriggerError retainedNewTriggerName)
J.Null
Just tableName ->
dropDanglingSQLTrigger @b sourceConfig retainedNewTriggerName tableName (Set.fromList $ catMaybes droppedOps)
where
compose ::
SourceName ->
AB.AnyBackend i ->
AB.AnyBackend i ->
(forall b. BackendEventTrigger b => i b -> i b -> m ()) ->
m ()
compose sourceName x y f = AB.composeAnyBackend @BackendEventTrigger f x y (logger $ HL.UnstructuredLog HL.LevelInfo $ SB.fromText $ "Event trigger clean up couldn't be done on the source " <> sourceName <<> " because it has changed its type")
sqlTriggerError :: TriggerName -> Text
sqlTriggerError triggerName =
( "Could not drop SQL triggers associated with event trigger '"
<> triggerName
<<> "'. While creating an event trigger, Hasura creates SQL triggers on the table."
<> " Please refer https://hasura.io/docs/latest/graphql/core/event-triggers/remove-event-triggers/#clean-up-event-trigger-footprints-manually "
<> " to delete the sql triggers from the database manually."
<> " For more details, please refer https://hasura.io/docs/latest/graphql/core/event-triggers/index.html "
)
dispatch (BackendSourceMetadata bs) = AB.dispatchAnyBackend @BackendEventTrigger bs
-- | Only includes the cron triggers with `included_in_metadata` set to `True`
processCronTriggersMetadata :: Metadata -> Metadata
processCronTriggersMetadata metadata =
let cronTriggersIncludedInMetadata = OMap.filter ctIncludeInMetadata $ _metaCronTriggers metadata
in metadata {_metaCronTriggers = cronTriggersIncludedInMetadata}
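-- For example, a metadata value holding the (hypothetical) cron triggers
--   "daily_report" (include_in_metadata = true) and "internal_job" (include_in_metadata = false)
-- is exported with only the "daily_report" trigger.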
runExportMetadata ::
forall m.
(QErrM m, MetadataM m) =>
ExportMetadata ->
m EncJSON
runExportMetadata ExportMetadata {} =
encJFromOrderedValue . metadataToOrdJSON <$> (processCronTriggersMetadata <$> getMetadata)
runExportMetadataV2 ::
forall m.
(QErrM m, MetadataM m) =>
MetadataResourceVersion ->
ExportMetadata ->
m EncJSON
runExportMetadataV2 currentResourceVersion ExportMetadata {} = do
exportMetadata <- processCronTriggersMetadata <$> getMetadata
pure $
encJFromOrderedValue $
AO.object
[ ("resource_version", AO.toOrdered currentResourceVersion),
("metadata", metadataToOrdJSON exportMetadata)
]
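-- For illustration, the v2 export_metadata response built above wraps the
-- metadata together with the resource version, roughly (placeholder values):
--
--   { "resource_version": 42, "metadata": { "version": 3, "sources": [ ... ], ... } }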
runReloadMetadata :: (QErrM m, CacheRWM m, MetadataM m) => ReloadMetadata -> m EncJSON
runReloadMetadata (ReloadMetadata reloadRemoteSchemas reloadSources reloadRecreateEventTriggers reloadDataConnectors) = do
metadata <- getMetadata
let allSources = HS.fromList $ OMap.keys $ _metaSources metadata
allRemoteSchemas = HS.fromList $ OMap.keys $ _metaRemoteSchemas metadata
allDataConnectors =
maybe mempty (HS.fromList . OMap.keys . unBackendConfigWrapper) $
BackendMap.lookup @'DataConnector $
_metaBackendConfigs metadata
checkRemoteSchema name =
unless (HS.member name allRemoteSchemas) $
throw400 NotExists $
"Remote schema with name " <> name <<> " not found in metadata"
checkSource name =
unless (HS.member name allSources) $
throw400 NotExists $
"Source with name " <> name <<> " not found in metadata"
checkDataConnector name =
unless (HS.member name allDataConnectors) $
throw400 NotExists $
"Data connector with name " <> name <<> " not found in metadata"
remoteSchemaInvalidations <- case reloadRemoteSchemas of
RSReloadAll -> pure allRemoteSchemas
RSReloadList l -> mapM_ checkRemoteSchema l *> pure l
sourcesInvalidations <- case reloadSources of
RSReloadAll -> pure allSources
RSReloadList l -> mapM_ checkSource l *> pure l
recreateEventTriggersSources <- case reloadRecreateEventTriggers of
RSReloadAll -> pure allSources
RSReloadList l -> mapM_ checkSource l *> pure l
dataConnectorInvalidations <- case reloadDataConnectors of
RSReloadAll -> pure allDataConnectors
RSReloadList l -> mapM_ checkDataConnector l *> pure l
let cacheInvalidations =
CacheInvalidations
{ ciMetadata = True,
ciRemoteSchemas = remoteSchemaInvalidations,
ciSources = sourcesInvalidations,
ciDataConnectors = dataConnectorInvalidations
}
buildSchemaCacheWithOptions (CatalogUpdate $ Just recreateEventTriggersSources) cacheInvalidations metadata
inconsObjs <- scInconsistentObjs <$> askSchemaCache
pure . encJFromJValue . J.object $
[ "message" J..= ("success" :: Text),
"is_consistent" J..= null inconsObjs
]
<> ["inconsistent_objects" J..= inconsObjs | not (null inconsObjs)]
runDumpInternalState ::
(QErrM m, CacheRM m) =>
DumpInternalState ->
m EncJSON
runDumpInternalState _ =
encJFromJValue <$> askSchemaCache
runGetInconsistentMetadata ::
(QErrM m, CacheRM m) =>
GetInconsistentMetadata ->
m EncJSON
runGetInconsistentMetadata _ = do
inconsObjs <- scInconsistentObjs <$> askSchemaCache
return $ encJFromJValue $ formatInconsistentObjs inconsObjs
formatInconsistentObjs :: [InconsistentMetadata] -> J.Value
formatInconsistentObjs inconsObjs =
J.object
[ "is_consistent" J..= null inconsObjs,
"inconsistent_objects" J..= inconsObjs
]
runDropInconsistentMetadata ::
(QErrM m, CacheRWM m, MetadataM m) =>
DropInconsistentMetadata ->
m EncJSON
runDropInconsistentMetadata _ = do
sc <- askSchemaCache
let inconsSchObjs = L.nub . concatMap imObjectIds $ scInconsistentObjs sc
-- Note: when building the schema cache, we try to put dependents after their dependencies in the
-- list of inconsistent objects, so reverse the list to start with dependents first. This is not
-- perfect — a completely accurate solution would require performing a topological sort — but it
-- seems to work well enough for now.
MetadataModifier {..} <- execWriterT $ mapM_ (tell . purgeMetadataObj) (reverse inconsSchObjs)
metadata <- getMetadata
putMetadata $ runMetadataModifier metadata
buildSchemaCache mempty
-- after building the schema cache, check that any remaining inconsistent
-- metadata objects are only those which are not droppable
newInconsistentObjects <- scInconsistentObjs <$> askSchemaCache
let droppableInconsistentObjects = filter droppableInconsistentMetadata newInconsistentObjects
unless (null droppableInconsistentObjects) $
throwError
(err400 Unexpected "cannot continue due to new inconsistent metadata")
{ qeInternal = Just $ ExtraInternal $ J.toJSON newInconsistentObjects
}
return successMsg
purgeMetadataObj :: MetadataObjId -> MetadataModifier
purgeMetadataObj = \case
MOSource source -> MetadataModifier $ metaSources %~ OMap.delete source
MOSourceObjId source exists -> AB.dispatchAnyBackend @BackendMetadata exists $ handleSourceObj source
MORemoteSchema rsn -> dropRemoteSchemaInMetadata rsn
MORemoteSchemaPermissions rsName role -> dropRemoteSchemaPermissionInMetadata rsName role
MORemoteSchemaRemoteRelationship rsName typeName relName ->
dropRemoteSchemaRemoteRelationshipInMetadata rsName typeName relName
MOCustomTypes -> clearCustomTypesInMetadata
MOAction action -> dropActionInMetadata action -- Nothing
MOActionPermission action role -> dropActionPermissionInMetadata action role
MOCronTrigger ctName -> dropCronTriggerInMetadata ctName
MOEndpoint epName -> dropEndpointInMetadata epName
MOInheritedRole role -> dropInheritedRoleInMetadata role
MOQueryCollectionsQuery cName lq -> dropListedQueryFromQueryCollections cName lq
MODataConnectorAgent agentName ->
MetadataModifier $
metaBackendConfigs
%~ BackendMap.modify @'DataConnector (BackendConfigWrapper . OMap.delete agentName . unBackendConfigWrapper)
MOOpenTelemetry subobject ->
case subobject of
OtelSubobjectAll ->
MetadataModifier $ metaOpenTelemetryConfig .~ emptyOpenTelemetryConfig
OtelSubobjectExporterOtlp ->
MetadataModifier $ metaOpenTelemetryConfig . ocExporterOtlp .~ defaultOtelExporterConfig
OtelSubobjectBatchSpanProcessor ->
MetadataModifier $ metaOpenTelemetryConfig . ocBatchSpanProcessor .~ defaultOtelBatchSpanProcessorConfig
where
handleSourceObj :: forall b. BackendMetadata b => SourceName -> SourceMetadataObjId b -> MetadataModifier
handleSourceObj source = \case
SMOTable qt -> dropTableInMetadata @b source qt
SMOFunction qf -> dropFunctionInMetadata @b source qf
SMOFunctionPermission qf rn -> dropFunctionPermissionInMetadata @b source qf rn
SMONativeQuery nq -> dropNativeQueryInMetadata @b source nq
SMOTableObj qt tableObj ->
MetadataModifier $
tableMetadataSetter @b source qt %~ case tableObj of
MTORel rn _ -> dropRelationshipInMetadata rn
MTOPerm rn pt -> dropPermissionInMetadata rn pt
MTOTrigger trn -> dropEventTriggerInMetadata trn
MTOComputedField ccn -> dropComputedFieldInMetadata ccn
MTORemoteRelationship rn -> dropRemoteRelationshipInMetadata rn
dropListedQueryFromQueryCollections :: CollectionName -> ListedQuery -> MetadataModifier
dropListedQueryFromQueryCollections cName lq = MetadataModifier $ removeAndCleanupMetadata
where
removeAndCleanupMetadata m =
let newQueryCollection = filteredCollection (_metaQueryCollections m)
-- QueryCollections = InsOrdHashMap CollectionName CreateCollection
filteredCollection :: QueryCollections -> QueryCollections
filteredCollection qc = OMap.filter isNonEmptyCC $ OMap.adjust collectionModifier cName qc
collectionModifier :: CreateCollection -> CreateCollection
collectionModifier cc@CreateCollection {..} =
cc
{ _ccDefinition =
let oldQueries = _cdQueries _ccDefinition
in _ccDefinition
{ _cdQueries = filter (/= lq) oldQueries
}
}
isNonEmptyCC :: CreateCollection -> Bool
isNonEmptyCC = not . null . _cdQueries . _ccDefinition
cleanupAllowList :: MetadataAllowlist -> MetadataAllowlist
cleanupAllowList = OMap.filterWithKey (\_ _ -> OMap.member cName newQueryCollection)
cleanupRESTEndpoints :: Endpoints -> Endpoints
cleanupRESTEndpoints endpoints = OMap.filter (not . isFaultyQuery . _edQuery . _ceDefinition) endpoints
isFaultyQuery :: QueryReference -> Bool
isFaultyQuery QueryReference {..} = _qrCollectionName == cName && _qrQueryName == (_lqName lq)
in m
{ _metaQueryCollections = newQueryCollection,
_metaAllowlist = cleanupAllowList (_metaAllowlist m),
_metaRestEndpoints = cleanupRESTEndpoints (_metaRestEndpoints m)
}
runGetCatalogState ::
(MonadMetadataStorageQueryAPI m, MonadError QErr m) => GetCatalogState -> m EncJSON
runGetCatalogState _ =
encJFromJValue <$> liftEitherM fetchCatalogState
runSetCatalogState ::
(MonadMetadataStorageQueryAPI m, MonadError QErr m) => SetCatalogState -> m EncJSON
runSetCatalogState SetCatalogState {..} = do
liftEitherM $ updateCatalogState _scsType _scsState
pure successMsg
runSetMetricsConfig ::
(MonadIO m, CacheRWM m, MetadataM m, MonadError QErr m) =>
MetricsConfig ->
m EncJSON
runSetMetricsConfig mc = do
withNewInconsistentObjsCheck $
buildSchemaCache $
MetadataModifier $
metaMetricsConfig .~ mc
pure successMsg
runRemoveMetricsConfig ::
(MonadIO m, CacheRWM m, MetadataM m, MonadError QErr m) =>
m EncJSON
runRemoveMetricsConfig = do
withNewInconsistentObjsCheck $
buildSchemaCache $
MetadataModifier $
metaMetricsConfig .~ emptyMetricsConfig
pure successMsg
data TestTransformError
= RequestInitializationError HTTP.HttpException
| RequestTransformationError HTTP.Request TransformErrorBundle
runTestWebhookTransform ::
(QErrM m) =>
TestWebhookTransform ->
m EncJSON
runTestWebhookTransform (TestWebhookTransform env headers urlE payload rt _ sv) = do
url <- case urlE of
URL url' -> interpolateFromEnv env url'
EnvVar var ->
let err = throwError $ err400 NotFound "Missing Env Var"
in maybe err (pure . T.pack) $ Env.lookupEnv env var
headers' <- traverse (traverse (fmap TE.encodeUtf8 . interpolateFromEnv env . TE.decodeUtf8)) headers
result <- runExceptT $ do
initReq <- hoistEither $ first RequestInitializationError $ HTTP.mkRequestEither url
let req = initReq & HTTP.body .~ pure (J.encode payload) & HTTP.headers .~ headers'
reqTransform = requestFields rt
engine = templateEngine rt
reqTransformCtx = fmap mkRequestContext $ mkReqTransformCtx url sv engine
hoistEither $ first (RequestTransformationError req) $ applyRequestTransform reqTransformCtx reqTransform req
case result of
Right transformed ->
packTransformResult $ Right transformed
Left (RequestTransformationError _ err) -> packTransformResult (Left err)
-- NOTE: In the following case we have failed before producing a valid request.
Left (RequestInitializationError err) ->
let errorBundle =
TransformErrorBundle $
pure $
J.object ["error_code" J..= J.String "Request Initialization Error", "message" J..= J.String (tshow err)]
in throw400WithDetail ValidationFailed "request transform validation failed" $ J.toJSON errorBundle
interpolateFromEnv :: MonadError QErr m => Env.Environment -> Text -> m Text
interpolateFromEnv env url =
case AT.parseOnly parseEnvTemplate url of
Left _ -> throwError $ err400 ParseFailed "Invalid Url Template"
Right xs ->
let lookup' var = maybe (Left var) (Right . T.pack) $ Env.lookupEnv env (T.unpack var)
result = traverse (fmap indistinct . bitraverse lookup' pure) xs
err e =
throwError $
err400 NotFound $
"Missing Env Var: "
<> e
<> ". For security reasons when testing request options real environment variable values are not available. Please enter a mock value for "
<> e
<> " in the Sample Env Variables list. See https://hasura.io/docs/latest/graphql/core/actions/rest-connectors/#action-transforms-sample-context"
in either err (pure . fold) result
-- | Deserialize a JSON or x-www-form-urlencoded body from an
-- 'HTTP.Request' as 'J.Value'.
decodeBody :: Maybe BL.ByteString -> J.Value
decodeBody Nothing = J.Null
decodeBody (Just bs) = fromMaybe J.Null $ jsonToValue bs <|> formUrlEncodedToValue bs
-- | Attempt to decode a 'ByteString' as an Aeson 'Value'
jsonToValue :: BL.ByteString -> Maybe J.Value
jsonToValue bs = J.decode bs
-- | Quote a 'ByteString' then attempt to decode it as a JSON
-- String. This is necessary for 'x-www-form-urlencoded' bodies. They
-- are a list of key/value pairs encoded as a raw 'ByteString' with no
-- quoting, whereas JSON Strings must be quoted.
formUrlEncodedToValue :: BL.ByteString -> Maybe J.Value
formUrlEncodedToValue bs = J.decode ("\"" <> bs <> "\"")
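-- A small illustration of the decoding fallback above (hypothetical inputs):
--
--   jsonToValue "{\"a\": 1}"          == Just (J.Object ...)
--   formUrlEncodedToValue "a=1&b=2"   == Just (J.String "a=1&b=2")
--   decodeBody (Just "a=1&b=2")       == J.String "a=1&b=2"
--   decodeBody Nothing                == J.Null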
parseEnvTemplate :: AT.Parser [Either T.Text T.Text]
parseEnvTemplate = AT.many1 $ pEnv <|> pLit <|> fmap Right "{"
where
pEnv = fmap Left $ "{{" *> AT.takeWhile1 (/= '}') <* "}}"
pLit = fmap Right $ AT.takeWhile1 (/= '{')
indistinct :: Either a a -> a
indistinct = either id id
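-- A rough sketch of the templating behaviour above (hypothetical input):
--
--   AT.parseOnly parseEnvTemplate "https://{{HOST}}/webhook"
--     == Right [Right "https://", Left "HOST", Right "/webhook"]
--
-- With HOST bound to "example.com" in the supplied environment,
-- `interpolateFromEnv env "https://{{HOST}}/webhook"` evaluates to
-- "https://example.com/webhook"; an unbound variable produces the
-- "Missing Env Var" error described above.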
packTransformResult :: (MonadError QErr m) => Either TransformErrorBundle HTTP.Request -> m EncJSON
packTransformResult = \case
Right req ->
pure . encJFromJValue $
J.object
[ "webhook_url" J..= (req ^. HTTP.url),
"method" J..= (req ^. HTTP.method),
"headers" J..= (first CI.foldedCase <$> (req ^. HTTP.headers)),
"body" J..= decodeBody (req ^. HTTP.body)
]
Left err -> throw400WithDetail ValidationFailed "request transform validation failed" $ J.toJSON err
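-- On success, the value rendered by 'packTransformResult' has roughly this shape
-- (sketch, placeholder values):
--
--   { "webhook_url": "https://example.com/webhook",
--     "method": "POST",
--     "headers": [ ... ],
--     "body": { ... } }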