Mirror of https://github.com/hasura/graphql-engine.git (synced 2024-12-17 20:41:49 +03:00)
06b599b747
The metadata storage implementation for graphql-engine-multitenant.

- It uses a centralized PG database to store metadata of all tenants (instead of a per-tenant database)
- Similarly, it uses a single schema-sync listener thread per MT worker (instead of a listener thread per tenant) (PS: the processor thread is still spawned per tenant)
- Two new flags are introduced: `--metadataDatabaseUrl` and (optional) `--metadataDatabaseRetries`

Internally, a "metadata mode" is introduced to indicate an external/managed store vs a store managed by each pro-server.

To run:

- obtain the schema file (located at `pro/server/res/cloud/metadata_db_schema.sql`)
- apply the schema on a PG database
- set the `--metadataDatabaseUrl` flag to point to the above database
- run the MT executable

The schema (and its migrations) for the metadata db is managed outside the MT worker.

### New metadata

The following is the new portion of `Metadata` added:

```yaml
version: 3
metrics_config:
  analyze_query_variables: true
  analyze_response_body: false
api_limits:
  disabled: false
  depth_limit:
    global: 5
    per_role:
      user: 7
      editor: 9
  rate_limit:
    per_role:
      user:
        unique_params:
          - x-hasura-user-id
          - x-hasura-team-id
        max_reqs_per_min: 20
    global:
      unique_params: IP
      max_reqs_per_min: 10
```

- In Pro, the code around fetching/updating/syncing pro-config is removed
- That also means `hdb_pro_catalog` for keeping the config cache is not required; hence `hdb_pro_catalog` is also removed
- The required config comes from metadata / schema cache

### New Metadata APIs

- `set_api_limits`
- `remove_api_limits`
- `set_metrics_config`
- `remove_metrics_config`

#### `set_api_limits`

```yaml
type: set_api_limits
args:
  disabled: false
  depth_limit:
    global: 5
    per_role:
      user: 7
      editor: 9
  rate_limit:
    per_role:
      anonymous:
        max_reqs_per_min: 10
        unique_params: "ip"
      editor:
        max_reqs_per_min: 30
        unique_params:
          - x-hasura-user-id
      user:
        unique_params:
          - x-hasura-user-id
          - x-hasura-team-id
        max_reqs_per_min: 20
    global:
      unique_params: IP
      max_reqs_per_min: 10
```

#### `remove_api_limits`

```yaml
type: remove_api_limits
args: {}
```

#### `set_metrics_config`

```yaml
type: set_metrics_config
args:
  analyze_query_variables: true
  analyze_response_body: false
```

#### `remove_metrics_config`

```yaml
type: remove_metrics_config
args: {}
```

#### TODO

- [x] on-prem pro implementation for `MonadMetadataStorage`
- [x] move the project config from Lux to pro metadata (PR: #379)
- [ ] console changes for pro config/api limits, subscription workers (cc @soorajshankar @beerose)
- [x] address other minor TODOs
  - [x] TxIso for `MonadSourceResolver`
  - [x] enable EKG connection pool metrics
  - [x] add logging of connection info when sources are added?
  - [x] confirm if the `buildReason` for schema cache is correct
- [ ] testing
  - [x] 1.3 -> 1.4 cloud migration script (#465; PR: #508)
    - [x] one-time migration of existing metadata from users' db to centralized PG
    - [x] one-time migration of pro project config + api limits + regression tests from metrics API to metadata
  - [ ] integrate with infra team (WIP - cc @hgiasac)
  - [x] benchmark with 1000+ tenants + each tenant making read/update metadata query every second (PR: https://github.com/hasura/graphql-engine-mono/pull/411)
  - [ ] benchmark with few tenants having large metadata (100+ tables etc.)
- [ ] when user moves regions (https://github.com/hasura/lux/issues/1717)
  - [ ] metadata has to be migrated from one regional PG to another
  - [ ] migrate metrics data as well?
    - [ ] operation logs
    - [ ] regression test runs
- [ ] find a way to share the schema files with the infra team

Co-authored-by: Naveen Naidu <30195193+Naveenaidu@users.noreply.github.com>
GitOrigin-RevId: 39e8361f2c0e96e0f9e8f8fb45e6cc14857f31f1
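The "metadata mode" mentioned above is only described in prose in this commit message. As a rough, hedged illustration of the distinction (type and field names below are hypothetical and are not the ones used in the PR), it could be modeled like this:

```haskell
-- Illustrative sketch only: a sum type capturing the two metadata modes
-- described in the commit message. Not code from the PR.
module MetadataModeSketch where

data MetadataMode
  = -- | Centralized PG store shared by all tenants; selected by passing
    -- @--metadataDatabaseUrl@ (and optionally @--metadataDatabaseRetries@)
    -- to the MT worker.
    ExternalMetadataStore
      { emsDatabaseUrl :: String      -- ^ value of the --metadataDatabaseUrl flag
      , emsRetries     :: Maybe Int   -- ^ value of the optional --metadataDatabaseRetries flag
      }
  | -- | A metadata store managed by each pro-server itself
    -- (the pre-existing, non-centralized behaviour).
    SelfManagedMetadataStore
  deriving (Show, Eq)
```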
64 lines
2.1 KiB
Haskell
{-|
This module holds functions and data types used for logging at the GraphQL
layer, in contrast with logging at the HTTP server layer.
-}

module Hasura.GraphQL.Logging
  ( QueryLog(..)
  , MonadQueryLog(..)
  ) where

import qualified Data.Aeson                             as J
import qualified Language.GraphQL.Draft.Syntax          as G

import           Hasura.GraphQL.Transport.HTTP.Protocol (GQLReqUnparsed)
import           Hasura.Metadata.Class
import           Hasura.Prelude
import           Hasura.Server.Types                    (RequestId)
import           Hasura.Tracing                         (TraceT)

import qualified Hasura.GraphQL.Execute.Query           as EQ
import qualified Hasura.Logging                         as L


-- | A GraphQL query, the optionally generated SQL, and the request id make up
-- the 'QueryLog'.
data QueryLog
  = QueryLog
  { _qlQuery        :: !GQLReqUnparsed
  , _qlGeneratedSql :: !(Maybe (G.Name, EQ.PreparedSql))
  , _qlRequestId    :: !RequestId
  }

instance J.ToJSON QueryLog where
  toJSON (QueryLog q sql reqId) =
    J.object [ "query" J..= q
             , "generated_sql" J..= sql
             , "request_id" J..= reqId
             ]

instance L.ToEngineLog QueryLog L.Hasura where
  toEngineLog ql = (L.LevelInfo, L.ELTQueryLog, J.toJSON ql)

class Monad m => MonadQueryLog m where
  logQueryLog
    :: L.Logger L.Hasura
    -> GQLReqUnparsed
    -> Maybe (G.Name, EQ.PreparedSql)
    -- ^ Generated SQL, if any
    -> RequestId
    -- ^ unique identifier for a request. NOTE: this can be spoofed!
    -> m ()

instance MonadQueryLog m => MonadQueryLog (ExceptT e m) where
  logQueryLog l req sqlMap reqId = lift $ logQueryLog l req sqlMap reqId

instance MonadQueryLog m => MonadQueryLog (ReaderT r m) where
  logQueryLog l req sqlMap reqId = lift $ logQueryLog l req sqlMap reqId

instance MonadQueryLog m => MonadQueryLog (TraceT m) where
  logQueryLog l req sqlMap reqId = lift $ logQueryLog l req sqlMap reqId

instance MonadQueryLog m => MonadQueryLog (MetadataStorageT m) where
  logQueryLog l req sqlMap reqId = lift $ logQueryLog l req sqlMap reqId
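For context (not part of the module above), a concrete base monad would provide the actual `MonadQueryLog` instance that the transformer instances lift through. Below is a minimal sketch under two assumptions: the `App` monad is hypothetical, and `L.unLogger` from `Hasura.Logging` is used, as elsewhere in the codebase, to emit any value with a `ToEngineLog` instance from a `MonadIO` context.

```haskell
{-# LANGUAGE GeneralizedNewtypeDeriving #-}
module QueryLogSketch where -- hypothetical module, illustration only

import           Control.Monad.IO.Class  (MonadIO)
import           Hasura.GraphQL.Logging  (MonadQueryLog (..), QueryLog (..))
import qualified Hasura.Logging          as L

-- Hypothetical application monad wrapping IO.
newtype App a = App { runApp :: IO a }
  deriving (Functor, Applicative, Monad, MonadIO)

-- A transport calls 'logQueryLog' once per request; a concrete instance
-- simply hands the assembled 'QueryLog' to the engine logger.
instance MonadQueryLog App where
  logQueryLog logger req generatedSql reqId =
    L.unLogger logger $ QueryLog req generatedSql reqId
```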