incremental metadata: Clean up a few lingering loose ends

- Move MonadBase/MonadBaseControl instances for TxE into pg-client-hs
- Set the -qn2 RTS option by default to limit the parallel GC to 2
  threads
- Remove eventlog instrumentation
- Don’t rebuild the schema cache again after running a query that needs
  it to be rebuilt, since we do that explicitly now.
- Remove some redundant checks and relocate a couple of others.
Alexis King 2020-01-08 17:19:02 -06:00
parent 06df174e8a
commit e47a8c4b86
17 changed files with 42 additions and 163 deletions


@@ -49,7 +49,13 @@ common common-exe
ghc-options:
-threaded -rtsopts
-- Re. `-I2` see #2565
"-with-rtsopts=-N -I2"
--
-- `-qn2` limits the parallel GC to at most 2 capabilities. This came up in #3354/#3394, as the
-- parallel GC was causing significant performance overhead. Folks in #ghc on freenode advised
-- limiting the parallel GC to 2 or 3 capabilities as a very conservative choice, since more
-- than that is highly unlikely to ever be helpful. More benchmarking would be useful to know if
-- this is the right decision. It's possible it would be better to just turn it off completely.
"-with-rtsopts=-N -I2 -qn2"
if flag(profile)
ghc-prof-options: -rtsopts
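
For anyone who wants to confirm the setting took effect, GHC exposes the live RTS configuration through GHC.RTS.Flags in base; this small check is a sketch and not part of the commit:

    import GHC.RTS.Flags (getParFlags, parGcThreads)

    main :: IO ()
    main = do
      parFlags <- getParFlags
      -- with "-with-rtsopts=-N -I2 -qn2" this prints 2
      putStrLn $ "parallel GC threads: " ++ show (parGcThreads parFlags)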


@@ -1,4 +1,3 @@
{-# OPTIONS_GHC -Wwarn=orphans #-}
{-# LANGUAGE UndecidableInstances #-}
-- A module for postgres execution related types and operations
@@ -182,14 +181,6 @@ instance MonadTx (Q.TxE QErr) where
instance MonadIO (LazyTx e) where
liftIO = LTTx . liftIO
-- FIXME: move orphans into pg-client package
instance MonadBase IO (Q.TxE e) where
liftBase = liftIO
instance MonadBaseControl IO (Q.TxE e) where
type StM (Q.TxE e) a = StM (ReaderT Q.PGConn (ExceptT e IO)) a
liftBaseWith f = Q.TxE $ liftBaseWith \run -> f (run . Q.txHandler)
restoreM = Q.TxE . restoreM
instance MonadBase IO (LazyTx e) where
liftBase = liftIO
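
These orphan instances are what let monad-control combinators run inside a transaction, which is why they are kept rather than dropped (just moved upstream into pg-client-hs, per the commit message). A minimal sketch of the kind of code they enable, assuming the lifted-base package; tryTx is a hypothetical helper, not part of this diff:

    import Control.Exception.Lifted (SomeException, try)

    -- try requires MonadBaseControl IO, which Q.TxE e provides above
    tryTx :: Q.TxE e a -> Q.TxE e (Either SomeException a)
    tryTx = try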


@@ -102,7 +102,6 @@ showError qf = \case
FTAFirst -> "first argument of the function " <>> qf
FTANamed argName _ -> argName <<> " argument of the function " <>> qf
-- FIXME: ensure all call sites are updated
addComputedFieldP2Setup
:: (QErrM m)
=> S.HashSet QualifiedTable


@@ -265,10 +265,9 @@ runCreateEventTriggerQuery q = do
return successMsg
runDeleteEventTriggerQuery
:: (MonadTx m, UserInfoM m, CacheRWM m)
:: (MonadTx m, CacheRWM m)
=> DeleteEventTriggerQuery -> m EncJSON
runDeleteEventTriggerQuery (DeleteEventTriggerQuery name) = do
adminOnly
liftTx $ delEventTriggerFromCatalog name
withNewInconsistentObjsCheck buildSchemaCache
pure successMsg
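
runDeleteEventTriggerQuery now wraps its rebuild in withNewInconsistentObjsCheck, which runs the given action and then fails if new inconsistent metadata appeared. A rough sketch of that intent only (the real implementation differs; scInconsistentObjs and throw400 are names from this codebase):

    import Control.Monad (unless)
    import Data.List ((\\))

    withNewInconsistentObjsCheck' :: (QErrM m, CacheRWM m) => m a -> m a
    withNewInconsistentObjsCheck' action = do
      before <- scInconsistentObjs <$> askSchemaCache
      result <- action
      after  <- scInconsistentObjs <$> askSchemaCache
      -- fail only on inconsistencies introduced by this action
      unless (null (after \\ before)) $
        throw400 Unexpected "cache rebuild introduced new inconsistent metadata objects"
      pure result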


@@ -46,24 +46,6 @@ convColSpec :: FieldInfoMap FieldInfo -> PermColSpec -> [PGCol]
convColSpec _ (PCCols cols) = cols
convColSpec cim PCStar = map pgiColumn $ getCols cim
-- FIXME: move check into collecting code for addPermP1 (probably buildSchemaCache)
assertPermNotDefined
:: (MonadError QErr m)
=> RoleName
-> PermAccessor a
-> TableInfo
-> m ()
assertPermNotDefined roleName pa tableInfo =
when (permissionIsDefined rpi pa || roleName == adminRole)
$ throw400 AlreadyExists $ mconcat
[ "'" <> T.pack (show $ permAccToType pa) <> "'"
, " permission on " <>> _tciName (_tiCoreInfo tableInfo)
, " for role " <>> roleName
, " already exists"
]
where
rpi = M.lookup roleName $ _tiRolePermInfoMap tableInfo
permissionIsDefined
:: Maybe RolePermInfo -> PermAccessor a -> Bool
permissionIsDefined rpi pa =
@@ -287,20 +269,6 @@ class (ToJSON a) => IsPerm a where
:: DropPerm a -> PermAccessor (PermInfo a)
getPermAcc2 _ = permAccessor
-- FIXME: Push into addPermP1
validateViewPerm
:: (IsPerm a, QErrM m) => PermDef a -> TableCoreInfo -> m ()
validateViewPerm permDef tableInfo =
case permAcc of
PASelect -> return ()
PAInsert -> mutableView tn viIsInsertable viewInfo "insertable"
PAUpdate -> mutableView tn viIsUpdatable viewInfo "updatable"
PADelete -> mutableView tn viIsDeletable viewInfo "deletable"
where
tn = _tciName tableInfo
viewInfo = _tciViewInfo tableInfo
permAcc = getPermAcc1 permDef
addPermP2 :: (IsPerm a, MonadTx m, HasSystemDefined m) => QualifiedTable -> PermDef a -> m ()
addPermP2 tn pd = do
let pt = permAccToType $ getPermAcc1 pd
@@ -311,7 +279,6 @@ runCreatePerm
:: (UserInfoM m, CacheRWM m, IsPerm a, MonadTx m, HasSystemDefined m)
=> CreatePerm a -> m EncJSON
runCreatePerm (WithTable tn pd) = do
adminOnly
addPermP2 tn pd
let pt = permAccToType $ getPermAcc1 pd
buildSchemaCacheFor $ MOTableObj tn (MTOPerm (pdRole pd) pt)


@@ -29,10 +29,9 @@ renameRelP2 qt newRN relInfo = withNewInconsistentObjsCheck $ do
oldRN = riName relInfo
runRenameRel
:: (MonadTx m, CacheRWM m, UserInfoM m)
:: (MonadTx m, CacheRWM m)
=> RenameRel -> m EncJSON
runRenameRel (RenameRel qt rn newRN) = do
adminOnly
tabInfo <- askTableCoreInfo qt
ri <- askRelType (_tciFieldInfoMap tabInfo) rn ""
withNewInconsistentObjsCheck do


@@ -54,8 +54,6 @@ import Hasura.RQL.Types.Catalog
import Hasura.RQL.Types.QueryCollection
import Hasura.SQL.Types
import Debug.Trace
buildRebuildableSchemaCache
:: (MonadIO m, MonadUnique m, MonadTx m, HasHttpManager m, HasSQLGenCtx m)
=> m (RebuildableSchemaCache m)
@@ -82,18 +80,12 @@ instance (Monad m) => CacheRM (CacheRWT m) where
instance (MonadIO m, MonadTx m, MonadUnique m) => CacheRWM (CacheRWT m) where
buildSchemaCacheWithOptions buildReason = CacheRWT do
liftIO $ traceEventIO "START refresh"
RebuildableSchemaCache _ invalidationMap rule <- get
catalogMetadata <- liftTx fetchCatalogData
liftIO $ traceEventIO "START build"
result <- lift $ flip runReaderT buildReason $ Inc.build rule (catalogMetadata, invalidationMap)
let schemaCache = Inc.result result
liftIO $ traceEventIO "STOP build"
liftIO $ traceEventIO "START prune"
let !prunedInvalidationMap = pruneInvalidationMap schemaCache invalidationMap
liftIO $ traceEventIO "STOP prune"
liftIO $ traceEventIO "STOP refresh"
put $ RebuildableSchemaCache schemaCache prunedInvalidationMap (Inc.rebuildRule result)
prunedInvalidationMap = pruneInvalidationMap schemaCache invalidationMap
put $! RebuildableSchemaCache schemaCache prunedInvalidationMap (Inc.rebuildRule result)
where
pruneInvalidationMap schemaCache = M.filterWithKey \name _ ->
M.member name (scRemoteSchemas schemaCache)
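
The shape of the incremental API used here: Inc.build runs a rule on an input and returns a Result carrying both the output and a replacement rule that reuses cached work on the next run. A small illustrative driver under those assumed signatures:

    -- thread the rebuild rule through successive inputs so each run can reuse
    -- whatever the previous run cached
    buildAll :: Monad m => Inc.Rule m a b -> [a] -> m [b]
    buildAll _    []       = pure []
    buildAll rule (x : xs) = do
      result <- Inc.build rule x
      rest   <- buildAll (Inc.rebuildRule result) xs
      pure (Inc.result result : rest)
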
@@ -135,14 +127,11 @@ buildSchemaCacheRule = proc inputs -> do
computedFields = catalogMetadata
-- tables
bindA -< liftIO $ traceEventIO "START tables"
tableRawInfos <- buildTableCache -< tables
bindA -< liftIO $ traceEventIO "STOP tables"
-- relationships and computed fields
let relationshipsByTable = M.groupOn _crTable relationships
computedFieldsByTable = M.groupOn (_afcTable . _cccComputedField) computedFields
bindA -< liftIO $ traceEventIO "START fields"
tableCoreInfos <- (tableRawInfos >- returnA)
>-> (\info -> (info, relationshipsByTable) >- alignExtraTableInfo mkRelationshipMetadataObject)
>-> (\info -> (info, computedFieldsByTable) >- alignExtraTableInfo mkComputedFieldMetadataObject)
@@ -151,7 +140,6 @@ buildSchemaCacheRule = proc inputs -> do
allFields <- addNonColumnFields -<
(tableRawInfos, columns, tableRelationships, tableComputedFields)
returnA -< tableRawInfo { _tciFieldInfoMap = allFields }) |)
bindA -< liftIO $ traceEventIO "STOP fields"
-- permissions and event triggers
tableCoreInfosDep <- Inc.newDependency -< tableCoreInfos
@@ -159,15 +147,11 @@ buildSchemaCacheRule = proc inputs -> do
>-> (\info -> (info, M.groupOn _cpTable permissions) >- alignExtraTableInfo mkPermissionMetadataObject)
>-> (\info -> (info, M.groupOn _cetTable eventTriggers) >- alignExtraTableInfo mkEventTriggerMetadataObject)
>-> (| Inc.keyed (\_ ((tableCoreInfo, tablePermissions), tableEventTriggers) -> do
bindA -< liftIO $ traceEventIO "START permissions"
let tableName = _tciName tableCoreInfo
tableFields = _tciFieldInfoMap tableCoreInfo
permissionInfos <- buildTablePermissions -<
(tableCoreInfosDep, tableName, tableFields, HS.fromList tablePermissions)
bindA -< liftIO $ traceEventIO "STOP permissions"
bindA -< liftIO $ traceEventIO "START event triggers"
eventTriggerInfos <- buildTableEventTriggers -< (tableCoreInfo, tableEventTriggers)
bindA -< liftIO $ traceEventIO "STOP event triggers"
returnA -< TableInfo
{ _tiCoreInfo = tableCoreInfo
, _tiRolePermInfoMap = permissionInfos
@@ -175,8 +159,6 @@ buildSchemaCacheRule = proc inputs -> do
}) |)
-- sql functions
bindA -< liftIO $ traceEventIO "START functions"
let tableNames = HS.fromList $ M.keys tableCache
functionCache <- (mapFromL _cfFunction functions >- returnA)
>-> (| Inc.keyed (\_ (CatalogFunction qf systemDefined config funcDefs) -> do
let definition = toJSON $ TrackFunction qf
@@ -186,14 +168,12 @@ buildSchemaCacheRule = proc inputs -> do
(| withRecordInconsistency (
(| modifyErrA (do
rawfi <- bindErrorA -< handleMultipleFunctions qf funcDefs
(fi, dep) <- bindErrorA -<
trackFunctionP2Setup tableNames qf systemDefined config rawfi
(fi, dep) <- bindErrorA -< mkFunctionInfo qf systemDefined config rawfi
recordDependencies -< (metadataObject, schemaObject, [dep])
returnA -< fi)
|) addFunctionContext)
|) metadataObject) |)
>-> (\infos -> M.catMaybes infos >- returnA)
bindA -< liftIO $ traceEventIO "STOP functions"
-- allow list
let allowList = allowlistDefs
@@ -202,18 +182,14 @@ buildSchemaCacheRule = proc inputs -> do
& HS.fromList
-- build GraphQL context with tables and functions
bindA -< liftIO $ traceEventIO "START GQL"
baseGQLSchema <- bindA -< GS.mkGCtxMap tableCache functionCache
bindA -< liftIO $ traceEventIO "STOP GQL"
-- remote schemas
bindA -< liftIO $ traceEventIO "START remote schemas"
let invalidatedRemoteSchemas = flip map remoteSchemas \remoteSchema ->
(M.lookup (_arsqName remoteSchema) invalidationMap, remoteSchema)
(remoteSchemaMap, gqlSchema, remoteGQLSchema) <-
(| foldlA' (\schemas schema -> (schemas, schema) >- addRemoteSchema)
|) (M.empty, baseGQLSchema, GC.emptyGCtx) invalidatedRemoteSchemas
bindA -< liftIO $ traceEventIO "STOP remote schemas"
returnA -< BuildOutputs
{ _boTables = tableCache
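
buildSchemaCacheRule is written in GHC's arrow notation, and bindA is how a monadic effect is embedded in a rule; that is why removing the eventlog instrumentation above means deleting bindA lines rather than plain statements. The same proc/-< shape with ordinary Kleisli arrows, as a self-contained reference:

    {-# LANGUAGE Arrows #-}

    import Control.Arrow (Kleisli (..), returnA)

    -- logs its input, then returns its length; `Kleisli putStrLn -<` plays the
    -- role that `bindA -< liftIO ...` plays in the rule above
    describe :: Kleisli IO String Int
    describe = proc s -> do
      () <- Kleisli putStrLn -< s
      returnA -< length s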


@@ -23,8 +23,6 @@ import Hasura.RQL.Types
import Hasura.RQL.Types.Catalog
import Hasura.SQL.Types
import Debug.Trace
buildTablePermissions
:: ( ArrowChoice arr, Inc.ArrowDistribute arr, Inc.ArrowCache m arr
, ArrowWriter (Seq CollectedInfo) arr, MonadTx m, MonadReader BuildReason m )
@@ -95,14 +93,10 @@ buildPermission = Inc.cache proc (tableCache, tableName, tableFields, permission
(| withPermission (do
bindErrorA -< when (roleName == adminRole) $
throw400 ConstraintViolation "cannot define permission for admin role"
bindA -< liftTx . liftIO $ traceEventIO "START permissions/build/decode"
perm <- bindErrorA -< decodeValue pDef
bindA -< liftTx . liftIO $ traceEventIO "STOP permissions/build/decode"
let permDef = PermDef roleName perm Nothing
bindA -< liftTx . liftIO $ traceEventIO "START permissions/build/info"
(info, dependencies) <- liftEitherA <<< Inc.bindDepend -< runExceptT $
runTableCoreCacheRT (buildPermInfo tableName tableFields permDef) tableCache
bindA -< liftTx . liftIO $ traceEventIO "STOP permissions/build/info"
tellA -< Seq.fromList dependencies
rebuildViewsIfNeeded -< (tableName, permDef, info)
returnA -< info)
@@ -114,8 +108,6 @@ rebuildViewsIfNeeded
, Inc.Cacheable a, IsPerm a, Inc.Cacheable (PermInfo a) )
=> (QualifiedTable, PermDef a, PermInfo a) `arr` ()
rebuildViewsIfNeeded = Inc.cache $ arrM \(tableName, permDef, info) -> do
liftTx . liftIO $ traceEventIO "START permissions/build/views"
buildReason <- ask
when (buildReason == CatalogUpdate) $
addPermP2Setup tableName permDef info
liftTx . liftIO $ traceEventIO "STOP permissions/build/views"


@@ -12,7 +12,6 @@ module Hasura.RQL.DDL.Schema.Catalog
import Hasura.Prelude
import qualified Data.Text as T
import qualified Database.PG.Query as Q
import Data.Aeson
@@ -29,19 +28,9 @@ import Hasura.RQL.Types.Error
import Hasura.RQL.Types.SchemaCache
import Hasura.SQL.Types
import Debug.Trace
fetchCatalogData :: (MonadTx m) => m CatalogMetadata
fetchCatalogData = do
liftTx . liftIO $ traceEventIO "START fetch"
liftTx . liftIO $ traceEventIO "START fetch/query"
metadataBytes <- (liftTx $ runIdentity . Q.getRow <$> Q.withQE defaultTxErrorHandler $(Q.sqlFromFile "src-rsr/catalog_metadata.sql") () True)
liftTx . liftIO $ traceEventIO "STOP fetch/query"
liftTx . liftIO $ traceEventIO "START fetch/decode"
let !decodedValue = force (eitherDecodeStrict' metadataBytes)
liftTx . liftIO $ traceEventIO "STOP fetch/decode"
liftTx . liftIO $ traceEventIO "STOP fetch"
decodedValue `onLeft` \err -> throw500 (T.pack err)
fetchCatalogData = liftTx $ Q.getAltJ . runIdentity . Q.getRow <$> Q.withQE defaultTxErrorHandler
$(Q.sqlFromFile "src-rsr/catalog_metadata.sql") () True
saveTableToCatalog :: (MonadTx m) => QualifiedTable -> SystemDefined -> Bool -> TableConfig -> m ()
saveTableToCatalog (QualifiedObject sn tn) systemDefined isEnum config = liftTx $
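
The new one-liner works because pg-client-hs's AltJ wrapper decodes a json(b) column through aeson as part of row extraction, which is what makes the hand-rolled eitherDecodeStrict'/force dance deleted above unnecessary. The general shape, hedged (fetchJsonRow is illustrative, not part of this codebase):

    -- decode a single-row, single-column json(b) result straight into a FromJSON value
    fetchJsonRow :: (FromJSON a, MonadTx m) => Q.Query -> m a
    fetchJsonRow query = liftTx $
      Q.getAltJ . runIdentity . Q.getRow <$> Q.withQE defaultTxErrorHandler query () True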


@@ -22,7 +22,6 @@ import qualified Language.GraphQL.Draft.Syntax as G
import qualified Control.Monad.Validate as MV
import qualified Data.HashMap.Strict as M
import qualified Data.HashSet as S
import qualified Data.Sequence as Seq
import qualified Data.Text as T
import qualified Database.PG.Query as Q
@@ -71,7 +70,8 @@ validateFuncArgs args =
invalidArgs = filter (not . G.isValidName) $ map G.Name funcArgsText
data FunctionIntegrityError
= FunctionVariadic
= FunctionNameNotGQLCompliant
| FunctionVariadic
| FunctionReturnNotCompositeType
| FunctionReturnNotSetof
| FunctionReturnNotSetofTable
@@ -101,15 +101,11 @@ mkFunctionInfo qf systemDefined config rawFuncInfo =
throwValidateError = MV.dispute . pure
validateFunction = do
-- throw error if function has variadic arguments
unless (G.isValidName $ GS.qualObjectToName qf) $ throwValidateError FunctionNameNotGQLCompliant
when hasVariadic $ throwValidateError FunctionVariadic
-- throw error if return type is not composite type
when (retTyTyp /= PGKindComposite) $ throwValidateError FunctionReturnNotCompositeType
-- throw error if function does not return SETOF
unless retSet $ throwValidateError FunctionReturnNotSetof
-- throw error if return type is not a valid table
unless returnsTab $ throwValidateError FunctionReturnNotSetofTable
-- throw error if function type is VOLATILE
when (funTy == FTVOLATILE) $ throwValidateError FunctionVolatile
-- validate function argument names
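
These checks accumulate through monad-validate rather than failing fast: MV.dispute records an error and keeps going, so one tracking attempt reports every integrity violation at once. A self-contained example of the same pattern:

    import Control.Monad (when)
    import qualified Control.Monad.Validate as MV

    checkPort :: Monad m => Int -> MV.ValidateT [String] m ()
    checkPort p = do
      when (p < 1)     $ MV.dispute ["port below 1: " ++ show p]
      when (p > 65535) $ MV.dispute ["port above 65535: " ++ show p]

    -- MV.runValidate (checkPort 0) == Left ["port below 1: 0"]
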
@@ -147,6 +143,7 @@ mkFunctionInfo qf systemDefined config rawFuncInfo =
<> makeReasonMessage allErrors showOneError
showOneError = \case
FunctionNameNotGQLCompliant -> "function name is not a legal GraphQL identifier"
FunctionVariadic -> "functions with \"VARIADIC\" parameters are not supported"
FunctionReturnNotCompositeType -> "the function does not return a \"COMPOSITE\" type"
FunctionReturnNotSetof -> "the function does not return a SETOF"
@@ -205,35 +202,9 @@ trackFunctionP1 qf = do
when (M.member qt $ scTables rawSchemaCache) $
throw400 NotSupported $ "table with name " <> qf <<> " already exists"
trackFunctionP2Setup
:: (QErrM m)
=> HashSet QualifiedTable
-- ^ the set of all tracked tables
-> QualifiedFunction
-> SystemDefined
-> FunctionConfig
-> RawFunctionInfo
-> m (FunctionInfo, SchemaDependency)
trackFunctionP2Setup trackedTableNames qf systemDefined config rawfi = do
(fi, deps) <- mkFunctionInfo qf systemDefined config rawfi
-- FIXME: eliminate redundant check now handled by dependencies
unless (fiReturnType fi `S.member` trackedTableNames) $
throw400 NotExists $ "table " <> fiReturnType fi <<> " is not tracked"
pure (fi, deps)
trackFunctionP2 :: (MonadTx m, CacheRWM m, HasSystemDefined m)
=> QualifiedFunction -> FunctionConfig -> m EncJSON
trackFunctionP2 qf config = do
let funcNameGQL = GS.qualObjectToName qf
-- check function name is in compliance with GraphQL spec
-- FIXME: move check into trackFunctionP2Setup
unless (G.isValidName funcNameGQL) $ throw400 NotSupported $
"function name " <> qf <<> " is not in compliance with GraphQL spec"
-- check for conflicts in remote schema
-- FIXME: ensure check is preserved
-- GS.checkConflictingNode defGCtx funcNameGQL
-- fetch function info
systemDefined <- askSystemDefined
liftTx $ saveFunctionToCatalog qf config systemDefined
buildSchemaCacheFor $ MOFunction qf
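
On the FIXME about the redundant check: mkFunctionInfo returns a SchemaDependency on the function's return table, and the cache build reports missing dependencies itself, which is what made the explicit S.member check in the deleted trackFunctionP2Setup redundant. Roughly the dependency in question (a sketch; the exact reason constructor is an assumption):

    -- the return table becomes an ordinary schema dependency, checked by the
    -- cache build like any other
    returnTableDependency :: QualifiedTable -> SchemaDependency
    returnTableDependency table = SchemaDependency (SOTable table) DRTable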


@@ -177,23 +177,21 @@ unTrackExistingTableOrViewP1 (UntrackTable vn _) = do
unTrackExistingTableOrViewP2
:: (CacheRWM m, MonadTx m)
=> UntrackTable -> m EncJSON
unTrackExistingTableOrViewP2 (UntrackTable qtn cascade) = do
unTrackExistingTableOrViewP2 (UntrackTable qtn cascade) = withNewInconsistentObjsCheck do
sc <- askSchemaCache
-- Get relational, query template and function dependants
let allDeps = getDependentObjs sc (SOTable qtn)
indirectDeps = filter (not . isDirectDep) allDeps
-- Report back with an error if cascade is not set
when (indirectDeps /= [] && not (or cascade)) $ reportDepsExt indirectDeps []
-- Purge all the dependents from state
mapM_ purgeDependentObject indirectDeps
-- delete the table and its direct dependencies
delTableAndDirectDeps qtn
buildSchemaCache
return successMsg
pure successMsg
where
isDirectDep = \case
(SOTableObj dtn _) -> qtn == dtn


@@ -32,8 +32,6 @@ module Hasura.RQL.Types
, askEventTriggerInfo
, askTabInfoFromTrigger
, adminOnly
, HeaderObj
, liftMaybe
@@ -287,13 +285,6 @@ askFieldInfo m f =
askCurRole :: (UserInfoM m) => m RoleName
askCurRole = userRole <$> askUserInfo
adminOnly :: (UserInfoM m, QErrM m) => m ()
adminOnly = do
curRole <- askCurRole
unless (curRole == adminRole) $ throw400 AccessDenied errMsg
where
errMsg = "restricted access : admin only"
successMsg :: EncJSON
successMsg = "{\"message\":\"success\"}"


@@ -284,7 +284,7 @@ v1QueryHandler query = do
scRef <- scCacheRef . hcServerCtx <$> ask
logger <- scLogger . hcServerCtx <$> ask
res <- bool (fst <$> dbAction) (withSCUpdate scRef logger dbAction) $
queryNeedsReload query
queryModifiesSchemaCache query
return $ HttpResponse res Nothing
where
-- Hit postgres


@@ -184,14 +184,21 @@ runQuery pgExecCtx instanceId userInfo sc hMgr sqlGenCtx systemDefined query = do
where
runCtx = RunCtx userInfo hMgr sqlGenCtx
withReload r = do
when (queryNeedsReload query) $ do
when (queryModifiesSchemaCache query) $ do
e <- liftIO $ runExceptT $ runLazyTx pgExecCtx Q.ReadWrite
$ liftTx $ recordSchemaUpdate instanceId
liftEither e
return r
queryNeedsReload :: RQLQuery -> Bool
queryNeedsReload (RQV1 qi) = case qi of
-- | A predicate that determines whether the given query might modify/rebuild the schema cache. If
-- so, it needs to acquire the global lock on the schema cache so that other queries do not modify
-- it concurrently.
--
-- Ideally, we would enforce this using the type system — queries for which this function returns
-- 'False' should not be allowed to modify the schema cache. But for now we just ensure consistency
-- by hand.
queryModifiesSchemaCache :: RQLQuery -> Bool
queryModifiesSchemaCache (RQV1 qi) = case qi of
RQAddExistingTableOrView _ -> True
RQTrackTable _ -> True
RQUntrackTable _ -> True
@@ -256,8 +263,8 @@ queryNeedsReload (RQV1 qi) = case qi of
RQDumpInternalState _ -> False
RQBulk qs -> any queryNeedsReload qs
queryNeedsReload (RQV2 qi) = case qi of
RQBulk qs -> any queryModifiesSchemaCache qs
queryModifiesSchemaCache (RQV2 qi) = case qi of
RQV2TrackTable _ -> True
RQV2SetTableCustomFields _ -> True
RQV2TrackFunction _ -> True
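
One way to realize the type-system enforcement the doc comment on queryModifiesSchemaCache wishes for is to index queries by their cache access; this is purely a hypothetical sketch, not this codebase's types:

    {-# LANGUAGE DataKinds, GADTs, KindSignatures #-}

    data CacheAccess = CacheRead | CacheWrite

    -- each constructor fixes, at the type level, whether it may touch the cache
    data SomeQuery (c :: CacheAccess) where
      RunSelect  :: String -> SomeQuery 'CacheRead
      TrackTable :: String -> SomeQuery 'CacheWrite

    -- only 'CacheWrite queries can even be handed to the locking runner
    runWithCacheLock :: SomeQuery 'CacheWrite -> IO ()
    runWithCacheLock (TrackTable name) = putStrLn ("tracking " ++ name)
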
@@ -304,16 +311,10 @@ runQueryM
)
=> RQLQuery
-> m EncJSON
runQueryM rq =
withPathK "args" $ runQueryM' <* rebuildGCtx
runQueryM rq = withPathK "args" $ case rq of
RQV1 q -> runQueryV1M q
RQV2 q -> runQueryV2M q
where
-- FIXME: rethink this
rebuildGCtx = when (queryNeedsReload rq) buildSchemaCache
runQueryM' = case rq of
RQV1 q -> runQueryV1M q
RQV2 q -> runQueryV2M q
runQueryV1M = \case
RQAddExistingTableOrView q -> runTrackTableQ q
RQTrackTable q -> runTrackTableQ q


@@ -1,4 +1,4 @@
-- {-# OPTIONS_GHC -fforce-recomp #-}
{-# OPTIONS_GHC -fforce-recomp #-}
module Hasura.Server.Version
( currentVersion
, consoleVersion


@@ -17,7 +17,7 @@ rebuild-ghc-options: true
extra-deps:
# use https URLs so that build systems can clone these repos
- git: https://github.com/hasura/pg-client-hs.git
commit: bd21e66d8197af381a6c0b493e22d611ed1fa386
commit: 70a849d09bea9461e72c5a5bbde06df65aab61c0
- git: https://github.com/hasura/graphql-parser-hs.git
commit: f3d9b645efd9adb143e2ad4c6b73bded1578a4e9
- git: https://github.com/hasura/ci-info-hs.git


@@ -13,11 +13,11 @@ packages:
git: https://github.com/hasura/pg-client-hs.git
pantry-tree:
size: 1107
sha256: aaa78473e95368c73c4f6cd5d85f1016af5783090d804aeb7e0d244c02dd3354
commit: bd21e66d8197af381a6c0b493e22d611ed1fa386
sha256: 7dd16080034ac2a94763106ead8296105b538424533bed5ba3567824e2dfe4ea
commit: 70a849d09bea9461e72c5a5bbde06df65aab61c0
original:
git: https://github.com/hasura/pg-client-hs.git
commit: bd21e66d8197af381a6c0b493e22d611ed1fa386
commit: 70a849d09bea9461e72c5a5bbde06df65aab61c0
- completed:
cabal-file:
size: 3364