diff --git a/CHANGELOG.md b/CHANGELOG.md
index 91b933b9509..8ea72479241 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,8 @@
 (Add entries here in the order of: server, console, cli, docs, others)
+- server: add new `--conn-lifetime` and `HASURA_GRAPHQL_PG_CONN_LIFETIME` options for expiring connections after some amount of active time (#5087)
+- server: shrink libpq connection request/response buffers back to 1MB if they grow beyond 2MB, fixing leak-like behavior on active servers (#5087)
 - docs: add note for managed databases in postgres requirements (close #1677, #3783) (#5228)
diff --git a/scripts/dev.sh b/scripts/dev.sh
index 226071d9362..a636427cc9b 100755
--- a/scripts/dev.sh
+++ b/scripts/dev.sh
@@ -142,16 +142,19 @@ fi
 # export for psql, etc.
 export PGPASSWORD=postgres
-DB_URL="postgres://postgres:$PGPASSWORD@127.0.0.1:$PG_PORT/postgres"
+# The URL for the postgres server we might launch
+CONTAINER_DB_URL="postgres://postgres:$PGPASSWORD@127.0.0.1:$PG_PORT/postgres"
+# ... but we might like to use a different PG instance when just launching graphql-engine:
+HASURA_GRAPHQL_DATABASE_URL=${HASURA_GRAPHQL_DATABASE_URL-$CONTAINER_DB_URL}
 PG_CONTAINER_NAME="hasura-dev-postgres-$PG_PORT"
-# We can remove psql as a dependency:
-DOCKER_PSQL="docker exec -u postgres -it $PG_CONTAINER_NAME psql -p $PG_PORT"
+# We can remove psql as a dependency by using it from the (running) PG container:
+DOCKER_PSQL="docker exec -u postgres -it $PG_CONTAINER_NAME psql $HASURA_GRAPHQL_DATABASE_URL"
-function wait_docker_postgres {
+function wait_postgres {
   echo -n "Waiting for postgres to come up"
-  until $DOCKER_PSQL postgres -c '\l' &>/dev/null; do
+  until ( $DOCKER_PSQL -c '\l' || psql $HASURA_GRAPHQL_DATABASE_URL -c '\l') &>/dev/null; do
     echo -n '.' && sleep 0.2
   done
   echo " Ok"
@@ -202,18 +205,20 @@ if [ "$MODE" = "graphql-engine" ]; then
   }
   trap cleanup EXIT
-
+  export HASURA_GRAPHQL_DATABASE_URL # Defined above
   export HASURA_GRAPHQL_SERVER_PORT=${HASURA_GRAPHQL_SERVER_PORT-8181}
-  echo_pretty "We will connect to postgres container '$PG_CONTAINER_NAME'"
-  echo_pretty "If you haven't yet, please launch a postgres container in a separate terminal with:"
+  echo_pretty "We will connect to postgres at '$HASURA_GRAPHQL_DATABASE_URL'"
+  echo_pretty "If you haven't overridden HASURA_GRAPHQL_DATABASE_URL, you can"
+  echo_pretty "launch a fresh postgres container for us to connect to, in a"
+  echo_pretty "separate terminal with:"
   echo_pretty " $ $0 postgres"
-  echo_pretty "or press CTRL-C and invoke graphql-engine manually"
   echo_pretty ""
-  RUN_INVOCATION=(cabal new-run --project-file=cabal.project.dev-sh --RTS -- exe:graphql-engine +RTS -N -T -RTS
-    --database-url="$DB_URL" serve
-    --enable-console --console-assets-dir "$PROJECT_ROOT/console/static/dist")
+  RUN_INVOCATION=(cabal new-run --project-file=cabal.project.dev-sh --RTS --
+    exe:graphql-engine +RTS -N -T -s -RTS serve
+    --enable-console --console-assets-dir "$PROJECT_ROOT/console/static/dist"
+    )
   echo_pretty 'About to do:'
   echo_pretty ' $ cabal new-build --project-file=cabal.project.dev-sh exe:graphql-engine'
@@ -221,7 +226,7 @@ if [ "$MODE" = "graphql-engine" ]; then
   echo_pretty ''
   cabal new-build --project-file=cabal.project.dev-sh exe:graphql-engine
-  wait_docker_postgres
+  wait_postgres
   # Print helpful info after startup logs so it's visible:
   {
@@ -337,7 +342,7 @@ EOL
 if [ "$MODE" = "postgres" ]; then
   launch_postgres_container
-  wait_docker_postgres
+  wait_postgres
   echo_pretty "Postgres logs will start to show up in realtime here. Press CTRL-C to exit and "
   echo_pretty "shutdown this container."
   echo_pretty ""
@@ -347,7 +352,7 @@ if [ "$MODE" = "postgres" ]; then
   echo_pretty " $ PGPASSWORD="$PGPASSWORD" psql -h 127.0.0.1 -p "$PG_PORT" postgres -U postgres"
   echo_pretty ""
   echo_pretty "Here is the database URL:"
-  echo_pretty " $DB_URL"
+  echo_pretty " $CONTAINER_DB_URL"
   echo_pretty ""
   echo_pretty "If you want to launch a 'graphql-engine' that works with this database:"
   echo_pretty " $ $0 graphql-engine"
@@ -375,19 +380,19 @@ elif [ "$MODE" = "test" ]; then
   # rebuilding twice... ugh
   cabal new-build --project-file=cabal.project.dev-sh exe:graphql-engine test:graphql-engine-tests
   launch_postgres_container
-  wait_docker_postgres
+  wait_postgres
   # These also depend on a running DB:
   if [ "$RUN_UNIT_TESTS" = true ]; then
     echo_pretty "Running Haskell test suite"
-    HASURA_GRAPHQL_DATABASE_URL="$DB_URL" cabal new-run --project-file=cabal.project.dev-sh -- test:graphql-engine-tests
+    HASURA_GRAPHQL_DATABASE_URL="$CONTAINER_DB_URL" cabal new-run --project-file=cabal.project.dev-sh -- test:graphql-engine-tests
   fi
   if [ "$RUN_INTEGRATION_TESTS" = true ]; then
     GRAPHQL_ENGINE_TEST_LOG=/tmp/hasura-dev-test-engine.log
     echo_pretty "Starting graphql-engine, logging to $GRAPHQL_ENGINE_TEST_LOG"
     export HASURA_GRAPHQL_SERVER_PORT=8088
-    cabal new-run --project-file=cabal.project.dev-sh -- exe:graphql-engine --database-url="$DB_URL" serve --stringify-numeric-types \
+    cabal new-run --project-file=cabal.project.dev-sh -- exe:graphql-engine --database-url="$CONTAINER_DB_URL" serve --stringify-numeric-types \
       --enable-console --console-assets-dir ../console/static/dist \
       &> "$GRAPHQL_ENGINE_TEST_LOG" &
     GRAPHQL_ENGINE_PID=$!
@@ -449,7 +454,7 @@ elif [ "$MODE" = "test" ]; then
   # TODO MAYBE: fix deprecation warnings, make them an error
-  if pytest -W ignore::DeprecationWarning --hge-urls http://127.0.0.1:$HASURA_GRAPHQL_SERVER_PORT --pg-urls "$DB_URL" $PYTEST_ARGS; then
+  if pytest -W ignore::DeprecationWarning --hge-urls http://127.0.0.1:$HASURA_GRAPHQL_SERVER_PORT --pg-urls "$CONTAINER_DB_URL" $PYTEST_ARGS; then
     PASSED=true
   else
     PASSED=false
diff --git a/server/cabal.project b/server/cabal.project
index abb20a5aded..290002ab8e6 100644
--- a/server/cabal.project
+++ b/server/cabal.project
@@ -36,7 +36,7 @@ package graphql-engine
 source-repository-package
   type: git
   location: https://github.com/hasura/pg-client-hs.git
-  tag: 70a849d09bea9461e72c5a5bbde06df65aab61c0
+  tag: cbfc69b935d19dc40be8cdcc73a70b81cf511d34
 source-repository-package
   type: git
diff --git a/server/graphql-engine.cabal b/server/graphql-engine.cabal
index 612ada2dacc..4417cd7066f 100644
--- a/server/graphql-engine.cabal
+++ b/server/graphql-engine.cabal
@@ -42,6 +42,8 @@ common common-exe
   ghc-options: -threaded -rtsopts
     -- Re. `-I2` see #2565
+    -- TODO try the new: -Iw flag:
+    -- http://downloads.haskell.org/~ghc/latest/docs/html/users_guide/runtime_control.html#rts-flag--Iw%20%E2%9F%A8seconds%E2%9F%A9
     --
     -- `-qn2` limits the parallel GC to at most 2 capabilities. This came up in #3354/#3394, as the
     -- parallel GC was causing significant performance overhead. Folks in #ghc on freenode advised
diff --git a/server/src-lib/Hasura/Server/Init.hs b/server/src-lib/Hasura/Server/Init.hs
index 274ee2317f3..19c703d9ad0 100644
--- a/server/src-lib/Hasura/Server/Init.hs
+++ b/server/src-lib/Hasura/Server/Init.hs
@@ -175,14 +175,15 @@ mkServeOptions rso = do
 #else
     defaultAPIs = [METADATA,GRAPHQL,PGDUMP,CONFIG]
 #endif
-    mkConnParams (RawConnParams s c i p) = do
+    mkConnParams (RawConnParams s c i cl p) = do
      stripes <- fromMaybe 1 <$> withEnv s (fst pgStripesEnv)
      -- Note: by Little's Law we can expect e.g. (with 50 max connections) a
      -- hard throughput cap at 1000RPS when db queries take 50ms on average:
      conns <- fromMaybe 50 <$> withEnv c (fst pgConnsEnv)
      iTime <- fromMaybe 180 <$> withEnv i (fst pgTimeoutEnv)
+      connLifetime <- withEnv cl (fst pgConnLifetimeEnv)
      allowPrepare <- fromMaybe True <$> withEnv p (fst pgUsePrepareEnv)
-      return $ Q.ConnParams stripes conns iTime allowPrepare
+      return $ Q.ConnParams stripes conns iTime allowPrepare connLifetime
    mkAuthHook (AuthHookG mUrl mType) = do
      mUrlEnv <- withEnv mUrl $ fst authHookEnv
@@ -359,6 +360,13 @@ pgTimeoutEnv =
   , "Each connection's idle time before it is closed (default: 180 sec)"
   )
+pgConnLifetimeEnv :: (String, String)
+pgConnLifetimeEnv =
+  ( "HASURA_GRAPHQL_PG_CONN_LIFETIME"
+  , "Time from connection creation after which the connection should be destroyed and a new one "
+    <> "created. (default: none)"
+  )
+
 pgUsePrepareEnv :: (String, String)
 pgUsePrepareEnv =
   ( "HASURA_GRAPHQL_USE_PREPARED_STATEMENTS"
@@ -574,7 +582,7 @@ parseTxIsolation = optional $
 parseConnParams :: Parser RawConnParams
 parseConnParams =
-  RawConnParams <$> stripes <*> conns <*> timeout <*> allowPrepare
+  RawConnParams <$> stripes <*> conns <*> idleTimeout <*> connLifetime <*> allowPrepare
   where
    stripes = optional $
      option auto
@@ -592,12 +600,20 @@ parseConnParams =
          help (snd pgConnsEnv)
        )
-    timeout = optional $
+    idleTimeout = optional $
      option auto
        ( long "timeout" <>
          metavar "" <>
          help (snd pgTimeoutEnv)
        )
+
+    connLifetime = fmap (fmap fromRational) $ optional $
+      option auto
+        ( long "conn-lifetime" <>
+          metavar "" <>
+          help (snd pgConnLifetimeEnv)
+        )
+
    allowPrepare = optional $
      option (eitherReader parseStringAsBool)
        ( long "use-prepared-statements" <>
diff --git a/server/src-lib/Hasura/Server/Init/Config.hs b/server/src-lib/Hasura/Server/Init/Config.hs
index c40fbacf043..485f9421d02 100644
--- a/server/src-lib/Hasura/Server/Init/Config.hs
+++ b/server/src-lib/Hasura/Server/Init/Config.hs
@@ -9,6 +9,7 @@ import qualified Data.Text as T
 import qualified Database.PG.Query as Q
 import Data.Char (toLower)
+import Data.Time
 import Network.Wai.Handler.Warp (HostPreference)
 import qualified Hasura.Cache as Cache
@@ -26,6 +27,9 @@ data RawConnParams
   { rcpStripes      :: !(Maybe Int)
   , rcpConns        :: !(Maybe Int)
   , rcpIdleTime     :: !(Maybe Int)
+  , rcpConnLifetime :: !(Maybe NominalDiffTime)
+  -- ^ Time from connection creation after which to destroy a connection and
+  -- choose a different/new one.
   , rcpAllowPrepare :: !(Maybe Bool)
   } deriving (Show, Eq)
@@ -202,6 +206,11 @@ readJson = J.eitherDecodeStrict . txtToBs . T.pack
 class FromEnv a where
   fromEnv :: String -> Either String a
+-- Deserialize from seconds, in the usual way
+instance FromEnv NominalDiffTime where
+  fromEnv s = maybe (Left "could not parse as a Double") (Right . realToFrac) $
+    (readMaybe s :: Maybe Double)
+
 instance FromEnv String where
   fromEnv = Right
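
A minimal usage sketch of the new connection-lifetime setting, not taken from the patch itself: the value 600 is an arbitrary example, and the unit is seconds as implied by the `FromEnv NominalDiffTime` instance above. The `--conn-lifetime` flag added to `parseConnParams` is the command-line counterpart of the environment variable.

    # Illustrative only: recycle each pooled connection 600 seconds after it was created.
    # $CONTAINER_DB_URL is the database URL that scripts/dev.sh defines above.
    HASURA_GRAPHQL_DATABASE_URL="$CONTAINER_DB_URL" \
    HASURA_GRAPHQL_PG_CONN_LIFETIME=600 \
      graphql-engine serve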