mirror of
https://github.com/hasura/graphql-engine.git
synced 2024-12-18 04:51:35 +03:00
47183e8a2c
The Plan part is missing, because it needs support from FromIr. That'll come in a follow up commit. **Next PR**: #2529 This is the result of splitting up the mega PR into more digestible chunks. This is the smallest subset I've been able to collect. Missing parts are noted in comments. The code isn't reachable from Main, so it won't affect the test suite. It just gets compiled for now. For context, this splits up work from https://github.com/hasura/graphql-engine-mono/pull/2332 PR-URL: https://github.com/hasura/graphql-engine-mono/pull/2511 Co-authored-by: Abby Sassel <3883855+sassela@users.noreply.github.com> GitOrigin-RevId: 00f30b0f494b56b3b7f8c1b0996377db4874c88d
145 lines · 4.8 KiB · Haskell
-- | MySQL backend connection handling: resolving a user-supplied
-- connection configuration into a pooled 'SourceConfig', fetching
-- database metadata, and running queries whose results are rendered
-- either as JSON text or as rows of records for the data loader.
module Hasura.Backends.MySQL.Connection
  ( runJSONPathQuery,
    resolveSourceConfig,
    resolveDatabaseMetadata,
    fetchAllRows,
    runQueryYieldingRows,
  )
where
|
import Data.Aeson hiding (Result)
|
|
import Data.Aeson qualified as J
|
|
import Data.Aeson.Text (encodeToTextBuilder)
|
|
import Data.ByteString (ByteString)
|
|
import Data.Environment qualified as Env
|
|
import Data.HashMap.Strict qualified as HM
|
|
import Data.HashMap.Strict.InsOrd qualified as OMap
|
|
import Data.Pool
|
|
import Data.Scientific (fromFloatDigits)
|
|
import Data.Text qualified as T
|
|
import Data.Text.Encoding (decodeUtf8)
|
|
import Data.Text.Lazy (toStrict)
|
|
import Data.Text.Lazy.Builder (toLazyText)
|
|
import Data.Vector (Vector)
|
|
import Data.Vector qualified as V
|
|
import Database.MySQL.Base
|
|
import Database.MySQL.Base.Types (Field (..))
|
|
import Database.MySQL.Simple.Result qualified as MySQL
|
|
import Hasura.Backends.MySQL.DataLoader.Plan qualified as DataLoaderPlan
|
|
import Hasura.Backends.MySQL.Meta (getMetadata)
|
|
import Hasura.Backends.MySQL.ToQuery (Query (..))
|
|
import Hasura.Backends.MySQL.Types
|
|
import Hasura.Base.Error
|
|
import Hasura.Prelude
|
|
import Hasura.RQL.Types.Common
|
|
import Hasura.RQL.Types.Source
|
|
import Hasura.SQL.Backend
|
|
|
|
-- | Build a 'SourceConfig' from a user-supplied connection
-- configuration by opening a MySQL connection pool sized according to
-- the configured pool settings.
resolveSourceConfig :: (MonadIO m) => SourceName -> ConnSourceConfig -> Env.Environment -> m (Either QErr SourceConfig)
resolveSourceConfig _name csc@ConnSourceConfig {_cscPoolSettings = ConnPoolSettings {..}, ..} _env =
  runExceptT $ do
    pool <-
      liftIO $
        createPool
          (connect connectInfo)
          close
          1 -- a single stripe of connections
          (fromIntegral _cscIdleTimeout)
          (fromIntegral _cscMaxConnections)
    pure (SourceConfig csc pool)
  where
    -- Translate our Text-based config into the mysql library's
    -- 'ConnectInfo'.
    connectInfo =
      defaultConnectInfo
        { connectHost = T.unpack _cscHost,
          connectPort = _cscPort,
          connectUser = T.unpack _cscUser,
          connectPassword = T.unpack _cscPassword,
          connectDatabase = T.unpack _cscDatabase
        }
|
|
-- | Fetch the database's table metadata over a pooled connection and
-- wrap it up as a 'ResolvedSource' for the given source config.
resolveDatabaseMetadata :: (MonadIO m) => SourceConfig -> m (Either QErr (ResolvedSource 'MySQL))
resolveDatabaseMetadata sc@SourceConfig {..} = runExceptT $ do
  dbMetadata <- liftIO (withResource scConnectionPool (getMetadata scConfig))
  pure (ResolvedSource sc dbMetadata mempty mempty)
|
|
|
-- | Convert a single MySQL column value into an Aeson 'Value', driven
-- by the column's declared 'fieldType'.
--
-- Only 'Long', 'VarString' and 'DateTime' are handled so far; any
-- other field type crashes via 'error' (see the TODO below).
parseFieldResult :: Field -> Maybe ByteString -> Value
parseFieldResult f@Field {..} mBs =
  case fieldType of
    Long ->
      -- Numeric column: converted by mysql-simple and re-encoded as a
      -- JSON number.  NOTE(review): routing the value through 'Double'
      -- could lose precision for large integers — confirm acceptable
      -- for this column type.
      let fvalue :: Double = MySQL.convert f mBs
       in Number $ fromFloatDigits fvalue
    VarString ->
      -- Text column: decoded by mysql-simple's conversion machinery.
      let fvalue :: Text = MySQL.convert f mBs
       in J.String fvalue
    -- Date/time values are passed through as their raw UTF-8 bytes;
    -- a NULL cell becomes JSON null.
    DateTime -> maybe J.Null (J.String . decodeUtf8) mBs
    _ -> error $ "parseResult: not implemented yet " <> show f <> " " <> show mBs

-- TODO: handle remaining cases
|
|
-- | Turn a result set (column descriptors plus raw rows) into a list
-- of JSON objects, one per row, keyed by column name.
fieldsToAeson :: [Field] -> [[Maybe ByteString]] -> [Value]
fieldsToAeson columns = map rowToObject
  where
    -- One JSON object per row: pair each column descriptor with the
    -- corresponding raw cell and parse it.
    rowToObject :: [Maybe ByteString] -> Value
    rowToObject row = Object (HM.fromList (zipWith cell columns row))

    cell :: Field -> Maybe ByteString -> (Text, Value)
    cell c r = decodeUtf8 (fieldName c) .= parseFieldResult c r
|
|
|
-- | Execute a query and render the full result set as a single piece
-- of JSON text (an array of row objects).
runJSONPathQuery ::
  (MonadError QErr m, MonadIO m) =>
  (Pool Connection) ->
  Query ->
  m Text
runJSONPathQuery pool (Query querySql) = do
  rowValues <- liftIO . withResource pool $ \conn -> do
    query conn querySql
    res <- storeResult conn
    cols <- fetchFields res
    rows <- fetchAllRows res
    pure (fieldsToAeson cols rows)
  -- Render via a lazy text builder and strictify at the end.
  pure . toStrict . toLazyText . encodeToTextBuilder $ toJSON rowValues
|
|
|
-- | Used by the dataloader to produce rows of records. Those rows of
-- records are then manipulated by the dataloader to do Haskell-side
-- joins. Is a Vector of HashMaps the most efficient choice? A
-- pandas-style data frame could also be more efficient,
-- dependingly. However, this is a legible approach; efficiency
-- improvements can be added later.
parseAndCollectRows ::
  [Field] ->
  [[Maybe ByteString]] ->
  Vector (InsOrdHashMap DataLoaderPlan.FieldName J.Value)
parseAndCollectRows columns = V.fromList . map rowToRecord
  where
    -- Insertion-ordered map so record fields keep column order.
    rowToRecord :: [Maybe ByteString] -> InsOrdHashMap DataLoaderPlan.FieldName J.Value
    rowToRecord row = OMap.fromList (zipWith toEntry columns row)

    toEntry :: Field -> Maybe ByteString -> (DataLoaderPlan.FieldName, J.Value)
    toEntry column value =
      ( DataLoaderPlan.FieldName . decodeUtf8 . fieldName $ column,
        parseFieldResult column value
      )
|
|
|
-- | Run a query immediately and parse up the results into a vector.
runQueryYieldingRows ::
  (MonadIO m) =>
  Pool Connection ->
  Query ->
  m (Vector (InsOrdHashMap DataLoaderPlan.FieldName J.Value))
runQueryYieldingRows pool (Query querySql) =
  liftIO . withResource pool $ \conn -> do
    query conn querySql
    res <- storeResult conn
    cols <- fetchFields res
    rows <- fetchAllRows res
    pure (parseAndCollectRows cols rows)
|
|
|
-- | Drain every remaining row from a stored result set, in the order
-- the server returned them.  An empty row list from 'fetchRow' marks
-- the end of the result set.
fetchAllRows :: Result -> IO [[Maybe ByteString]]
fetchAllRows res = loop id
  where
    -- Difference-list accumulator keeps rows in arrival order without
    -- a final reverse.
    loop acc = do
      row <- fetchRow res
      case row of
        [] -> pure (acc [])
        _ -> loop (acc . (row :))