Mirror of https://github.com/github/semantic.git (synced 2024-11-27 12:57:49 +03:00)
Make benchmarks operate and switch to gauge

I find myself wanting to benchmark some I/O code, so I decided to look at our benchmark suite, only to find that it had bitrotted somewhat. This patch brings it back up to working status, switches to `gauge` (which is more accurate than `criterion` and incurs fewer dependencies), and sprinkles some typed paths on there.
This commit is contained in:
parent c34b8fe568
commit 65060868bc
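For context on the swap: `gauge` deliberately mirrors the `Criterion.Main` interface (defaultMain, bgroup, bench, nf, nfIO), so porting a suite is mostly an import change. A minimal sketch, independent of this commit, with a throwaway fib as the workload:

import Gauge.Main (bench, bgroup, defaultMain, nf)

fib :: Int -> Int
fib n = if n < 2 then n else fib (n - 1) + fib (n - 2)

main :: IO ()
main = defaultMain
  [ bgroup "fib"
      [ bench "fib 15" $ nf fib 15  -- force the pure result to normal form
      , bench "fib 20" $ nf fib 20
      ]
  ]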
@@ -1,50 +1,85 @@
{-# LANGUAGE DataKinds, FlexibleContexts, TypeFamilies, TypeApplications #-}
{-# LANGUAGE DataKinds, FlexibleContexts, PackageImports, PartialTypeSignatures, TypeApplications, TypeFamilies #-}

module Main where

import Algebra.Graph
import Control.Monad
import Criterion.Main
import Data.Abstract.Evaluatable
import Data.Abstract.FreeVariables
import Data.Blob
import Data.Blob.IO (readBlobFromFile')
import Data.Bifunctor
import Data.Functor.Classes
import "semantic" Data.Graph (Graph (..), topologicalSort)
import Data.Graph.ControlFlowVertex
import qualified Data.Language as Language
import Data.Project
import Data.Proxy
import Data.Term
import Gauge.Main
import Parsing.Parser
import Semantic.Config (defaultOptions)
import Semantic.Task (withOptions)
import Semantic.Util hiding (evalRubyProject, evalPythonProject, evaluateProject)
import Semantic.Graph
import Semantic.Task (SomeException, TaskSession (..), runTask, withOptions)
import Semantic.Util hiding (evalPythonProject, evalRubyProject, evaluateProject)
import Source.Loc
import qualified System.Path as Path
import System.Path ((</>))

-- Duplicating this stuff from Util to shut off the logging

callGraphProject' :: ( Language.SLanguage lang
                     , Ord1 syntax
                     , Declarations1 syntax
                     , Evaluatable syntax
                     , FreeVariables1 syntax
                     , AccessControls1 syntax
                     , HasPrelude lang
                     , Functor syntax
                     , VertexDeclarationWithStrategy (VertexDeclarationStrategy syntax) syntax syntax
                     )
                  => TaskSession
                  -> Proxy lang
                  -> Parser (Term syntax Loc)
                  -> Path.RelFile
                  -> IO (Either String (Data.Graph.Graph ControlFlowVertex))
callGraphProject' session proxy parser path = fmap (first show) . runTask session $ do
  blob <- readBlobFromFile' (fileForRelPath path)
  package <- fmap snd <$> parsePackage parser (Project (Path.toString (Path.takeDirectory path)) [blob] (Language.reflect proxy) [])
  modules <- topologicalSort <$> runImportGraphToModules proxy package
  runCallGraph proxy False modules package

callGraphProject proxy parser paths = withOptions defaultOptions $ \ config logger statter ->
  callGraphProject' (TaskSession config "" False logger statter) proxy parser paths

evalRubyProject = justEvaluating <=< evaluateProject (Proxy :: Proxy 'Language.Ruby) rubyParser
evalPythonProject = justEvaluating <=< evaluateProject (Proxy :: Proxy 'Language.Python) pythonParser

evaluateProject proxy parser paths = withOptions defaultOptions $ \ config logger statter ->
  evaluateProject' (TaskConfig config logger statter) proxy parser paths
evaluateProject proxy parser path = withOptions defaultOptions $ \ config logger statter ->
  evaluateProject' (TaskSession config "" False logger statter) proxy parser [Path.toString path]

-- We use `fmap show` to ensure that all the parts of the result of evaluation are
-- evaluated themselves. While an NFData instance is the most morally correct way
-- to do this, I'm reluctant to add NFData instances to every single datatype in the
-- project—coercing the result into a string will suffice, though it throws off the
-- memory allocation results a bit.
pyEval :: FilePath -> Benchmarkable
pyEval p = nfIO . evalPythonProject $ ["bench/bench-fixtures/python/" <> p]
pyEval :: Path.RelFile -> Benchmarkable
pyEval p = nfIO $ evalPythonProject (Path.relDir "bench/bench-fixtures/python" </> p)

rbEval :: FilePath -> Benchmarkable
rbEval p = nfIO . evalRubyProject $ ["bench/bench-fixtures/ruby/" <> p]
rbEval :: Path.RelFile -> Benchmarkable
rbEval p = nfIO $ evalRubyProject (Path.relDir "bench/bench-fixtures/ruby" </> p)

pyCall :: FilePath -> Benchmarkable
pyCall p = nfIO $ callGraphProject pythonParser (Proxy @'Language.Python) defaultOptions ["bench/bench-fixtures/python/" <> p]
pyCall :: Path.RelFile -> Benchmarkable
pyCall p = nfIO $ callGraphProject (Proxy @'Language.Python) pythonParser (Path.relDir "bench/bench-fixtures/python" </> p)

rbCall :: FilePath -> Benchmarkable
rbCall p = nfIO $ callGraphProject rubyParser (Proxy @'Language.Ruby) defaultOptions ["bench/bench-fixtures/ruby/" <> p]
rbCall :: Path.RelFile -> Benchmarkable
rbCall p = nfIO $ callGraphProject (Proxy @'Language.Ruby) rubyParser (Path.relDir "bench/bench-fixtures/ruby" </> p)

main :: IO ()
main = defaultMain
  [ bgroup "python" [ bench "assignment" $ pyEval "simple-assignment.py"
                    , bench "function def" $ pyEval "function-definition.py"
                    , bench "if + function calls" $ pyEval "if-statement-functions.py"
                    , bench "call graph" $ pyCall "if-statement-functions.py"
  [ bgroup "python" [ bench "assignment" . pyEval $ Path.relFile "simple-assignment.py"
                    , bench "function def" . pyEval $ Path.relFile "function-definition.py"
                    , bench "if + function calls" . pyCall $ Path.relFile "if-statement-functions.py"
                    , bench "call graph" . pyCall $ Path.relFile "if-statement-functions.py"
                    ]
  , bgroup "ruby" [ bench "assignment" $ rbEval "simple-assignment.rb"
                  , bench "function def" $ rbEval "function-definition.rb"
                  , bench "if + function calls" $ rbEval "if-statement-functions.rb"
                  , bench "call graph" $ rbCall "if-statement-functions.rb"
  , bgroup "ruby" [ bench "assignment" . rbEval $ Path.relFile "simple-assignment.rb"
                  , bench "function def" . rbEval $ Path.relFile "function-definition.rb"
                  , bench "if + function calls" . rbCall $ Path.relFile "if-statement-functions.rb"
                  , bench "call graph" . rbCall $ Path.relFile "if-statement-functions.rb"
                  ]
  ]
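As a standalone illustration of the `fmap show` forcing trick described in the comment above (the evaluator here is a hypothetical stand-in, not part of this commit):

import Gauge.Main (Benchmarkable, bench, defaultMain, nfIO)

-- Hypothetical stand-in for an evaluator whose result type has no NFData instance.
evalSomething :: IO (Either String [Int])
evalSomething = pure (Right [1 .. 1000])

-- Rendering the result with show means nfIO only needs String's NFData
-- instance, at the cost of also timing the rendering itself.
evalForced :: Benchmarkable
evalForced = nfIO (show <$> evalSomething)

main :: IO ()
main = defaultMain [ bench "eval (forced via show)" evalForced ]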
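The typed-paths half of the change is the switch from raw FilePath strings to System.Path's RelFile/RelDir types, which turn file-versus-directory and relative-versus-absolute mix-ups into type errors. A small sketch using only the combinators that appear in this diff (Path.relDir, Path.relFile, (</>), Path.toString); the fixture path is illustrative:

import qualified System.Path as Path
import System.Path ((</>))

-- Composing a relative directory with a relative file yields a RelFile.
fixture :: Path.RelFile
fixture = Path.relDir "bench/bench-fixtures/python" </> Path.relFile "simple-assignment.py"

main :: IO ()
main = putStrLn (Path.toString fixture)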
@@ -408,14 +408,16 @@ test-suite parse-examples
              , tasty-hunit

benchmark evaluation
  import: haskell, executable-flags
  import: haskell, dependencies, executable-flags
  hs-source-dirs: bench/evaluation
  type: exitcode-stdio-1.0
  main-is: Main.hs
  ghc-options: -static
  build-depends: base
               , criterion ^>= 1.5
               , algebraic-graphs
               , gauge ^>= 0.2.5
               , semantic
               , semantic-source

source-repository head
  type: git
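With the stanza above in place, the suite should be runnable through cabal's usual benchmark machinery (for example `cabal bench evaluation`), since an exitcode-stdio-1.0 benchmark is just an executable whose exit code signals success or failure.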
@@ -8,6 +8,7 @@ module Semantic.Util
  , evalRubyProject
  , evalTypeScriptProject
  , evaluateProject'
  , justEvaluating
  , mergeErrors
  , reassociate
  , parseFile