From 302936bf7f73513692f789ba929998455bc17abe Mon Sep 17 00:00:00 2001 From: Martin Sosic Date: Wed, 28 Jun 2023 23:19:06 +0200 Subject: [PATCH] Added reporting of tokens usage. --- wasp-ai/src/client/pages/ResultPage.jsx | 25 +++++++++++++------ .../Wasp/Cli/Command/CreateNewProject/AI.hs | 2 +- waspc/run | 4 +-- waspc/src/Wasp/AI/CodeAgent.hs | 21 +++++++++++++--- waspc/src/Wasp/AI/GenerateNewProject.hs | 15 ++++++----- .../Wasp/AI/GenerateNewProject/Skeleton.hs | 8 +++--- waspc/src/Wasp/AI/OpenAI/ChatGPT.hs | 8 ++++-- 7 files changed, 56 insertions(+), 27 deletions(-) diff --git a/wasp-ai/src/client/pages/ResultPage.jsx b/wasp-ai/src/client/pages/ResultPage.jsx index 71cfdb4c4..107965269 100644 --- a/wasp-ai/src/client/pages/ResultPage.jsx +++ b/wasp-ai/src/client/pages/ResultPage.jsx @@ -125,23 +125,32 @@ export const ResultPage = () => { } function getEmoji(log) { - // log.toLowerCase().includes("generated") ? "✅ " : "⌛️ " - if (log.toLowerCase().includes("generated")) { - return "✅ "; + if ( + log.toLowerCase().includes("generated") || + log.toLowerCase().includes("fixed") || + log.toLowerCase().includes("updated") + ) { + return "✅"; } if (log.toLowerCase().includes("done!")) { - return "🎉 "; + return "🎉"; } if ( log.toLowerCase().includes("error") || log.toLowerCase().includes("fail") ) { - return "❌ "; + return "❌"; } if (log.toLowerCase().includes("warning")) { - return "⚠️ "; + return "⚠️"; } - return "⌛️ "; + if (log.toLowerCase().includes("tokens usage")) { + return "📊"; + } + if (log.toLowerCase().endsWith("...")) { + return "⌛️"; + } + return "🤖"; } return ( @@ -197,7 +206,7 @@ export const ResultPage = () => { : (previewLogsCount - i) * (1 / previewLogsCount), }} > - {getEmoji(log)} + {getEmoji(log) + " "} {log} ))} diff --git a/waspc/cli/src/Wasp/Cli/Command/CreateNewProject/AI.hs b/waspc/cli/src/Wasp/Cli/Command/CreateNewProject/AI.hs index ffce6d79a..3d4f31203 100644 --- a/waspc/cli/src/Wasp/Cli/Command/CreateNewProject/AI.hs +++ 
b/waspc/cli/src/Wasp/Cli/Command/CreateNewProject/AI.hs @@ -103,7 +103,7 @@ createNewProjectNonInteractiveToStdout projectName appDescription = do generateNewProject :: CA.CodeAgentConfig -> NewProjectAppName -> String -> IO () generateNewProject codeAgentConfig (NewProjectAppName appName) appDescription = do waspProjectSkeletonFiles <- readWaspProjectSkeletonFiles - CA.runCodeAgent codeAgentConfig $ + CA.runCodeAgent codeAgentConfig $ do GNP.generateNewProject (newProjectDetails appName appDescription) waspProjectSkeletonFiles getOpenAIApiKey :: Command OpenAIApiKey diff --git a/waspc/run b/waspc/run index d9dcab82b..73a82a5c4 100755 --- a/waspc/run +++ b/waspc/run @@ -92,8 +92,8 @@ print_usage () { "Runs the code formatter and formats the code in place." print_usage_cmd "module-graph" \ "Creates graph of modules in the project. Needs graphmod (available on hackage) and graphviz (your os distribution) installed." - print_usage_cmd "wasp-deploy:compile" \ - "Compiles the TS project under packages/deploy" + print_usage_cmd "wasp-packages:compile" \ + "Compiles the TS projects under packages/." } exitStatusToString () { diff --git a/waspc/src/Wasp/AI/CodeAgent.hs b/waspc/src/Wasp/AI/CodeAgent.hs index d7c92bd42..1d00e0de7 100644 --- a/waspc/src/Wasp/AI/CodeAgent.hs +++ b/waspc/src/Wasp/AI/CodeAgent.hs @@ -10,6 +10,7 @@ module Wasp.AI.CodeAgent getFile, getAllFiles, queryChatGPT, + getTotalTokensUsage, ) where @@ -35,7 +36,7 @@ runCodeAgent :: CodeAgentConfig -> CodeAgent a -> IO a runCodeAgent config codeAgent = fst <$> (_unCodeAgent codeAgent `runReaderT` config) `runStateT` initialState where - initialState = CodeAgentState {_files = H.empty} + initialState = CodeAgentState {_files = H.empty, _usage = []} writeToLog :: Text -> CodeAgent () writeToLog msg = asks _writeLog >>= \f -> liftIO $ f msg @@ -56,12 +57,24 @@ getFile path = gets $ H.lookup path . _files getAllFiles :: CodeAgent [(FilePath, Text)] getAllFiles = gets $ H.toList . 
_files --- TODO: Make it so that if ChatGPT replies with being too busy, we try again. queryChatGPT :: ChatGPTParams -> [ChatMessage] -> CodeAgent Text queryChatGPT params messages = do key <- asks _openAIApiKey - liftIO $ ChatGPT.queryChatGPT key params messages + chatResponse <- liftIO $ ChatGPT.queryChatGPT key params messages + modify $ \s -> s {_usage = _usage s <> [ChatGPT.usage chatResponse]} + return $ ChatGPT.getChatResponseContent chatResponse + +type NumTokens = Int + +-- | Returns total tokens usage: (<num prompt tokens>, <num completion tokens>). +getTotalTokensUsage :: CodeAgent (NumTokens, NumTokens) +getTotalTokensUsage = do + usage <- gets _usage + let numPromptTokens = sum $ ChatGPT.prompt_tokens <$> usage + let numCompletionTokens = sum $ ChatGPT.completion_tokens <$> usage + return (numPromptTokens, numCompletionTokens) data CodeAgentState = CodeAgentState - { _files :: H.HashMap FilePath Text -- TODO: Name this "cacheFiles" maybe? + { _files :: H.HashMap FilePath Text, -- TODO: Name this "cacheFiles" maybe? 
+ _usage :: [ChatGPT.ChatResponseUsage] } diff --git a/waspc/src/Wasp/AI/GenerateNewProject.hs b/waspc/src/Wasp/AI/GenerateNewProject.hs index 7abb29abe..9187416c5 100644 --- a/waspc/src/Wasp/AI/GenerateNewProject.hs +++ b/waspc/src/Wasp/AI/GenerateNewProject.hs @@ -9,7 +9,8 @@ import Data.Text (Text) import qualified Data.Text as T import NeatInterpolation (trimming) import StrongPath (File', Path, Rel, System) -import Wasp.AI.CodeAgent (CodeAgent, writeToLog) +import Text.Printf (printf) +import Wasp.AI.CodeAgent (CodeAgent, getTotalTokensUsage, writeToLog) import Wasp.AI.GenerateNewProject.Common (NewProjectDetails (..)) import Wasp.AI.GenerateNewProject.Entity (writeEntitiesToWaspFile) import Wasp.AI.GenerateNewProject.Operation (OperationType (..), generateAndWriteOperation, getOperationJsFilePath) @@ -42,7 +43,7 @@ generateNewProject newProjectDetails waspProjectSkeletonFiles = do writeToLog $ "Plan generated!\n" <> summarizePlan plan writeEntitiesToWaspFile waspFilePath (Plan.entities plan) - writeToLog "Added entities to wasp file." + writeToLog "Updated wasp file with entities." writeToLog "Generating actions..." actions <- @@ -72,11 +73,13 @@ generateNewProject newProjectDetails waspProjectSkeletonFiles = do writeToLog $ T.pack $ "Fixed NodeJS operations file '" <> opFp <> "'." writeToLog "NodeJS operations files fixed." - -- TODO: what about having additional step here that goes through all the files once again and - -- fixes any stuff in them (Wasp, JS files)? REPL? + -- TODO: Also try fixing Pages / JSX files. - -- TODO: Consider going through all the prompts and trying to reduce their length, - -- to make sure we are not droping anyting out of context + that we are not wasteful. + (promptTokensUsed, completionTokensUsed) <- getTotalTokensUsage + writeToLog $ + T.pack $ + printf "Total tokens usage: ~%.1fk" $ + fromIntegral (promptTokensUsed + completionTokensUsed) / (1000 :: Double) writeToLog "Done!" 
where diff --git a/waspc/src/Wasp/AI/GenerateNewProject/Skeleton.hs b/waspc/src/Wasp/AI/GenerateNewProject/Skeleton.hs index 7320e1acf..e27ee1113 100644 --- a/waspc/src/Wasp/AI/GenerateNewProject/Skeleton.hs +++ b/waspc/src/Wasp/AI/GenerateNewProject/Skeleton.hs @@ -81,17 +81,17 @@ generateBaseWaspFile newProjectDetails = ((path, content), planRules) route LoginRoute { path: "/login", to: LoginPage } page LoginPage { - component: import Login from "@client/Login.jsx" + component: import Login from "@client/pages/Login.jsx" } route SignupRoute { path: "/signup", to: SignupPage } page SignupPage { - component: import Signup from "@client/Signup.jsx" + component: import Signup from "@client/pages/Signup.jsx" } |] generateLoginJsPage :: File generateLoginJsPage = - ( "src/client/Login.jsx", + ( "src/client/pages/Login.jsx", [trimming| import React from 'react'; import { LoginForm } from '@wasp/auth/forms/Login'; @@ -109,7 +109,7 @@ generateLoginJsPage = generateSignupJsPage :: File generateSignupJsPage = - ( "src/client/Signup.jsx", + ( "src/client/pages/Signup.jsx", [trimming| import React from 'react'; import { SignupForm } from '@wasp/auth/forms/Signup'; diff --git a/waspc/src/Wasp/AI/OpenAI/ChatGPT.hs b/waspc/src/Wasp/AI/OpenAI/ChatGPT.hs index 195c90250..32f3a1765 100644 --- a/waspc/src/Wasp/AI/OpenAI/ChatGPT.hs +++ b/waspc/src/Wasp/AI/OpenAI/ChatGPT.hs @@ -9,6 +9,7 @@ module Wasp.AI.OpenAI.ChatGPT ChatResponseChoice (..), ChatMessage (..), ChatRole (..), + getChatResponseContent, ) where @@ -26,7 +27,7 @@ import UnliftIO.Exception (catch, throwIO) import Wasp.AI.OpenAI (OpenAIApiKey) import qualified Wasp.Util.IO.Retry as R -queryChatGPT :: OpenAIApiKey -> ChatGPTParams -> [ChatMessage] -> IO Text +queryChatGPT :: OpenAIApiKey -> ChatGPTParams -> [ChatMessage] -> IO ChatResponse queryChatGPT apiKey params requestMessages = do let reqBodyJson = Aeson.object $ @@ -59,7 +60,7 @@ queryChatGPT apiKey params requestMessages = do ) $ return () - return $ content $ 
message $ head $ choices chatResponse + return chatResponse where secondsToMicroSeconds :: Int -> Int secondsToMicroSeconds = (* 1000000) @@ -76,6 +77,9 @@ queryChatGPT apiKey params requestMessages = do ) >>= either throwIO pure +getChatResponseContent :: ChatResponse -> Text +getChatResponseContent = content . message . head . choices + data ChatGPTParams = ChatGPTParams { _model :: !Model, _temperature :: !(Maybe Float)