Added reporting of tokens usage.

Martin Sosic 2023-06-28 23:19:06 +02:00
parent 08c235d12e
commit 302936bf7f
7 changed files with 56 additions and 27 deletions
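
In short, this change threads OpenAI's per-request usage numbers through the code agent and logs a rough total at the end of project generation. A condensed sketch of that bookkeeping, with illustrative code that is not part of the diffs below:

-- Each ChatGPT call records its (prompt, completion) token counts;
-- the reported total is just the pairwise sum over all recorded calls.
totalTokensUsage :: [(Int, Int)] -> (Int, Int)
totalTokensUsage usages = (sum (map fst usages), sum (map snd usages))

-- e.g. totalTokensUsage [(1200, 300), (800, 150)] == (2000, 450)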

View File

@ -125,23 +125,32 @@ export const ResultPage = () => {
}
function getEmoji(log) {
// log.toLowerCase().includes("generated") ? " " : " "
if (log.toLowerCase().includes("generated")) {
return "✅ ";
if (
log.toLowerCase().includes("generated") ||
log.toLowerCase().includes("fixed") ||
log.toLowerCase().includes("updated")
) {
return "✅";
}
if (log.toLowerCase().includes("done!")) {
return "🎉 ";
return "🎉";
}
if (
log.toLowerCase().includes("error") ||
log.toLowerCase().includes("fail")
) {
return "❌ ";
return "❌";
}
if (log.toLowerCase().includes("warning")) {
return "⚠️ ";
return "⚠️";
}
return "⌛️ ";
if (log.toLowerCase().includes("tokens usage")) {
return "📊";
}
if (log.toLowerCase().endsWith("...")) {
return "⌛️";
}
return "🤖";
}
return (
@ -197,7 +206,7 @@ export const ResultPage = () => {
: (previewLogsCount - i) * (1 / previewLogsCount),
}}
>
{getEmoji(log)}
{getEmoji(log) + " "}
{log}
</pre>
))}

View File

@ -103,7 +103,7 @@ createNewProjectNonInteractiveToStdout projectName appDescription = do
generateNewProject :: CA.CodeAgentConfig -> NewProjectAppName -> String -> IO ()
generateNewProject codeAgentConfig (NewProjectAppName appName) appDescription = do
waspProjectSkeletonFiles <- readWaspProjectSkeletonFiles
CA.runCodeAgent codeAgentConfig $
CA.runCodeAgent codeAgentConfig $ do
GNP.generateNewProject (newProjectDetails appName appDescription) waspProjectSkeletonFiles
getOpenAIApiKey :: Command OpenAIApiKey

View File

@ -92,8 +92,8 @@ print_usage () {
"Runs the code formatter and formats the code in place."
print_usage_cmd "module-graph" \
"Creates graph of modules in the project. Needs graphmod (available on hackage) and graphviz (your os distribution) installed."
print_usage_cmd "wasp-deploy:compile" \
"Compiles the TS project under packages/deploy"
print_usage_cmd "wasp-packages:compile" \
"Compiles the TS projects under packages/."
}
exitStatusToString () {

View File

@ -10,6 +10,7 @@ module Wasp.AI.CodeAgent
getFile,
getAllFiles,
queryChatGPT,
getTotalTokensUsage,
)
where
@ -35,7 +36,7 @@ runCodeAgent :: CodeAgentConfig -> CodeAgent a -> IO a
runCodeAgent config codeAgent =
fst <$> (_unCodeAgent codeAgent `runReaderT` config) `runStateT` initialState
where
initialState = CodeAgentState {_files = H.empty}
initialState = CodeAgentState {_files = H.empty, _usage = []}
writeToLog :: Text -> CodeAgent ()
writeToLog msg = asks _writeLog >>= \f -> liftIO $ f msg
@ -56,12 +57,24 @@ getFile path = gets $ H.lookup path . _files
getAllFiles :: CodeAgent [(FilePath, Text)]
getAllFiles = gets $ H.toList . _files
-- TODO: Make it so that if ChatGPT replies that it is too busy, we try again.
queryChatGPT :: ChatGPTParams -> [ChatMessage] -> CodeAgent Text
queryChatGPT params messages = do
key <- asks _openAIApiKey
liftIO $ ChatGPT.queryChatGPT key params messages
chatResponse <- liftIO $ ChatGPT.queryChatGPT key params messages
modify $ \s -> s {_usage = _usage s <> [ChatGPT.usage chatResponse]}
return $ ChatGPT.getChatResponseContent chatResponse
type NumTokens = Int
-- | Returns total tokens usage: (<num_prompt_tokens>, <num_completion_tokens>).
getTotalTokensUsage :: CodeAgent (NumTokens, NumTokens)
getTotalTokensUsage = do
usage <- gets _usage
let numPromptTokens = sum $ ChatGPT.prompt_tokens <$> usage
let numCompletionTokens = sum $ ChatGPT.completion_tokens <$> usage
return (numPromptTokens, numCompletionTokens)
data CodeAgentState = CodeAgentState
{ _files :: H.HashMap FilePath Text -- TODO: Name this "cacheFiles" maybe?
{ _files :: H.HashMap FilePath Text, -- TODO: Name this "cacheFiles" maybe?
_usage :: [ChatGPT.ChatResponseUsage]
}
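
For illustration, any action in the CodeAgent monad can now surface these totals via the new getTotalTokensUsage export. A minimal sketch assuming only the API shown above (logTokensUsage is a hypothetical helper, not part of this commit):

import qualified Data.Text as T
import Wasp.AI.CodeAgent (CodeAgent, getTotalTokensUsage, writeToLog)

-- Hypothetical helper: log the token counts accumulated so far.
logTokensUsage :: CodeAgent ()
logTokensUsage = do
  (numPromptTokens, numCompletionTokens) <- getTotalTokensUsage
  writeToLog . T.pack $
    "Tokens usage so far: "
      <> show numPromptTokens
      <> " prompt + "
      <> show numCompletionTokens
      <> " completion"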

View File

@ -9,7 +9,8 @@ import Data.Text (Text)
import qualified Data.Text as T
import NeatInterpolation (trimming)
import StrongPath (File', Path, Rel, System)
import Wasp.AI.CodeAgent (CodeAgent, writeToLog)
import Text.Printf (printf)
import Wasp.AI.CodeAgent (CodeAgent, getTotalTokensUsage, writeToLog)
import Wasp.AI.GenerateNewProject.Common (NewProjectDetails (..))
import Wasp.AI.GenerateNewProject.Entity (writeEntitiesToWaspFile)
import Wasp.AI.GenerateNewProject.Operation (OperationType (..), generateAndWriteOperation, getOperationJsFilePath)
@ -42,7 +43,7 @@ generateNewProject newProjectDetails waspProjectSkeletonFiles = do
writeToLog $ "Plan generated!\n" <> summarizePlan plan
writeEntitiesToWaspFile waspFilePath (Plan.entities plan)
writeToLog "Added entities to wasp file."
writeToLog "Updated wasp file with entities."
writeToLog "Generating actions..."
actions <-
@ -72,11 +73,13 @@ generateNewProject newProjectDetails waspProjectSkeletonFiles = do
writeToLog $ T.pack $ "Fixed NodeJS operations file '" <> opFp <> "'."
writeToLog "NodeJS operations files fixed."
-- TODO: what about having an additional step here that goes through all the files once again and
-- fixes any stuff in them (Wasp, JS files)? REPL?
-- TODO: Also try fixing Pages / JSX files.
-- TODO: Consider going through all the prompts and trying to reduce their length,
-- to make sure we are not dropping anything out of context + that we are not wasteful.
(promptTokensUsed, completionTokensUsed) <- getTotalTokensUsage
writeToLog $
T.pack $
printf "Total tokens usage: ~%.1fk" $
fromIntegral (promptTokensUsed + completionTokensUsed) / (1000 :: Double)
writeToLog "Done!"
where
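
As a quick sanity check on the format string above, a standalone example with made-up token counts:

import Text.Printf (printf)

main :: IO ()
main =
  -- With e.g. 35,000 prompt + 2,482 completion tokens this prints:
  --   Total tokens usage: ~37.5k
  printf "Total tokens usage: ~%.1fk\n" (fromIntegral (35000 + 2482 :: Int) / 1000 :: Double)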

View File

@ -81,17 +81,17 @@ generateBaseWaspFile newProjectDetails = ((path, content), planRules)
route LoginRoute { path: "/login", to: LoginPage }
page LoginPage {
component: import Login from "@client/Login.jsx"
component: import Login from "@client/pages/Login.jsx"
}
route SignupRoute { path: "/signup", to: SignupPage }
page SignupPage {
component: import Signup from "@client/Signup.jsx"
component: import Signup from "@client/pages/Signup.jsx"
}
|]
generateLoginJsPage :: File
generateLoginJsPage =
( "src/client/Login.jsx",
( "src/client/pages/Login.jsx",
[trimming|
import React from 'react';
import { LoginForm } from '@wasp/auth/forms/Login';
@ -109,7 +109,7 @@ generateLoginJsPage =
generateSignupJsPage :: File
generateSignupJsPage =
( "src/client/Signup.jsx",
( "src/client/pages/Signup.jsx",
[trimming|
import React from 'react';
import { SignupForm } from '@wasp/auth/forms/Signup';

View File

@ -9,6 +9,7 @@ module Wasp.AI.OpenAI.ChatGPT
ChatResponseChoice (..),
ChatMessage (..),
ChatRole (..),
getChatResponseContent,
)
where
@ -26,7 +27,7 @@ import UnliftIO.Exception (catch, throwIO)
import Wasp.AI.OpenAI (OpenAIApiKey)
import qualified Wasp.Util.IO.Retry as R
queryChatGPT :: OpenAIApiKey -> ChatGPTParams -> [ChatMessage] -> IO Text
queryChatGPT :: OpenAIApiKey -> ChatGPTParams -> [ChatMessage] -> IO ChatResponse
queryChatGPT apiKey params requestMessages = do
let reqBodyJson =
Aeson.object $
@ -59,7 +60,7 @@ queryChatGPT apiKey params requestMessages = do
)
$ return ()
return $ content $ message $ head $ choices chatResponse
return chatResponse
where
secondsToMicroSeconds :: Int -> Int
secondsToMicroSeconds = (* 1000000)
@ -76,6 +77,9 @@ queryChatGPT apiKey params requestMessages = do
)
>>= either throwIO pure
getChatResponseContent :: ChatResponse -> Text
getChatResponseContent = content . message . head . choices
data ChatGPTParams = ChatGPTParams
{ _model :: !Model,
_temperature :: !(Maybe Float)
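
The ChatResponseUsage record itself does not appear in this diff; judging only from the selectors used in this commit (ChatGPT.usage, prompt_tokens, completion_tokens), its shape is presumably roughly as follows (the real definition may carry additional fields, e.g. a total count):

-- Assumed shape, reconstructed from the selectors used in this commit;
-- not the verbatim definition.
data ChatResponseUsage = ChatResponseUsage
  { prompt_tokens :: !Int,
    completion_tokens :: !Int
  }
  deriving (Show)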