Updated GPT models used in Wasp AI + improved Mage install instructions a bit.

Martin Sosic 2024-02-14 16:20:48 +01:00
parent dc414f7cd1
commit 0b3ed2a11b
5 changed files with 23 additions and 22 deletions

View File

@@ -561,7 +561,7 @@ export default function RunTheAppModal({ disabled, onDownloadZip }) {
<div className="mt-6 bg-slate-100 rounded-lg p-4 text-base text-slate-800">
<h2 className="font-bold flex items-center space-x-1">
- <span>1. Install Wasp CLI</span>
+ <span>1. Install Wasp CLI (Linux / Mac / Win+WSL)</span>
<a
href="https://wasp-lang.dev/docs/quick-start#installation-1"
target="blank"

View File

@@ -59,9 +59,9 @@ createNewProjectInteractiveOnDisk waspProjectDir appName = do
(Just "Ok results. Cheap and fast. Best cost/benefit ratio.")
(ChatGPT.GPT_4, ChatGPT.GPT_3_5_turbo),
Interactive.Option
"gpt-4 (planning) + gpt-4-1106-preview (coding)"
"gpt-4 (planning) + gpt-4-turbo-preview (coding)"
(Just "Possibly better results, but somewhat slower and somewhat more expensive (~2-3x).")
- (ChatGPT.GPT_4, ChatGPT.GPT_4_1106_Preview),
+ (ChatGPT.GPT_4, ChatGPT.GPT_4_turbo_Preview),
Interactive.Option
"gpt-4 (planning + coding)"
(Just "Best results, but quite slower and quite more expensive (~5x).")

View File

@@ -69,8 +69,7 @@ runCodeAgent config codeAgent =
initialState =
CodeAgentState
{ _files = H.empty,
- _usage = [],
- _isGpt4Available = Nothing
+ _usage = []
}
shortenWithEllipsisTo maxLen text =
@@ -141,6 +140,5 @@ getTotalTokensUsage = do
data CodeAgentState = CodeAgentState
{ _files :: !(H.HashMap FilePath Text), -- TODO: Name this "cacheFiles" maybe?
- _usage :: ![ChatGPT.ChatResponseUsage],
- _isGpt4Available :: !(Maybe Bool)
+ _usage :: ![ChatGPT.ChatResponseUsage]
}
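
The net effect of these two hunks is that `CodeAgentState` no longer tracks GPT-4 availability; it only keeps the generated-files cache and the accumulated token usage. A minimal sketch of that simplified shape follows, with a hypothetical `ChatResponseUsage` stand-in and a `totalTokensUsed` helper in the spirit of `getTotalTokensUsage` (these names are illustrative, not Wasp's internals).

```haskell
import qualified Data.HashMap.Strict as H
import Data.Text (Text)

-- Hypothetical stand-in for ChatGPT.ChatResponseUsage.
data ChatResponseUsage = ChatResponseUsage
  { promptTokens :: Int,
    completionTokens :: Int,
    totalTokens :: Int
  }

-- The simplified state: just the generated-files cache and the accumulated
-- usage reports; the GPT-4 availability flag is gone.
data CodeAgentState = CodeAgentState
  { _files :: !(H.HashMap FilePath Text),
    _usage :: ![ChatResponseUsage]
  }

initialState :: CodeAgentState
initialState = CodeAgentState {_files = H.empty, _usage = []}

-- Summing total tokens over all recorded responses, in the spirit of
-- getTotalTokensUsage from the hunk above.
totalTokensUsed :: CodeAgentState -> Int
totalTokensUsed = sum . map totalTokens . _usage

main :: IO ()
main = print (totalTokensUsed initialState)
```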

View File

@@ -153,7 +153,7 @@ codingChatGPTParams projectDetails =
GPT._temperature = Just $ fromMaybe 0.7 (projectDefaultGptTemperature $ _projectConfig projectDetails)
}
where
- defaultCodingGptModel = GPT.GPT_3_5_turbo_0613
+ defaultCodingGptModel = GPT.GPT_3_5_turbo
planningChatGPTParams :: NewProjectDetails -> ChatGPTParams
planningChatGPTParams projectDetails =
@@ -162,7 +162,7 @@ planningChatGPTParams projectDetails =
GPT._temperature = Just $ fromMaybe 0.7 (projectDefaultGptTemperature $ _projectConfig projectDetails)
}
where
- defaultPlanningGptModel = GPT.GPT_4_0613
+ defaultPlanningGptModel = GPT.GPT_4
fixingChatGPTParams :: ChatGPTParams -> ChatGPTParams
fixingChatGPTParams params = params {GPT._temperature = subtract 0.2 <$> GPT._temperature params}
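
These hunks move the defaults off the dated `-0613` snapshots onto the `gpt-3.5-turbo` and `gpt-4` aliases, while keeping the 0.7 default temperature and the "run fixes colder" tweak. The sketch below shows how those pieces fit together; `_model`, `_temperature`, `codingParams`, and `planningParams` are simplified stand-ins for the real `GPT.ChatGPTParams` fields and the functions in the diff, under the assumption that they behave as shown here.

```haskell
import Data.Maybe (fromMaybe)

-- Simplified stand-ins for GPT.Model / GPT.ChatGPTParams from the diff above.
data Model = GPT_3_5_turbo | GPT_4 deriving (Show)

data ChatGPTParams = ChatGPTParams
  { _model :: !Model,
    _temperature :: !(Maybe Float)
  }
  deriving (Show)

-- Coding defaults to the gpt-3.5-turbo alias, planning to gpt-4; a project
-- config may override the 0.7 default temperature.
codingParams :: Maybe Float -> ChatGPTParams
codingParams customTemp =
  ChatGPTParams {_model = GPT_3_5_turbo, _temperature = Just (fromMaybe 0.7 customTemp)}

planningParams :: Maybe Float -> ChatGPTParams
planningParams customTemp =
  ChatGPTParams {_model = GPT_4, _temperature = Just (fromMaybe 0.7 customTemp)}

-- Fix-up passes run a bit "colder": temperature is lowered by 0.2, mirroring
-- fixingChatGPTParams above.
fixingParams :: ChatGPTParams -> ChatGPTParams
fixingParams params = params {_temperature = subtract 0.2 <$> _temperature params}

main :: IO ()
main = print (fixingParams (codingParams Nothing)) -- temperature ends up as Just 0.5
```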

View File

@@ -86,17 +86,20 @@ data ChatGPTParams = ChatGPTParams
}
deriving (Show)
-- TODO: There are some more data models there but for now we went with these core ones.
data Model
- = GPT_3_5_turbo_1106
- | GPT_3_5_turbo
- | GPT_3_5_turbo_16k
- | GPT_3_5_turbo_0613
- | GPT_3_5_turbo_16k_0613
+ = --
+   GPT_3_5_turbo -- Alias model
+ | GPT_3_5_turbo_0125
+ | GPT_3_5_turbo_1106
+ | --
+   GPT_4_turbo_Preview -- Alias model
+ | GPT_4_0125_Preview
  | GPT_4_1106_Preview
- | GPT_4
- | GPT_4_32k
+ | --
+   GPT_4 -- Alias model
  | GPT_4_0613
+ | --
+   GPT_4_32k -- Alias model
  | GPT_4_32k_0613
deriving (Eq, Bounded, Enum)
@@ -105,15 +108,15 @@ instance Show Model where
modelOpenAiId :: Model -> String
modelOpenAiId = \case
- GPT_3_5_turbo_1106 -> "gpt-3.5-turbo-1106"
  GPT_3_5_turbo -> "gpt-3.5-turbo"
- GPT_3_5_turbo_16k -> "gpt-3.5-turbo-16k"
- GPT_3_5_turbo_0613 -> "gpt-3.5-turbo-0613"
- GPT_3_5_turbo_16k_0613 -> "gpt-3.5-turbo-16k-0613"
+ GPT_3_5_turbo_0125 -> "gpt-3.5-turbo-0125"
+ GPT_3_5_turbo_1106 -> "gpt-3.5-turbo-1106"
+ GPT_4_turbo_Preview -> "gpt-4-turbo-preview"
+ GPT_4_0125_Preview -> "gpt-4-0125-preview"
  GPT_4_1106_Preview -> "gpt-4-1106-preview"
  GPT_4 -> "gpt-4"
- GPT_4_32k -> "gpt-4-32k"
  GPT_4_0613 -> "gpt-4-0613"
+ GPT_4_32k -> "gpt-4-32k"
  GPT_4_32k_0613 -> "gpt-4-32k-0613"
instance FromJSON Model where
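
To make the alias-vs-pinned-snapshot split in this file concrete, here is a trimmed-down, self-contained sketch. The constructor list is abbreviated, and the `modelFromOpenAiId` helper plus the `Show` instance body are illustrative assumptions rather than copies of the Wasp module; only the general mapping of constructors to OpenAI id strings is taken from the diff above.

```haskell
{-# LANGUAGE LambdaCase #-}

-- Abbreviated constructor set; modelFromOpenAiId and the Show instance body
-- are illustrative assumptions, not copies of the Wasp module.
data Model
  = GPT_3_5_turbo -- alias, tracks the latest gpt-3.5-turbo snapshot
  | GPT_3_5_turbo_0125 -- pinned snapshot
  | GPT_4_turbo_Preview -- alias
  | GPT_4_0125_Preview -- pinned snapshot
  | GPT_4 -- alias
  | GPT_4_0613 -- pinned snapshot
  deriving (Eq, Bounded, Enum)

-- The string OpenAI expects in the "model" field of an API request.
modelOpenAiId :: Model -> String
modelOpenAiId = \case
  GPT_3_5_turbo -> "gpt-3.5-turbo"
  GPT_3_5_turbo_0125 -> "gpt-3.5-turbo-0125"
  GPT_4_turbo_Preview -> "gpt-4-turbo-preview"
  GPT_4_0125_Preview -> "gpt-4-0125-preview"
  GPT_4 -> "gpt-4"
  GPT_4_0613 -> "gpt-4-0613"

instance Show Model where
  show = modelOpenAiId

-- Going the other way, e.g. when reading a model name from user config.
modelFromOpenAiId :: String -> Maybe Model
modelFromOpenAiId s = lookup s [(modelOpenAiId m, m) | m <- [minBound .. maxBound]]

main :: IO ()
main = print (modelFromOpenAiId "gpt-4-turbo-preview") -- Just gpt-4-turbo-preview
```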