mirror of
https://github.com/StanGirard/quivr.git
synced 2024-09-17 16:07:35 +03:00
feat: 🎸 tokens (#1678)
Increased the default `max_tokens` value from 500 to 1000. # Description Please include a summary of the changes and the related issue. Please also include relevant motivation and context. ## Checklist before requesting a review Please delete options that are not relevant. - [ ] My code follows the style guidelines of this project - [ ] I have performed a self-review of my code - [ ] I have commented hard-to-understand areas - [ ] I have ideally added tests that prove my fix is effective or that my feature works - [ ] New and existing unit tests pass locally with my changes - [ ] Any dependent changes have been merged ## Screenshots (if appropriate):
This commit is contained in:
parent
f65e2f07e5
commit
1a4c6c8741
@ -113,7 +113,7 @@ class APIBrainQA(
|
||||
content = chunk.choices[0].delta.content
|
||||
yield content
|
||||
else: # pragma: no cover
|
||||
yield "**Response too long, truncating...**"
|
||||
yield "**...**"
|
||||
break
|
||||
|
||||
async def generate_stream(self, chat_id: UUID, question: ChatQuestion):
|
||||
|
@ -33,7 +33,6 @@ export const AddBrainConfig = ({
|
||||
const {
|
||||
isShareModalOpen,
|
||||
setIsShareModalOpen,
|
||||
temperature,
|
||||
maxTokens,
|
||||
model,
|
||||
isPending,
|
||||
@ -134,20 +133,7 @@ export const AddBrainConfig = ({
|
||||
</select>
|
||||
</fieldset>
|
||||
|
||||
<fieldset className="w-full flex mt-4">
|
||||
<label className="flex-1" htmlFor="temp">
|
||||
{t("temperature", { ns: "config" })}: {temperature}
|
||||
</label>
|
||||
<input
|
||||
id="temp"
|
||||
type="range"
|
||||
min="0"
|
||||
max="1"
|
||||
step="0.01"
|
||||
value={temperature}
|
||||
{...register("temperature")}
|
||||
/>
|
||||
</fieldset>
|
||||
|
||||
<fieldset className="w-full flex mt-4">
|
||||
<label className="flex-1" htmlFor="tokens">
|
||||
{t("maxTokens", { ns: "config" })}: {maxTokens}
|
||||
|
@ -4,7 +4,7 @@ import { BrainConfig } from "../types/brainConfig";
|
||||
export const addBrainDefaultValues: CreateBrainInput = {
|
||||
model: "gpt-3.5-turbo",
|
||||
temperature: 0,
|
||||
max_tokens: 500,
|
||||
max_tokens: 1000,
|
||||
prompt_id: undefined,
|
||||
status: "private",
|
||||
name: "",
|
||||
@ -15,7 +15,7 @@ export const addBrainDefaultValues: CreateBrainInput = {
|
||||
export const defaultBrainConfig: BrainConfig = {
|
||||
model: "gpt-3.5-turbo",
|
||||
temperature: 0,
|
||||
maxTokens: 500,
|
||||
maxTokens: 1000,
|
||||
keepLocal: true,
|
||||
anthropicKey: undefined,
|
||||
backendUrl: undefined,
|
||||
|
@ -7,11 +7,13 @@ export const defineMaxTokens = (
|
||||
switch (model) {
|
||||
case "gpt-3.5-turbo":
|
||||
return 1000;
|
||||
case "gpt-3.5-turbo-1106":
|
||||
return 1000;
|
||||
case "gpt-3.5-turbo-16k":
|
||||
return 4000;
|
||||
case "gpt-4":
|
||||
return 4000;
|
||||
default:
|
||||
return 500;
|
||||
return 1000;
|
||||
}
|
||||
};
|
||||
|
@ -34,7 +34,7 @@ export type BrainConfigContextType = {
|
||||
config: BrainConfig;
|
||||
};
|
||||
|
||||
export const openAiFreeModels = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"] as const;
|
||||
export const openAiFreeModels = ["gpt-3.5-turbo","gpt-3.5-turbo-1106", "gpt-3.5-turbo-16k"] as const;
|
||||
|
||||
export const openAiPaidModels = [...openAiFreeModels, "gpt-4"] as const;
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user