feat: gpt4 is not available for brains if there is no given openAiKey (#850)

* rename defineMaxToken

* use gpt-3.5-turbo instead of gpt-3.5-turbo-0613

* gpt4 not available if no open ai key
This commit is contained in:
ChloeMouret 2023-08-07 16:35:23 +02:00 committed by GitHub
parent 61cd0a6bde
commit e9ebeef72a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 42 additions and 48 deletions

View File

@@ -16,7 +16,7 @@ class Brain(BaseModel):
name: Optional[str] = "Default brain" name: Optional[str] = "Default brain"
description: Optional[str] = "This is a description" description: Optional[str] = "This is a description"
status: Optional[str] = "private" status: Optional[str] = "private"
model: Optional[str] = "gpt-3.5-turbo-0613" model: Optional[str] = "gpt-3.5-turbo"
temperature: Optional[float] = 0.0 temperature: Optional[float] = 0.0
max_tokens: Optional[int] = 256 max_tokens: Optional[int] = 256
openai_api_key: Optional[str] = None openai_api_key: Optional[str] = None

View File

@@ -17,7 +17,7 @@ class ChatMessage(BaseModel):
class ChatQuestion(BaseModel): class ChatQuestion(BaseModel):
model: str = "gpt-3.5-turbo-0613" model: str = "gpt-3.5-turbo"
question: str question: str
temperature: float = 0.0 temperature: float = 0.0
max_tokens: int = 256 max_tokens: int = 256

View File

@@ -13,7 +13,7 @@ class CreateBrainProperties(BaseModel):
name: Optional[str] = "Default brain" name: Optional[str] = "Default brain"
description: Optional[str] = "This is a description" description: Optional[str] = "This is a description"
status: Optional[str] = "private" status: Optional[str] = "private"
model: Optional[str] = "gpt-3.5-turbo-0613" model: Optional[str] = "gpt-3.5-turbo"
temperature: Optional[float] = 0.0 temperature: Optional[float] = 0.0
max_tokens: Optional[int] = 256 max_tokens: Optional[int] = 256
openai_api_key: Optional[str] = None openai_api_key: Optional[str] = None

View File

@@ -182,7 +182,7 @@ async def create_question_handler(
or not chat_question.max_tokens or not chat_question.max_tokens
): ):
# TODO: create ChatConfig class (pick config from brain or user or chat) and use it here # TODO: create ChatConfig class (pick config from brain or user or chat) and use it here
chat_question.model = chat_question.model or brain.model or "gpt-3.5-turbo-0613" chat_question.model = chat_question.model or brain.model or "gpt-3.5-turbo"
chat_question.temperature = chat_question.temperature or brain.temperature or 0 chat_question.temperature = chat_question.temperature or brain.temperature or 0
chat_question.max_tokens = chat_question.max_tokens or brain.max_tokens or 256 chat_question.max_tokens = chat_question.max_tokens or brain.max_tokens or 256
@@ -254,7 +254,7 @@ async def create_stream_question_handler(
or not chat_question.max_tokens or not chat_question.max_tokens
): ):
# TODO: create ChatConfig class (pick config from brain or user or chat) and use it here # TODO: create ChatConfig class (pick config from brain or user or chat) and use it here
chat_question.model = chat_question.model or brain.model or "gpt-3.5-turbo-0613" chat_question.model = chat_question.model or brain.model or "gpt-3.5-turbo"
chat_question.temperature = chat_question.temperature or brain.temperature or 0 chat_question.temperature = chat_question.temperature or brain.temperature or 0
chat_question.max_tokens = chat_question.max_tokens or brain.max_tokens or 256 chat_question.max_tokens = chat_question.max_tokens or brain.max_tokens or 256

View File

@@ -29,7 +29,7 @@ def test_create_brain(client, api_key):
payload = { payload = {
"name": random_brain_name, "name": random_brain_name,
"status": "public", "status": "public",
"model": "gpt-3.5-turbo-0613", "model": "gpt-3.5-turbo",
"temperature": 0, "temperature": 0,
"max_tokens": 256, "max_tokens": 256,
"file_sha1": "", "file_sha1": "",
@@ -173,7 +173,7 @@ def test_set_as_default_brain_endpoint(client, api_key):
payload = { payload = {
"name": random_brain_name, "name": random_brain_name,
"status": "public", "status": "public",
"model": "gpt-3.5-turbo-0613", "model": "gpt-3.5-turbo",
"temperature": 0, "temperature": 0,
"max_tokens": 256, "max_tokens": 256,
} }

View File

@@ -50,7 +50,7 @@ def test_create_chat_and_talk(client, api_key):
response = client.post( response = client.post(
f"/chat/{chat_id}/question?brain_id={default_brain_id}", f"/chat/{chat_id}/question?brain_id={default_brain_id}",
json={ json={
"model": "gpt-3.5-turbo-0613", "model": "gpt-3.5-turbo",
"question": "Hello, how are you?", "question": "Hello, how are you?",
"temperature": "0", "temperature": "0",
"max_tokens": "256", "max_tokens": "256",
@@ -100,7 +100,7 @@ def test_create_chat_and_talk_with_no_brain(client, api_key):
response = client.post( response = client.post(
f"/chat/{chat_id}/question?brain_id=", f"/chat/{chat_id}/question?brain_id=",
json={ json={
"model": "gpt-3.5-turbo-0613", "model": "gpt-3.5-turbo",
"question": "Hello, how are you?", "question": "Hello, how are you?",
"temperature": "0", "temperature": "0",
"max_tokens": "256", "max_tokens": "256",

View File

@@ -10,8 +10,11 @@ import Button from "@/lib/components/ui/Button";
import { Divider } from "@/lib/components/ui/Divider"; import { Divider } from "@/lib/components/ui/Divider";
import Field from "@/lib/components/ui/Field"; import Field from "@/lib/components/ui/Field";
import { TextArea } from "@/lib/components/ui/TextArea"; import { TextArea } from "@/lib/components/ui/TextArea";
import { models, paidModels } from "@/lib/context/BrainConfigProvider/types"; import {
import { defineMaxTokens } from "@/lib/helpers/defineMexTokens"; freeModels,
paidModels,
} from "@/lib/context/BrainConfigProvider/types";
import { defineMaxTokens } from "@/lib/helpers/defineMaxTokens";
import { PublicPrompts } from "./components/PublicPrompts/PublicPrompts"; import { PublicPrompts } from "./components/PublicPrompts/PublicPrompts";
import { useSettingsTab } from "./hooks/useSettingsTab"; import { useSettingsTab } from "./hooks/useSettingsTab";
@@ -99,7 +102,7 @@ export const SettingsTab = ({ brainId }: SettingsTabProps): JSX.Element => {
{...register("model")} {...register("model")}
className="px-5 py-2 dark:bg-gray-700 bg-gray-200 rounded-md" className="px-5 py-2 dark:bg-gray-700 bg-gray-200 rounded-md"
> >
{(openAiKey !== undefined ? paidModels : models).map( {(openAiKey !== undefined ? paidModels : freeModels).map(
(availableModel) => ( (availableModel) => (
<option value={availableModel} key={availableModel}> <option value={availableModel} key={availableModel}>
{availableModel} {availableModel}

View File

@@ -11,7 +11,7 @@ import { usePromptApi } from "@/lib/api/prompt/usePromptApi";
import { useBrainConfig } from "@/lib/context/BrainConfigProvider"; import { useBrainConfig } from "@/lib/context/BrainConfigProvider";
import { useBrainContext } from "@/lib/context/BrainProvider/hooks/useBrainContext"; import { useBrainContext } from "@/lib/context/BrainProvider/hooks/useBrainContext";
import { Brain } from "@/lib/context/BrainProvider/types"; import { Brain } from "@/lib/context/BrainProvider/types";
import { defineMaxTokens } from "@/lib/helpers/defineMexTokens"; import { defineMaxTokens } from "@/lib/helpers/defineMaxTokens";
import { useToast } from "@/lib/hooks"; import { useToast } from "@/lib/hooks";
type UseSettingsTabProps = { type UseSettingsTabProps = {
@@ -317,7 +317,7 @@ export const useSettingsTab = ({ brainId }: UseSettingsTabProps) => {
return { return {
handleSubmit, handleSubmit,
register, register,
openAiKey, openAiKey: openAiKey === "" ? undefined : openAiKey,
model, model,
temperature, temperature,
maxTokens, maxTokens,

View File

@@ -3,8 +3,8 @@ import { MdCheck, MdSettings } from "react-icons/md";
import Button from "@/lib/components/ui/Button"; import Button from "@/lib/components/ui/Button";
import { Modal } from "@/lib/components/ui/Modal"; import { Modal } from "@/lib/components/ui/Modal";
import { models } from "@/lib/context/BrainConfigProvider/types"; import { freeModels } from "@/lib/context/BrainConfigProvider/types";
import { defineMaxTokens } from "@/lib/helpers/defineMexTokens"; import { defineMaxTokens } from "@/lib/helpers/defineMaxTokens";
import { useConfigModal } from "./hooks/useConfigModal"; import { useConfigModal } from "./hooks/useConfigModal";
@@ -56,7 +56,7 @@ export const ConfigModal = ({ chatId }: { chatId?: string }): JSX.Element => {
{...register("model")} {...register("model")}
className="px-5 py-2 dark:bg-gray-700 bg-gray-200 rounded-md" className="px-5 py-2 dark:bg-gray-700 bg-gray-200 rounded-md"
> >
{models.map((availableModel) => ( {freeModels.map((availableModel) => (
<option value={availableModel} key={availableModel}> <option value={availableModel} key={availableModel}>
{availableModel} {availableModel}
</option> </option>
@@ -85,7 +85,7 @@ export const ConfigModal = ({ chatId }: { chatId?: string }): JSX.Element => {
<input <input
type="range" type="range"
min="10" min="10"
max={defineMaxTokens(model ?? "gpt-3.5-turbo-0613")} max={defineMaxTokens(model ?? "gpt-3.5-turbo")}
value={maxTokens} value={maxTokens}
{...register("maxTokens")} {...register("maxTokens")}
/> />

View File

@@ -10,7 +10,7 @@ import {
import { useBrainConfig } from "@/lib/context/BrainConfigProvider"; import { useBrainConfig } from "@/lib/context/BrainConfigProvider";
import { useBrainContext } from "@/lib/context/BrainProvider/hooks/useBrainContext"; import { useBrainContext } from "@/lib/context/BrainProvider/hooks/useBrainContext";
import { ChatConfig } from "@/lib/context/ChatProvider/types"; import { ChatConfig } from "@/lib/context/ChatProvider/types";
import { defineMaxTokens } from "@/lib/helpers/defineMexTokens"; import { defineMaxTokens } from "@/lib/helpers/defineMaxTokens";
import { useToast } from "@/lib/hooks"; import { useToast } from "@/lib/hooks";
// eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types // eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types

View File

@@ -66,7 +66,7 @@ describe("useBrainApi", () => {
name: "Test Brain", name: "Test Brain",
description: "This is a description", description: "This is a description",
status: "public", status: "public",
model: "gpt-3.5-turbo-0613", model: "gpt-3.5-turbo",
temperature: 0.0, temperature: 0.0,
max_tokens: 256, max_tokens: 256,
openai_api_key: "123", openai_api_key: "123",
@@ -217,7 +217,7 @@ describe("useBrainApi", () => {
name: "Test Brain", name: "Test Brain",
description: "This is a description", description: "This is a description",
status: "public", status: "public",
model: "gpt-3.5-turbo-0613", model: "gpt-3.5-turbo",
temperature: 0.0, temperature: 0.0,
max_tokens: 256, max_tokens: 256,
openai_api_key: "123", openai_api_key: "123",

View File

@@ -7,8 +7,11 @@ import { PublicPrompts } from "@/app/brains-management/[brainId]/components/Brai
import Button from "@/lib/components/ui/Button"; import Button from "@/lib/components/ui/Button";
import Field from "@/lib/components/ui/Field"; import Field from "@/lib/components/ui/Field";
import { Modal } from "@/lib/components/ui/Modal"; import { Modal } from "@/lib/components/ui/Modal";
import { models, paidModels } from "@/lib/context/BrainConfigProvider/types"; import {
import { defineMaxTokens } from "@/lib/helpers/defineMexTokens"; freeModels,
paidModels,
} from "@/lib/context/BrainConfigProvider/types";
import { defineMaxTokens } from "@/lib/helpers/defineMaxTokens";
import { useAddBrainModal } from "./hooks/useAddBrainModal"; import { useAddBrainModal } from "./hooks/useAddBrainModal";
import { Divider } from "../ui/Divider"; import { Divider } from "../ui/Divider";
@@ -84,7 +87,7 @@ export const AddBrainModal = (): JSX.Element => {
{...register("model")} {...register("model")}
className="px-5 py-2 dark:bg-gray-700 bg-gray-200 rounded-md" className="px-5 py-2 dark:bg-gray-700 bg-gray-200 rounded-md"
> >
{(openAiKey !== undefined ? paidModels : models).map( {(openAiKey !== undefined ? paidModels : freeModels).map(
(availableModel) => ( (availableModel) => (
<option value={availableModel} key={availableModel}> <option value={availableModel} key={availableModel}>
{availableModel} {availableModel}

View File

@@ -8,7 +8,7 @@ import { useBrainApi } from "@/lib/api/brain/useBrainApi";
import { usePromptApi } from "@/lib/api/prompt/usePromptApi"; import { usePromptApi } from "@/lib/api/prompt/usePromptApi";
import { useBrainConfig } from "@/lib/context/BrainConfigProvider"; import { useBrainConfig } from "@/lib/context/BrainConfigProvider";
import { useBrainContext } from "@/lib/context/BrainProvider/hooks/useBrainContext"; import { useBrainContext } from "@/lib/context/BrainProvider/hooks/useBrainContext";
import { defineMaxTokens } from "@/lib/helpers/defineMexTokens"; import { defineMaxTokens } from "@/lib/helpers/defineMaxTokens";
import { useToast } from "@/lib/hooks"; import { useToast } from "@/lib/hooks";
// eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types // eslint-disable-next-line @typescript-eslint/explicit-module-boundary-types
@@ -145,7 +145,7 @@ export const useAddBrainModal = () => {
setIsShareModalOpen, setIsShareModalOpen,
handleSubmit, handleSubmit,
register, register,
openAiKey, openAiKey: openAiKey === "" ? undefined : openAiKey,
model, model,
temperature, temperature,
maxTokens, maxTokens,

View File

@@ -15,7 +15,7 @@ export const BrainConfigContext = createContext<
>(undefined); >(undefined);
const defaultBrainConfig: BrainConfig = { const defaultBrainConfig: BrainConfig = {
model: "gpt-3.5-turbo-0613", model: "gpt-3.5-turbo",
temperature: 0, temperature: 0,
maxTokens: 256, maxTokens: 256,
keepLocal: true, keepLocal: true,

View File

@@ -13,7 +13,7 @@ export const BrainConfigProviderMock = ({
<BrainConfigContextMock.Provider <BrainConfigContextMock.Provider
value={{ value={{
config: { config: {
model: "gpt-3.5-turbo-0613", model: "gpt-3.5-turbo",
temperature: 0, temperature: 0,
maxTokens: 256, maxTokens: 256,
keepLocal: true, keepLocal: true,

View File

@@ -21,19 +21,9 @@ export type BrainConfigContextType = {
resetConfig: () => void; resetConfig: () => void;
}; };
// export const openAiModels = ["gpt-3.5-turbo", "gpt-4"] as const; ## TODO activate GPT4 when not in demo mode export const openAiFreeModels = ["gpt-3.5-turbo", "gpt-3.5-turbo-16k"] as const;
export const openAiModels = [ export const openAiPaidModels = [...openAiFreeModels, "gpt-4"] as const;
"gpt-3.5-turbo",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k",
] as const;
export const openAiPaidModels = [
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-16k",
"gpt-4",
"gpt-4-0613",
] as const;
export const anthropicModels = [ export const anthropicModels = [
// "claude-v1", // "claude-v1",
@@ -47,14 +37,14 @@ export const googleModels = [ ] as const; // TODO activate when not in demo mode ] as const; // TODO activate when not in demo mode
] as const; // TODO activate when not in demo mode ] as const; // TODO activate when not in demo mode
// export const googleModels = [] as const; // export const googleModels = [] as const;
export const models = [ export const freeModels = [
...openAiModels, ...openAiFreeModels,
...anthropicModels, // ...anthropicModels,
...googleModels, // ...googleModels,
] as const; ] as const;
export const paidModels = [...openAiPaidModels] as const; export const paidModels = [...openAiPaidModels] as const;
export type PaidModels = (typeof paidModels)[number]; export type PaidModels = (typeof paidModels)[number];
export type Model = (typeof models)[number]; export type Model = (typeof freeModels)[number];

View File

@@ -3,14 +3,12 @@ import { Model, PaidModels } from "../context/BrainConfigProvider/types";
export const defineMaxTokens = (model: Model | PaidModels): number => { export const defineMaxTokens = (model: Model | PaidModels): number => {
//At the moment is evaluating only models from OpenAI //At the moment is evaluating only models from OpenAI
switch (model) { switch (model) {
case "gpt-3.5-turbo-0613": case "gpt-3.5-turbo":
return 500; return 500;
case "gpt-3.5-turbo-16k": case "gpt-3.5-turbo-16k":
return 2000; return 2000;
case "gpt-4": case "gpt-4":
return 1000; return 1000;
case "gpt-4-0613":
return 100;
default: default:
return 250; return 250;
} }