import { Model, PaidModels } from "../types/BrainConfig";
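
/**
 * Maps a model identifier to the maximum number of tokens to request for it.
 * Unknown or undefined models fall back to 1000 tokens.
 */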
export const defineMaxTokens = (
  model: Model | PaidModels | undefined
): number => {
  // At the moment only OpenAI and Mistral models are handled explicitly;
  // any other model falls back to the default branch below.
  switch (model) {
    case "gpt-3.5-turbo":
      return 2000;
    case "gpt-3.5-turbo-0125":
      return 2000;
    case "gpt-3.5-turbo-16k":
      return 4000;
    case "gpt-4":
      return 4000;
    case "gpt-4-0125-preview":
      return 4000;
    case "mistral/mistral-small":
      return 1000;
    case "mistral/mistral-medium":
      return 2000;
    case "mistral/mistral-large-latest":
      return 2000;
    default:
      return 1000;
  }
};
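
// Usage sketch (illustrative only; assumes these string literals are members
// of the Model / PaidModels unions exported from ../types/BrainConfig):
//
//   defineMaxTokens("gpt-3.5-turbo-16k");    // 4000
//   defineMaxTokens("mistral/mistral-small"); // 1000
//   defineMaxTokens(undefined);               // 1000 (default branch)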