fix: 🐛 models (#1973)

fixed the default model used when no model is chosen

# Description

This PR fixes the default model used when no model is chosen, or when the
requested model is not in the user's allowed list: every `gpt-3.5-turbo`
fallback now points at `gpt-3.5-turbo-1106`. The change touches the
`CompositeBrainQA` completion calls, the chat question handlers (both the
blocking and streaming paths), the `models_supporting_function_calls` list,
and the corresponding API tests.
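
All of the touched handlers share one pattern: check the requested model against the user's allowed models and fall back to a default otherwise. A minimal sketch of that pattern (names taken from the diff below; `resolve_model` and `DEFAULT_MODEL` are illustrative helpers, not code from this PR, which inlines the logic):

```python
DEFAULT_MODEL = "gpt-3.5-turbo-1106"  # the new fallback introduced here


def resolve_model(requested_model: str | None, user_settings: dict) -> str:
    """Return the requested model if the user may use it, else the fallback."""
    allowed = user_settings.get("models", [DEFAULT_MODEL])
    if requested_model is not None and requested_model in allowed:
        return requested_model
    return DEFAULT_MODEL
```

Presumably the old fallback broke when `gpt-3.5-turbo` was absent from a user's `models` setting; pinning everything to `gpt-3.5-turbo-1106` keeps the defaults consistent across the handlers, the tests, and the function-calling allowlist.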

## Checklist before requesting a review

Please delete options that are not relevant.

- [ ] My code follows the style guidelines of this project
- [ ] I have performed a self-review of my code
- [ ] I have commented hard-to-understand areas
- [ ] I have ideally added tests that prove my fix is effective or that
my feature works
- [ ] New and existing unit tests pass locally with my changes
- [ ] Any dependent changes have been merged

Commit 07563bd079 (parent 9a041e722b), authored by Stan Girard on 2024-01-04 12:31:17 +01:00 and committed by GitHub.

5 changed files with 17 additions and 17 deletions.


```diff
@@ -190,7 +190,7 @@ class CompositeBrainQA(
         messages.append({"role": "user", "content": question.question})
         response = completion(
-            model="gpt-3.5-turbo",
+            model="gpt-3.5-turbo-1106",
             messages=messages,
             tools=tools,
             tool_choice="auto",
@@ -312,7 +312,7 @@ class CompositeBrainQA(
         )
         response_after_tools_answers = completion(
-            model="gpt-3.5-turbo",
+            model="gpt-3.5-turbo-1106",
             messages=messages,
             tools=tools,
             tool_choice="auto",
@@ -439,7 +439,7 @@ class CompositeBrainQA(
         messages.append({"role": "user", "content": question.question})
         initial_response = completion(
-            model="gpt-3.5-turbo",
+            model="gpt-3.5-turbo-1106",
             stream=True,
             messages=messages,
             tools=tools,
@@ -552,7 +552,7 @@ class CompositeBrainQA(
         messages.append({"role": "system", "content": PROMPT_2})
         response_after_tools_answers = completion(
-            model="gpt-3.5-turbo",
+            model="gpt-3.5-turbo-1106",
             messages=messages,
             tools=tools,
             tool_choice="auto",
```


```diff
@@ -31,7 +31,7 @@ def test_create_brain(client, api_key):
     payload = {
         "name": random_brain_name,
         "status": "public",
-        "model": "gpt-3.5-turbo",
+        "model": "gpt-3.5-turbo-1106",
         "temperature": 0,
         "max_tokens": 256,
         "brain_type": "doc",
@@ -175,7 +175,7 @@ def test_set_as_default_brain_endpoint(client, api_key):
     payload = {
         "name": random_brain_name,
         "status": "public",
-        "model": "gpt-3.5-turbo",
+        "model": "gpt-3.5-turbo-1106",
         "temperature": 0,
         "max_tokens": 256,
     }
@@ -228,7 +228,7 @@ def create_public_brain_retrieve_and_then_delete(client, api_key):
     payload = {
         "name": random_brain_name,
         "status": "public",
-        "model": "gpt-3.5-turbo",
+        "model": "gpt-3.5-turbo-1106",
         "temperature": 0,
         "max_tokens": 256,
         "brain_type": "doc",
```


```diff
@@ -13,7 +13,7 @@ models_supporting_function_calls = [
     "gpt-4",
     "gpt-4-1106-preview",
     "gpt-4-0613",
-    "gpt-3.5-turbo",
+    "gpt-3.5-turbo-1106",
     "gpt-3.5-turbo-1106",
     "gpt-3.5-turbo-0613",
 ]
```
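
This list appears to gate which models get `tools`/`tool_choice` passed to them. A hedged illustration of such a check (the `ask` helper is hypothetical and not part of this diff; `completion` is assumed to be litellm's, matching the keyword arguments used in the `CompositeBrainQA` hunks above):

```python
from litellm import completion

models_supporting_function_calls = [
    "gpt-4",
    "gpt-4-1106-preview",
    "gpt-4-0613",
    "gpt-3.5-turbo-1106",
    "gpt-3.5-turbo-0613",
]


def ask(model: str, messages: list[dict], tools: list[dict] | None = None):
    # Only forward tools to models known to support function calling;
    # other models get a plain chat completion.
    if tools and model in models_supporting_function_calls:
        return completion(
            model=model, messages=messages, tools=tools, tool_choice="auto"
        )
    return completion(model=model, messages=messages)
```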


```diff
@@ -126,7 +126,7 @@ async def create_question_handler(
     chat_instance.validate_authorization(user_id=current_user.id, brain_id=brain_id)
-    fallback_model = "gpt-3.5-turbo"
+    fallback_model = "gpt-3.5-turbo-1106"
     fallback_temperature = 0.1
     fallback_max_tokens = 512
@@ -135,7 +135,7 @@ async def create_question_handler(
         email=current_user.email,
     )
     user_settings = user_daily_usage.get_user_settings()
-    is_model_ok = (chat_question).model in user_settings.get("models", ["gpt-3.5-turbo"])  # type: ignore
+    is_model_ok = (chat_question).model in user_settings.get("models", ["gpt-3.5-turbo-1106"])  # type: ignore
     # Retrieve chat model (temperature, max_tokens, model)
     if (
@@ -156,10 +156,10 @@ async def create_question_handler(
     try:
         check_user_requests_limit(current_user, chat_question.model)
-        is_model_ok = (chat_question).model in user_settings.get("models", ["gpt-3.5-turbo"])  # type: ignore
+        is_model_ok = (chat_question).model in user_settings.get("models", ["gpt-3.5-turbo-1106"])  # type: ignore
         gpt_answer_generator = chat_instance.get_answer_generator(
             chat_id=str(chat_id),
-            model=chat_question.model if is_model_ok else "gpt-3.5-turbo",  # type: ignore
+            model=chat_question.model if is_model_ok else "gpt-3.5-turbo-1106",  # type: ignore
             max_tokens=chat_question.max_tokens,
             temperature=chat_question.temperature,
             brain_id=str(brain_id),
@@ -212,7 +212,7 @@ async def create_stream_question_handler(
         or chat_question.temperature is None
         or not chat_question.max_tokens
     ):
-        fallback_model = "gpt-3.5-turbo"
+        fallback_model = "gpt-3.5-turbo-1106"
         fallback_temperature = 0
         fallback_max_tokens = 256
@@ -232,10 +232,10 @@ async def create_stream_question_handler(
         check_user_requests_limit(current_user, chat_question.model)
         # TODO check if model is in the list of models available for the user
-        is_model_ok = chat_question.model in user_settings.get("models", ["gpt-3.5-turbo"])  # type: ignore
+        is_model_ok = chat_question.model in user_settings.get("models", ["gpt-3.5-turbo-1106"])  # type: ignore
         gpt_answer_generator = chat_instance.get_answer_generator(
             chat_id=str(chat_id),
-            model=chat_question.model if is_model_ok else "gpt-3.5-turbo",  # type: ignore
+            model=chat_question.model if is_model_ok else "gpt-3.5-turbo-1106",  # type: ignore
             max_tokens=chat_question.max_tokens,
             temperature=chat_question.temperature,  # type: ignore
             streaming=True,
```


```diff
@@ -50,7 +50,7 @@ def test_create_chat_and_talk(client, api_key):
     response = client.post(
         f"/chat/{chat_id}/question?brain_id={default_brain_id}",
         json={
-            "model": "gpt-3.5-turbo",
+            "model": "gpt-3.5-turbo-1106",
             "question": "Hello, how are you?",
             "temperature": "0",
             "max_tokens": "256",
@@ -100,7 +100,7 @@ def test_create_chat_and_talk_with_no_brain(client, api_key):
     response = client.post(
         f"/chat/{chat_id}/question?brain_id=",
         json={
-            "model": "gpt-3.5-turbo",
+            "model": "gpt-3.5-turbo-1106",
             "question": "Hello, how are you?",
             "temperature": "0",
             "max_tokens": "256",
```