From d8372a56ccef08de74c26de767318485f01401c0 Mon Sep 17 00:00:00 2001
From: Martin Kask
Date: Tue, 7 Nov 2023 12:11:34 +0200
Subject: [PATCH] Adds GPT-4 Turbo preview model

---
 camel/model_backend.py | 2 +-
 camel/typing.py        | 1 +
 camel/utils.py         | 4 +++-
 chatdev/statistics.py  | 2 ++
 run.py                 | 5 +++--
 5 files changed, 10 insertions(+), 4 deletions(-)

diff --git a/camel/model_backend.py b/camel/model_backend.py
index d54eea4..9b292d9 100644
--- a/camel/model_backend.py
+++ b/camel/model_backend.py
@@ -116,7 +116,7 @@ class ModelFactory:
         default_model_type = ModelType.GPT_3_5_TURBO
 
         if model_type in {
-            ModelType.GPT_3_5_TURBO, ModelType.GPT_4, ModelType.GPT_4_32k,
+            ModelType.GPT_3_5_TURBO, ModelType.GPT_4, ModelType.GPT_4_TURBO, ModelType.GPT_4_32k,
             None
         }:
             model_class = OpenAIModel
diff --git a/camel/typing.py b/camel/typing.py
index f334777..e5d4b56 100644
--- a/camel/typing.py
+++ b/camel/typing.py
@@ -46,6 +46,7 @@ class RoleType(Enum):
 class ModelType(Enum):
     GPT_3_5_TURBO = "gpt-3.5-turbo-16k-0613"
     GPT_4 = "gpt-4"
+    GPT_4_TURBO = "gpt-4-1106-preview"
     GPT_4_32k = "gpt-4-32k"
 
     STUB = "stub"
diff --git a/camel/utils.py b/camel/utils.py
index baad22f..14d0742 100644
--- a/camel/utils.py
+++ b/camel/utils.py
@@ -83,7 +83,7 @@ def num_tokens_from_messages(
         encoding = tiktoken.get_encoding("cl100k_base")
 
     if model in {
-            ModelType.GPT_3_5_TURBO, ModelType.GPT_4, ModelType.GPT_4_32k,
+            ModelType.GPT_3_5_TURBO, ModelType.GPT_4, ModelType.GPT_4_TURBO, ModelType.GPT_4_32k,
             ModelType.STUB
     }:
         return count_tokens_openai_chat_models(messages, encoding)
@@ -113,6 +113,8 @@ def get_model_token_limit(model: ModelType) -> int:
         return 8192
     elif model == ModelType.GPT_4_32k:
         return 32768
+    elif model == ModelType.GPT_4_TURBO:
+        return 128000
     elif model == ModelType.STUB:
         return 4096
     else:
diff --git a/chatdev/statistics.py b/chatdev/statistics.py
index 05983dd..7d3d00c 100644
--- a/chatdev/statistics.py
+++ b/chatdev/statistics.py
@@ -107,6 +107,8 @@ def get_info(dir, log_filepath):
         model_type = "gpt-4"
     elif model_type == "GPT_4_32k":
         model_type = "gpt-4-32k"
+    elif model_type == "GPT_4_TURBO":
+        model_type = "gpt-4-1106-preview"
     # print("model_type:", model_type)
 
     lines = open(log_filepath, "r", encoding="utf8").read().split("\n")
diff --git a/run.py b/run.py
index b92139d..0623bcc 100644
--- a/run.py
+++ b/run.py
@@ -15,6 +15,7 @@ import argparse
 import logging
 import os
 import sys
+from types import ModuleType
 
 from camel.typing import ModelType
 
@@ -67,7 +68,7 @@ parser.add_argument('--task', type=str, default="Develop a basic Gomoku game.",
 parser.add_argument('--name', type=str, default="Gomoku",
                     help="Name of software, your software will be generated in WareHouse/name_org_timestamp")
 parser.add_argument('--model', type=str, default="GPT_3_5_TURBO",
-                    help="GPT Model, choose from {'GPT_3_5_TURBO','GPT_4','GPT_4_32K'}")
+                    help="GPT Model, choose from {'GPT_3_5_TURBO','GPT_4','GPT_4_TURBO','GPT_4_32K'}")
 parser.add_argument('--path', type=str, default="",
                     help="Your file directory, ChatDev will build upon your software in the Incremental mode")
 args = parser.parse_args()
@@ -78,7 +79,7 @@ args = parser.parse_args()
 # Init ChatChain
 # ----------------------------------------
 config_path, config_phase_path, config_role_path = get_config(args.config)
-args2type = {'GPT_3_5_TURBO': ModelType.GPT_3_5_TURBO, 'GPT_4': ModelType.GPT_4, 'GPT_4_32K': ModelType.GPT_4_32k}
+args2type = {'GPT_3_5_TURBO': ModelType.GPT_3_5_TURBO, 'GPT_4': ModelType.GPT_4, 'GPT_4_TURBO': ModelType.GPT_4_TURBO, 'GPT_4_32K': ModelType.GPT_4_32k}
 chat_chain = ChatChain(config_path=config_path,
                        config_phase_path=config_phase_path,
                        config_role_path=config_role_path,
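
Usage note (not part of the patch): with the enum value wired through run.py's
args2type mapping above, the Turbo preview can be selected from the command
line. A minimal invocation, assuming the run.py defaults shown in the diff:

    python run.py --task "Develop a basic Gomoku game." --name "Gomoku" --model GPT_4_TURBO

The new mapping can also be sanity-checked in isolation; this is a sketch, and
the expected values come straight from the camel/typing.py and camel/utils.py
hunks above:

    from camel.typing import ModelType
    from camel.utils import get_model_token_limit

    # "gpt-4-1106-preview" is the API model name added in camel/typing.py;
    # 128000 is the context window added in camel/utils.py.
    assert ModelType.GPT_4_TURBO.value == "gpt-4-1106-preview"
    assert get_model_token_limit(ModelType.GPT_4_TURBO) == 128000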