updated openai version and added GPT_4_TURBO

ZincCat 2023-11-18 16:51:01 -05:00
parent adcda972d0
commit b37760310c
11 changed files with 48 additions and 23 deletions

View File

@@ -29,6 +29,8 @@ from camel.utils import (
openai_api_key_required,
)
+from openai.types.chat import ChatCompletion
@dataclass(frozen=True)
class ChatAgentResponse:
@@ -189,17 +191,17 @@ class ChatAgent(BaseAgent):
if num_tokens < self.model_token_limit:
response = self.model_backend.run(messages=openai_messages)
-if not isinstance(response, dict):
+if not isinstance(response, ChatCompletion):
raise RuntimeError("OpenAI returned unexpected struct")
output_messages = [
ChatMessage(role_name=self.role_name, role_type=self.role_type,
-meta_dict=dict(), **dict(choice["message"]))
-for choice in response["choices"]
+meta_dict=dict(), **dict(choice.message))
+for choice in response.choices
]
info = self.get_info(
response["id"],
response["usage"],
[str(choice["finish_reason"]) for choice in response["choices"]],
response.id,
response.usage,
[str(choice.finish_reason) for choice in response.choices],
num_tokens,
)
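
Migration note on the hunk above: openai>=1.0 returns a typed ChatCompletion instead of a plain dict, so the dict lookups (response["choices"], response["usage"]) become attribute access. A minimal sketch of the new pattern, with a placeholder model name and message that are not taken from this commit:

import openai
from openai.types.chat import ChatCompletion

# openai>=1.0 still supports module-level calls that read openai.api_key
response = openai.chat.completions.create(
    model="gpt-4-1106-preview",
    messages=[{"role": "user", "content": "Hello"}],
)
assert isinstance(response, ChatCompletion)  # typed object, not a dict
text = response.choices[0].message.content   # was response["choices"][0]["message"]["content"]
usage = response.usage                       # was response["usage"]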

View File

@@ -27,7 +27,7 @@ class EmbodiedAgent(ChatAgent):
Args:
system_message (SystemMessage): The system message for the chat agent.
model (ModelType, optional): The LLM model to use for generating
-responses. (default :obj:`ModelType.GPT_4`)
+responses. (default :obj:`ModelType.GPT_4_TURBO`)
model_config (Any, optional): Configuration options for the LLM model.
(default: :obj:`None`)
message_window_size (int, optional): The maximum number of previous
@@ -43,7 +43,7 @@ class EmbodiedAgent(ChatAgent):
def __init__(
self,
system_message: SystemMessage,
-model: ModelType = ModelType.GPT_4,
+model: ModelType = ModelType.GPT_4_TURBO,
model_config: Optional[Any] = None,
message_window_size: Optional[int] = None,
action_space: Optional[List[BaseToolAgent]] = None,
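
The only functional change in this file is the default model. A caller that wants the previous behavior can still pass it explicitly; a hedged sketch, assuming a SystemMessage named sys_msg built elsewhere:

from camel.typing import ModelType

# sys_msg is assumed to exist; only the override is the point here
agent = EmbodiedAgent(system_message=sys_msg, model=ModelType.GPT_4)  # opt out of the new GPT_4_TURBO default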

View File

@@ -24,6 +24,9 @@ from camel.messages import (
from camel.prompts import CodePrompt, TextPrompt
from camel.typing import ModelType, RoleType
+from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall
+from openai.types.chat.chat_completion_message import FunctionCall
@dataclass
class BaseMessage:
@@ -44,6 +47,8 @@ class BaseMessage:
meta_dict: Optional[Dict[str, str]]
role: str
content: str
+function_call: Optional[FunctionCall] = None
+tool_calls: Optional[ChatCompletionMessageToolCall] = None
def __getattribute__(self, name: str) -> Any:
r"""Get attribute override to delegate string methods to the

View File

@@ -17,6 +17,8 @@ from typing import Dict, Optional
from camel.messages import BaseMessage
from camel.typing import RoleType
+from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall
+from openai.types.chat.chat_completion_message import FunctionCall
@dataclass
class ChatMessage(BaseMessage):
@@ -36,6 +38,8 @@ class ChatMessage(BaseMessage):
meta_dict: Optional[Dict[str, str]]
role: str
content: str = ""
+function_call: Optional[FunctionCall] = None
+tool_calls: Optional[ChatCompletionMessageToolCall] = None
def set_user_role_at_backend(self: BaseMessage):
return self.__class__(
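
Why the two new optional fields: chat_agent.py above builds messages via **dict(choice.message), and in openai>=1.0 the ChatCompletionMessage model also carries function_call and tool_calls keys, so without matching dataclass fields the spread would raise a TypeError. (In the response model, tool_calls is actually a list of ChatCompletionMessageToolCall; the singular annotation still works at runtime since dataclasses do not enforce hints.) A minimal sketch of the failure being avoided, using an illustrative dataclass rather than the real message classes:

from dataclasses import dataclass

@dataclass
class Msg:
    role: str
    content: str

# dict(choice.message) under openai>=1.0 yields keys like these:
payload = {"role": "assistant", "content": "hi", "function_call": None, "tool_calls": None}
try:
    Msg(**payload)
except TypeError as e:
    print(e)  # unexpected keyword argument 'function_call'

Adding the two optional fields, as both message classes now do, lets the spread succeed.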

View File

@@ -21,13 +21,15 @@ from camel.typing import ModelType
from chatdev.statistics import prompt_cost
from chatdev.utils import log_and_print_online
+from openai.types.chat import ChatCompletion
class ModelBackend(ABC):
r"""Base class for different model backends.
May be OpenAI API, a local LLM, a stub for unit tests, etc."""
@abstractmethod
-def run(self, *args, **kwargs) -> Dict[str, Any]:
+def run(self, *args, **kwargs) -> ChatCompletion:
r"""Runs the query to the backend model.
Raises:
@@ -63,27 +65,29 @@ class OpenAIModel(ModelBackend):
"gpt-4": 8192,
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
+"gpt-4-1106-preview": 4096,
+"gpt-4-1106-vision-preview": 4096,
}
num_max_token = num_max_token_map[self.model_type.value]
num_max_completion_tokens = num_max_token - num_prompt_tokens
self.model_config_dict['max_tokens'] = num_max_completion_tokens
try:
-response = openai.ChatCompletion.create(*args, **kwargs, model=self.model_type.value, **self.model_config_dict)
+response = openai.chat.completions.create(*args, **kwargs, model=self.model_type.value, **self.model_config_dict)
except AttributeError:
response = openai.chat.completions.create(*args, **kwargs, model=self.model_type.value, **self.model_config_dict)
cost = prompt_cost(
self.model_type.value,
-num_prompt_tokens=response["usage"]["prompt_tokens"],
-num_completion_tokens=response["usage"]["completion_tokens"]
+num_prompt_tokens=response.usage.prompt_tokens,
+num_completion_tokens=response.usage.completion_tokens
)
log_and_print_online(
"**[OpenAI_Usage_Info Receive]**\nprompt_tokens: {}\ncompletion_tokens: {}\ntotal_tokens: {}\ncost: ${:.6f}\n".format(
response["usage"]["prompt_tokens"], response["usage"]["completion_tokens"],
response["usage"]["total_tokens"], cost))
if not isinstance(response, Dict):
response.usage.prompt_tokens, response.usage.completion_tokens,
response.usage.total_tokens, cost))
if not isinstance(response, ChatCompletion):
raise RuntimeError("Unexpected return from OpenAI API")
return response
@@ -119,7 +123,7 @@ class ModelFactory:
default_model_type = ModelType.GPT_3_5_TURBO
if model_type in {
-ModelType.GPT_3_5_TURBO, ModelType.GPT_4, ModelType.GPT_4_32k,
+ModelType.GPT_3_5_TURBO, ModelType.GPT_4, ModelType.GPT_4_32k, ModelType.GPT_4_TURBO,
None
}:
model_class = OpenAIModel
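
Two details in this file are worth flagging. First, the 4096 entries for the 1106-preview models appear to be those models' maximum completion size, not their context window (get_model_token_limit in the utils diff below returns 128000 for GPT_4_TURBO), so num_max_token - num_prompt_tokens can go negative for long prompts. A hedged sketch of a clamp a caller could apply; the constants and function name are assumptions, not part of this commit:

GPT_4_TURBO_MAX_COMPLETION = 4096     # the model caps completion length at 4096 tokens
GPT_4_TURBO_CONTEXT_WINDOW = 128_000  # while accepting up to 128K tokens of context

def completion_budget(num_prompt_tokens: int) -> int:
    # never request more than the completion cap, and never go negative
    return max(0, min(GPT_4_TURBO_MAX_COMPLETION,
                      GPT_4_TURBO_CONTEXT_WINDOW - num_prompt_tokens))

Second, after this change the try branch and the except AttributeError fallback appear to issue the identical openai.chat.completions.create call, so the fallback can no longer fire for the reason it was originally added.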

View File

@@ -47,6 +47,7 @@ class ModelType(Enum):
GPT_3_5_TURBO = "gpt-3.5-turbo-16k-0613"
GPT_4 = "gpt-4"
GPT_4_32k = "gpt-4-32k"
+GPT_4_TURBO = "gpt-4-1106-preview"
STUB = "stub"
@property
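
For reference, the enum's value is the literal model name sent to the API (model=self.model_type.value in the backend diff above):

from camel.typing import ModelType

assert ModelType.GPT_4_TURBO.value == "gpt-4-1106-preview"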

View File

@@ -83,7 +83,7 @@ def num_tokens_from_messages(
encoding = tiktoken.get_encoding("cl100k_base")
if model in {
-ModelType.GPT_3_5_TURBO, ModelType.GPT_4, ModelType.GPT_4_32k,
+ModelType.GPT_3_5_TURBO, ModelType.GPT_4, ModelType.GPT_4_32k, ModelType.GPT_4_TURBO,
ModelType.STUB
}:
return count_tokens_openai_chat_models(messages, encoding)
@@ -113,6 +113,8 @@ def get_model_token_limit(model: ModelType) -> int:
return 8192
elif model == ModelType.GPT_4_32k:
return 32768
+elif model == ModelType.GPT_4_TURBO:
+return 128000
elif model == ModelType.STUB:
return 4096
else:
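
The 128000 figure is the advertised context window of gpt-4-1106-preview (completions are still capped at 4096 tokens, per the backend's map above). A small usage sketch, assuming both helpers are importable from camel.typing and camel.utils as the hunks suggest:

from camel.typing import ModelType
from camel.utils import get_model_token_limit

limit = get_model_token_limit(ModelType.GPT_4_TURBO)  # 128000
# chat_agent.py compares num_tokens_from_messages(...) against this limit
# before dispatching the request to the model backend.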

View File

@@ -216,12 +216,12 @@ class ChatEnv:
if desc.endswith(".png"):
desc = desc.replace(".png", "")
print("{}: {}".format(filename, desc))
-response = openai.Image.create(
+response = openai.images.generate(
prompt=desc,
n=1,
size="256x256"
)
-image_url = response['data'][0]['url']
+image_url = response.data[0].url
download(image_url, filename)
def get_proposed_images_from_message(self, messages):
@@ -263,7 +263,7 @@ class ChatEnv:
n=1,
size="256x256"
)
-image_url = response['data'][0]['url']
+image_url = response.data[0].url
download(image_url, filename)
return images
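
The image calls follow the same 0.x -> 1.x migration: openai.Image.create becomes openai.images.generate, and the result is a typed ImagesResponse rather than a dict. A minimal sketch with a placeholder prompt:

import openai

response = openai.images.generate(prompt="a wooden gomoku board", n=1, size="256x256")
image_url = response.data[0].url  # was response['data'][0]['url']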

View File

@@ -12,6 +12,8 @@ def prompt_cost(model_type: str, num_prompt_tokens: float, num_completion_tokens
"gpt-4": 0.03,
"gpt-4-0613": 0.03,
"gpt-4-32k": 0.06,
+"gpt-4-1106-preview": 0.01,
+"gpt-4-1106-vision-preview": 0.01,
}
output_cost_map = {
@@ -22,6 +24,8 @@ def prompt_cost(model_type: str, num_prompt_tokens: float, num_completion_tokens
"gpt-4": 0.06,
"gpt-4-0613": 0.06,
"gpt-4-32k": 0.12,
+"gpt-4-1106-preview": 0.03,
+"gpt-4-1106-vision-preview": 0.03,
}
if model_type not in input_cost_map or model_type not in output_cost_map:
@@ -107,6 +111,8 @@ def get_info(dir, log_filepath):
model_type = "gpt-4"
elif model_type == "GPT_4_32k":
model_type = "gpt-4-32k"
+elif model_type == "GPT_4_TURBO":
+model_type = "gpt-4-1106-preview"
# print("model_type:", model_type)
lines = open(log_filepath, "r", encoding="utf8").read().split("\n")
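
The new entries match OpenAI's launch pricing for gpt-4-1106-preview: $0.01 per 1K prompt tokens and $0.03 per 1K completion tokens. Assuming prompt_cost scales the maps by tokens/1000, as those units imply, a worked example:

input_rate, output_rate = 0.01, 0.03  # USD per 1K tokens, gpt-4-1106-preview
num_prompt_tokens, num_completion_tokens = 12_000, 800
cost = (num_prompt_tokens / 1000) * input_rate + (num_completion_tokens / 1000) * output_rate
print(f"${cost:.6f}")  # $0.144000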

View File

@@ -3,7 +3,7 @@ Flask==2.3.2
Flask-SocketIO==5.3.4
importlib-metadata==6.8.0
numpy==1.24.3
-openai==0.27.8
+openai==1.3.3
regex==2023.6.3
requests==2.31.0
tenacity==8.2.2

run.py
View File

@@ -67,7 +67,7 @@ parser.add_argument('--task', type=str, default="Develop a basic Gomoku game.",
parser.add_argument('--name', type=str, default="Gomoku",
help="Name of software, your software will be generated in WareHouse/name_org_timestamp")
parser.add_argument('--model', type=str, default="GPT_3_5_TURBO",
-help="GPT Model, choose from {'GPT_3_5_TURBO','GPT_4','GPT_4_32K'}")
+help="GPT Model, choose from {'GPT_3_5_TURBO','GPT_4','GPT_4_32K', 'GPT_4_TURBO'}")
parser.add_argument('--path', type=str, default="",
help="Your file directory, ChatDev will build upon your software in the Incremental mode")
args = parser.parse_args()
@@ -78,7 +78,8 @@ args = parser.parse_args()
# Init ChatChain
# ----------------------------------------
config_path, config_phase_path, config_role_path = get_config(args.config)
-args2type = {'GPT_3_5_TURBO': ModelType.GPT_3_5_TURBO, 'GPT_4': ModelType.GPT_4, 'GPT_4_32K': ModelType.GPT_4_32k}
+args2type = {'GPT_3_5_TURBO': ModelType.GPT_3_5_TURBO, 'GPT_4': ModelType.GPT_4, \
+'GPT_4_32K': ModelType.GPT_4_32k, 'GPT_4_TURBO': ModelType.GPT_4_TURBO}
chat_chain = ChatChain(config_path=config_path,
config_phase_path=config_phase_path,
config_role_path=config_role_path,
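
With the mapping above, --model GPT_4_TURBO now resolves to ModelType.GPT_4_TURBO. A hedged alternative (not part of this commit) is to derive argparse's accepted values from args2type so the help text and the dict cannot drift apart; this assumes the dict is moved above the parser:

parser.add_argument('--model', type=str, default="GPT_3_5_TURBO",
                    choices=list(args2type),  # GPT_3_5_TURBO, GPT_4, GPT_4_32K, GPT_4_TURBO
                    help="GPT model to use")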