Mirror of https://github.com/OpenBMB/ChatDev.git (synced 2024-11-07 18:40:13 +03:00)

Commit 629376976c (parent 59965ab0bd)
Commit message: ChatCompletion
@@ -63,27 +63,23 @@ class OpenAIModel(ModelBackend):
             "gpt-4": 8192,
             "gpt-4-0613": 8192,
             "gpt-4-32k": 32768,
-            "gpt-4-1106-preview": 4096,
         }
         num_max_token = num_max_token_map[self.model_type.value]
         num_max_completion_tokens = num_max_token - num_prompt_tokens
         self.model_config_dict['max_tokens'] = num_max_completion_tokens
         print("using model: {}".format(self.model_type.value))
         response = openai.ChatCompletion.create(*args, **kwargs,
                                                 model=self.model_type.value,
                                                 **self.model_config_dict)
         cost = prompt_cost(
             self.model_type.value,
-            num_prompt_tokens=response.usage.prompt_tokens,
-            num_completion_tokens=response.usage.completion_tokens
+            num_prompt_tokens=response["usage"]["prompt_tokens"],
+            num_completion_tokens=response["usage"]["completion_tokens"]
         )

         log_and_print_online(
             "**[OpenAI_Usage_Info Receive]**\nprompt_tokens: {}\ncompletion_tokens: {}\ntotal_tokens: {}\ncost: ${:.6f}\n".format(
-                response.usage.prompt_tokens, response.usage.completion_tokens,
-                response.usage.total_tokens, cost
-            )
-        )
+                response["usage"]["prompt_tokens"], response["usage"]["completion_tokens"],
+                response["usage"]["total_tokens"], cost))
         if not isinstance(response, Dict):
             raise RuntimeError("Unexpected return from OpenAI API")
         return response
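For reference, a minimal sketch of the legacy (pre-1.0, e.g. 0.27.x) openai client interface that the hunk above targets; the returned object is dict-like, which is why subscript access such as response["usage"]["prompt_tokens"] and the isinstance(response, Dict) check work against that client. The model name and prompt below are placeholders, not values taken from this commit.

    # Minimal sketch, legacy openai<1.0 interface; assumes OPENAI_API_KEY is set in the environment.
    import openai

    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo-16k-0613",  # placeholder model name
        messages=[{"role": "user", "content": "Say hello."}],
        max_tokens=64,
    )

    # The legacy response object behaves like a dict, so these reads mirror the code above.
    prompt_tokens = response["usage"]["prompt_tokens"]
    completion_tokens = response["usage"]["completion_tokens"]
    reply_text = response["choices"][0]["message"]["content"]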
@@ -120,7 +116,7 @@ class ModelFactory:
         default_model_type = ModelType.GPT_3_5_TURBO

         if model_type in {
-            ModelType.GPT_3_5_TURBO, ModelType.GPT_4, ModelType.GPT_4_TURBO, ModelType.GPT_4_32k,
+            ModelType.GPT_3_5_TURBO, ModelType.GPT_4, ModelType.GPT_4_32k,
             None
         }:
             model_class = OpenAIModel
@@ -46,7 +46,6 @@ class RoleType(Enum):
 class ModelType(Enum):
     GPT_3_5_TURBO = "gpt-3.5-turbo-16k-0613"
     GPT_4 = "gpt-4"
-    GPT_4_TURBO = "gpt-4-1106-preview"
     GPT_4_32k = "gpt-4-32k"
     STUB = "stub"
@@ -83,7 +83,7 @@ def num_tokens_from_messages(
         encoding = tiktoken.get_encoding("cl100k_base")

     if model in {
-        ModelType.GPT_3_5_TURBO, ModelType.GPT_4, ModelType.GPT_4_TURBO, ModelType.GPT_4_32k,
+        ModelType.GPT_3_5_TURBO, ModelType.GPT_4, ModelType.GPT_4_32k,
         ModelType.STUB
     }:
         return count_tokens_openai_chat_models(messages, encoding)
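As background for the token-counting hunk above, a rough, self-contained sketch of counting chat tokens with tiktoken's cl100k_base encoding; the per-message and per-reply overhead constants are common approximations, not values taken from this repository.

    import tiktoken

    def approx_chat_tokens(messages, tokens_per_message=4, tokens_per_reply=3):
        # Rough estimate for cl100k_base chat models; the overhead constants are approximations.
        encoding = tiktoken.get_encoding("cl100k_base")
        total = 0
        for message in messages:
            total += tokens_per_message
            for value in message.values():
                total += len(encoding.encode(str(value)))
        return total + tokens_per_reply

    print(approx_chat_tokens([{"role": "user", "content": "Develop a basic Gomoku game."}]))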
@@ -113,8 +113,6 @@ def get_model_token_limit(model: ModelType) -> int:
         return 8192
     elif model == ModelType.GPT_4_32k:
         return 32768
-    elif model == ModelType.GPT_4_TURBO:
-        return 128000
     elif model == ModelType.STUB:
         return 4096
     else:
@@ -12,7 +12,6 @@ def prompt_cost(model_type: str, num_prompt_tokens: float, num_completion_tokens
         "gpt-4": 0.03,
         "gpt-4-0613": 0.03,
         "gpt-4-32k": 0.06,
-        "gpt-4-1106-preview": 0.01,
     }

     output_cost_map = {
@@ -23,7 +22,6 @@ def prompt_cost(model_type: str, num_prompt_tokens: float, num_completion_tokens
         "gpt-4": 0.06,
         "gpt-4-0613": 0.06,
         "gpt-4-32k": 0.12,
-        "gpt-4-1106-preview": 0.03,
     }

     if model_type not in input_cost_map or model_type not in output_cost_map:
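The cost maps above are USD prices per 1,000 tokens, so a request's cost is a weighted sum of prompt and completion tokens. A small illustration of that arithmetic using the "gpt-4" rates shown in this hunk (the function is a sketch of the formula, not a copy of the repository's prompt_cost):

    def estimate_cost_usd(num_prompt_tokens, num_completion_tokens,
                          input_price_per_1k=0.03, output_price_per_1k=0.06):
        # Default prices mirror the "gpt-4" entries above (USD per 1,000 tokens).
        return (num_prompt_tokens / 1000.0) * input_price_per_1k \
            + (num_completion_tokens / 1000.0) * output_price_per_1k

    # Example: 1,200 prompt tokens and 300 completion tokens at gpt-4 rates.
    print(estimate_cost_usd(1200, 300))  # 0.036 + 0.018 = 0.054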
@@ -109,8 +107,6 @@ def get_info(dir, log_filepath):
         model_type = "gpt-4"
     elif model_type == "GPT_4_32k":
         model_type = "gpt-4-32k"
-    elif model_type == "GPT_4_TURBO":
-        model_type = "gpt-4-1106-preview"
     # print("model_type:", model_type)

     lines = open(log_filepath, "r", encoding="utf8").read().split("\n")
@@ -3,7 +3,7 @@ Flask==2.3.2
 Flask-SocketIO==5.3.4
 importlib-metadata==6.8.0
 numpy==1.24.3
-openai==1.1.1
+openai==0.27.8
 regex==2023.6.3
 requests==2.31.0
 tenacity==8.2.2
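The openai==0.27.8 pin is consistent with the backend hunk above: the module-level openai.ChatCompletion interface and the dict-style responses it relies on exist only in pre-1.0 releases of the openai package, while openai 1.x moved chat completions onto a client object that returns typed, non-dict responses.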
run.py (5 changed lines)
@@ -15,7 +15,6 @@ import argparse
 import logging
 import os
 import sys
-from types import ModuleType

 from camel.typing import ModelType
@@ -68,7 +67,7 @@ parser.add_argument('--task', type=str, default="Develop a basic Gomoku game.",
 parser.add_argument('--name', type=str, default="Gomoku",
                     help="Name of software, your software will be generated in WareHouse/name_org_timestamp")
 parser.add_argument('--model', type=str, default="GPT_3_5_TURBO",
-                    help="GPT Model, choose from {'GPT_3_5_TURBO','GPT_4','GPT_4_TURBO','GPT_4_32K'}")
+                    help="GPT Model, choose from {'GPT_3_5_TURBO','GPT_4','GPT_4_32K'}")
 parser.add_argument('--path', type=str, default="",
                     help="Your file directory, ChatDev will build upon your software in the Incremental mode")
 args = parser.parse_args()
@@ -79,7 +78,7 @@ args = parser.parse_args()
 # Init ChatChain
 # ----------------------------------------
 config_path, config_phase_path, config_role_path = get_config(args.config)
-args2type = {'GPT_3_5_TURBO': ModelType.GPT_3_5_TURBO, 'GPT_4': ModelType.GPT_4, 'GPT_4_TURBO': ModelType.GPT_4_TURBO, 'GPT_4_32K': ModelType.GPT_4_32k}
+args2type = {'GPT_3_5_TURBO': ModelType.GPT_3_5_TURBO, 'GPT_4': ModelType.GPT_4, 'GPT_4_32K': ModelType.GPT_4_32k}
 chat_chain = ChatChain(config_path=config_path,
                        config_phase_path=config_phase_path,
                        config_role_path=config_role_path,
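With GPT_4_TURBO dropped from args2type, --model now accepts GPT_3_5_TURBO, GPT_4, or GPT_4_32K, which run.py maps to the corresponding ModelType before constructing ChatChain. An illustrative invocation built from the argparse defaults shown above (a sketch, not a command copied from the repository's docs):

    python3 run.py --task "Develop a basic Gomoku game." --name "Gomoku" --model GPT_3_5_TURBO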