mirror of
https://github.com/OpenBMB/ChatDev.git
synced 2024-12-27 05:42:55 +03:00
Update OpenAI api - add costs
This commit is contained in:
parent
331c21e395
commit
0b4c9dde7b
@@ -63,24 +63,27 @@ class OpenAIModel(ModelBackend):
"gpt-4": 8192,
"gpt-4-0613": 8192,
"gpt-4-32k": 32768,
"gpt-4-1106-preview": 128000,
"gpt-4-1106-preview": 4096,
}
num_max_token = num_max_token_map[self.model_type.value]
num_max_completion_tokens = num_max_token - num_prompt_tokens
self.model_config_dict['max_tokens'] = num_max_completion_tokens
response = openai.ChatCompletion.create(*args, **kwargs,
print("using model: {}".format(self.model_type.value))
response = openai.chat.completions.create(*args, **kwargs,
model=self.model_type.value,
**self.model_config_dict)
cost = prompt_cost(
self.model_type.value,
num_prompt_tokens=response["usage"]["prompt_tokens"],
num_completion_tokens=response["usage"]["completion_tokens"]
self.model_type.value,
num_prompt_tokens=response.usage.prompt_tokens,
num_completion_tokens=response.usage.completion_tokens
)

log_and_print_online(
"**[OpenAI_Usage_Info Receive]**\nprompt_tokens: {}\ncompletion_tokens: {}\ntotal_tokens: {}\ncost: ${:.6f}\n".format(
response["usage"]["prompt_tokens"], response["usage"]["completion_tokens"],
response["usage"]["total_tokens"], cost))
response.usage.prompt_tokens, response.usage.completion_tokens,
response.usage.total_tokens, cost
)
)
if not isinstance(response, Dict):
raise RuntimeError("Unexpected return from OpenAI API")
return response
|
@@ -12,6 +12,7 @@ def prompt_cost(model_type: str, num_prompt_tokens: float, num_completion_tokens
"gpt-4": 0.03,
"gpt-4-0613": 0.03,
"gpt-4-32k": 0.06,
"gpt-4-1106-preview": 0.01,
}

output_cost_map = {
@@ -22,6 +23,7 @@ def prompt_cost(model_type: str, num_prompt_tokens: float, num_completion_tokens
"gpt-4": 0.06,
"gpt-4-0613": 0.06,
"gpt-4-32k": 0.12,
"gpt-4-1106-preview": 0.03,
}

if model_type not in input_cost_map or model_type not in output_cost_map:
@@ -3,7 +3,7 @@ Flask==2.3.2
Flask-SocketIO==5.3.4
importlib-metadata==6.8.0
numpy==1.24.3
openai==0.27.8
openai==1.1.1
regex==2023.6.3
requests==2.31.0
tenacity==8.2.2
Loading…
Reference in New Issue
Block a user