Mirror of https://github.com/OpenBMB/ChatDev.git (synced 2024-11-08 02:43:57 +03:00)
Merge pull request #140 from delconis/feautre/retry-model-backend
Fix timeouts caused by API rate limits when using GPT-4: added a retry when running the ModelBackend.
This commit is contained in: 37f5718f4b
@@ -16,6 +16,7 @@ from typing import Any, Dict
 
 import openai
 import tiktoken
+from retry import retry
 
 from camel.typing import ModelType
 from chatdev.utils import log_and_print_online
@@ -46,7 +47,8 @@ class OpenAIModel(ModelBackend):
         super().__init__()
         self.model_type = model_type
         self.model_config_dict = model_config_dict
 
+    @retry(tries=-1, delay=0, max_delay=None, backoff=1, jitter=0)
     def run(self, *args, **kwargs) -> Dict[str, Any]:
         string = "\n".join([message["content"] for message in kwargs["messages"]])
         encoding = tiktoken.encoding_for_model(self.model_type.value)
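For context, a minimal, self-contained sketch of how the retry decorator from the retry package (the dependency this commit adds) behaves with these exact parameters; the flaky_call function and its attempt counter are hypothetical, used only to illustrate the retry loop, and are not part of ChatDev:

from retry import retry

attempts = {"count": 0}

# tries=-1 retries indefinitely; delay=0 with backoff=1 and jitter=0 means there is
# no wait between attempts, so every raised exception triggers an immediate re-call.
@retry(tries=-1, delay=0, max_delay=None, backoff=1, jitter=0)
def flaky_call() -> str:
    attempts["count"] += 1
    if attempts["count"] < 3:
        raise RuntimeError("simulated rate-limit error")  # stands in for an OpenAI rate-limit failure
    return "ok after {} attempts".format(attempts["count"])

if __name__ == "__main__":
    print(flaky_call())  # succeeds on the third attempt

Note that with tries=-1 and a zero, non-backing-off delay, a persistent rate-limit error would make the decorated run method retry in a tight loop rather than wait between calls.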
@@ -77,3 +77,4 @@ wrapt==1.15.0
 yapf==0.32.0
 yarl==1.9.2
 zipp==3.16.2
+retry==0.9.2
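Since requirements.txt now pins retry==0.9.2, existing checkouts need their dependencies reinstalled (for example, pip install -r requirements.txt) before the new import resolves.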