Merge pull request #140 from delconis/feautre/retry-model-backend

Fix timeouts caused by API rate limits when using GPT-4 - added a retry to ModelBackend.run().
Chen Qian 2023-10-09 13:27:07 +08:00 committed by GitHub
commit 37f5718f4b
2 changed files with 4 additions and 1 deletion

View File

@@ -16,6 +16,7 @@ from typing import Any, Dict
import openai
import tiktoken
from retry import retry
from camel.typing import ModelType
from chatdev.utils import log_and_print_online
@@ -46,7 +47,8 @@ class OpenAIModel(ModelBackend):
        super().__init__()
        self.model_type = model_type
        self.model_config_dict = model_config_dict

    @retry(tries=-1, delay=0, max_delay=None, backoff=1, jitter=0)
    def run(self, *args, **kwargs) -> Dict[str, Any]:
        string = "\n".join([message["content"] for message in kwargs["messages"]])
        encoding = tiktoken.encoding_for_model(self.model_type.value)
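
For reference, a minimal, self-contained sketch of what the decorator added above does, assuming the PyPI retry package (pinned below as retry==0.9.2): tries=-1 retries the wrapped call indefinitely, and delay=0, backoff=1, jitter=0 means each retry happens immediately with no wait between attempts. The flaky_call function is a hypothetical stand-in for OpenAIModel.run, not code from this commit.

from retry import retry

attempts = {"count": 0}

@retry(tries=-1, delay=0, max_delay=None, backoff=1, jitter=0)
def flaky_call() -> str:
    # Fail twice to simulate transient API errors (e.g. rate-limit timeouts),
    # then succeed; @retry re-invokes the function each time it raises.
    attempts["count"] += 1
    if attempts["count"] < 3:
        raise RuntimeError("simulated rate-limit error")
    return "ok"

print(flaky_call())       # prints "ok" after two failed attempts are retried
print(attempts["count"])  # prints 3

By default the decorator catches any Exception; it also accepts an exceptions argument to restrict retries to specific error types.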

View File

@@ -77,3 +77,4 @@ wrapt==1.15.0
yapf==0.32.0
yarl==1.9.2
zipp==3.16.2
retry==0.9.2
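
retry==0.9.2 pins the PyPI retry package that provides the decorator used above. One design note, offered as an observation rather than part of this commit: with delay=0 and backoff=1 the call is retried immediately, which gives a rate-limited API no time to recover. The same decorator can wait between attempts through its delay, backoff, max_delay, and jitter parameters; a hypothetical gentler configuration might look like this:

from retry import retry

# Hypothetical alternative (not in this commit): wait 1s, 2s, 4s, ... up to 60s
# between attempts, add 0-1s of random jitter, and give up after 10 tries
# instead of retrying forever with no delay.
@retry(tries=10, delay=1, max_delay=60, backoff=2, jitter=(0, 1))
def call_model():
    ...

Capping tries is a trade-off: tries=-1 keeps retrying until the API recovers, while a finite cap surfaces persistent failures instead of looping indefinitely.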