diff --git a/g4f/Provider/retry_provider.py b/g4f/Provider/retry_provider.py
index 4ecfaffb..ee342315 100644
--- a/g4f/Provider/retry_provider.py
+++ b/g4f/Provider/retry_provider.py
@@ -41,11 +41,14 @@ class RetryProvider(AsyncProvider):
             try:
                 if self.logging:
                     print(f"Using {provider.__name__} provider")
+
                 for token in provider.create_completion(model, messages, stream, **kwargs):
                     yield token
                     started = True
+
                 if started:
                     return
+
             except Exception as e:
                 self.exceptions[provider.__name__] = e
                 if self.logging:
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
index bca14050..a682f094 100644
--- a/g4f/api/__init__.py
+++ b/g4f/api/__init__.py
@@ -1,4 +1,4 @@
-import g4f
+import g4f; g4f.logging = True
 import time
 import json
 import random
diff --git a/g4f/api/run.py b/g4f/api/run.py
index d214aae7..9a2a5786 100644
--- a/g4f/api/run.py
+++ b/g4f/api/run.py
@@ -2,4 +2,5 @@ import g4f
 import g4f.api
 
 if __name__ == "__main__":
+    print(f'Starting server... [g4f v-{g4f.version}]')
     g4f.api.Api(g4f).run('localhost:1337', 8)
\ No newline at end of file
diff --git a/g4f/models.py b/g4f/models.py
index 07259eca..798b4684 100644
--- a/g4f/models.py
+++ b/g4f/models.py
@@ -11,6 +11,7 @@ from .Provider import (
     ChatgptAi,
     GptChatly,
     Liaobots,
+    ChatgptX,
     Yqcloud,
     GeekGpt,
     Myshell,
@@ -69,9 +70,9 @@ gpt_35_long = Model(
 gpt_35_turbo = Model(
     name = 'gpt-3.5-turbo',
     base_provider = 'openai',
-    best_provider = RetryProvider([
-        Aichat, ChatgptDemo, AiAsk, ChatForAi, GPTalk,
-        GptGo, You, GptForLove, ChatBase
+    best_provider=RetryProvider([
+        ChatgptX, ChatgptDemo, GptGo, You,
+        NoowAi, GPTalk, GptForLove, Phind, ChatBase
     ])
 )
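
The RetryProvider hunk above streams tokens from each provider in turn, marks the stream as started once a token has been yielded, returns after a provider completes, and records the exception on failure so the next provider can be tried. Below is a minimal, self-contained sketch of that fallback pattern; MockProviderA and MockProviderB are hypothetical stand-ins, not real g4f providers, and retry_stream only approximates the real RetryProvider method.

# Sketch of the retry/streaming fallback shown in the diff (assumptions:
# MockProviderA/MockProviderB and retry_stream are illustrative, not g4f APIs).

class MockProviderA:
    @staticmethod
    def create_completion(model, messages, stream, **kwargs):
        # Simulate a provider that is currently unavailable.
        raise RuntimeError("provider unavailable")

class MockProviderB:
    @staticmethod
    def create_completion(model, messages, stream, **kwargs):
        # Simulate a provider that streams its answer token by token.
        yield "Hello, "
        yield "world!"

def retry_stream(providers, model, messages, stream=True, logging=True, **kwargs):
    exceptions = {}
    started = False
    for provider in providers:
        try:
            if logging:
                print(f"Using {provider.__name__} provider")

            for token in provider.create_completion(model, messages, stream, **kwargs):
                yield token
                started = True

            if started:
                return

        except Exception as e:
            # Remember the failure and fall through to the next provider.
            exceptions[provider.__name__] = e
            if logging:
                print(f"{provider.__name__}: {e.__class__.__name__}: {e}")

    raise RuntimeError(f"All providers failed: {exceptions}")

if __name__ == "__main__":
    messages = [{"role": "user", "content": "Hi"}]
    for token in retry_stream([MockProviderA, MockProviderB], "gpt-3.5-turbo", messages):
        print(token, end="")
    print()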