Fix provider handling in the GUI (second attempt)

This commit is contained in:
Commenter123321 2023-10-10 15:11:17 +02:00
parent 2c105cb595
commit 9239c57200
4 changed files with 23 additions and 17 deletions

View File

@@ -118,9 +118,13 @@
<div class="field">
<select name="model" id="model">
<option value="gpt-3.5-turbo" selected>gpt-3.5</option>
<option value="gpt-4">gpt-4</option>
<option value="gpt-3.5-turbo-0613">gpt-3.5 fast</option>
<option value="gpt-3.5-turbo-16k">gpt-3.5 16k</option>
<option value="gpt-3.5-turbo-16k-0613">gpt-3.5 16k fast</option>
<option value="gpt-4" selected>gpt-4</option>
<option value="gpt-4-0613">gpt-4 fast</option>
<option value="gpt-4-32k">gpt-4 32k</option>
<option value="gpt-4-32k-0613">gpt-4 32k fast</option>
</select>
</div>
<div class="field">

View File

@@ -33,17 +33,17 @@ class Backend_Api:
conversation = request.json['meta']['content']['conversation']
prompt = request.json['meta']['content']['parts'][0]
model = request.json['model']
provider = get_provider(request.json.get('provider'))
provider = request.json.get('provider').split("g4f.Provider.")[1]
messages = special_instructions[jailbreak] + conversation + search(internet_access, prompt) + [prompt]
def stream():
if provider:
answer = g4f.ChatCompletion.create(model=model,
provider=g4f.Provider.ProviderUtils.convert.get(provider), messages=messages, stream=True)
provider=get_provider(provider), messages=messages, stream=True)
else:
answer = g4f.ChatCompletion.create(model=model,
messages=messages, stream=True)
messages=messages, stream=True)
for token in answer:
yield token

View File

@@ -3,17 +3,12 @@ from g4f import BaseProvider
def get_provider(provider: str) -> BaseProvider | None:
if isinstance(provider, str):
print(provider)
if provider == 'g4f.Provider.Auto':
return None
if provider in g4f.Provider.ProviderUtils.convert:
return g4f.Provider.ProviderUtils.convert[provider]
else:
return None
return g4f.Provider.ProviderUtils.convert.get(provider)
else:
return None

View File

@@ -65,14 +65,16 @@ gpt_35_turbo = Model(
name = 'gpt-3.5-turbo',
base_provider = 'openai',
best_provider = RetryProvider([
DeepAi, ChatgptLogin, ChatgptAi, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, FreeGpt, Ylokh
DeepAi, Aivvm, ChatgptLogin, ChatgptAi, GptGo, AItianhu, Aichat, AItianhuSpace, Myshell, Aibn, FreeGpt, Ylokh
])
)
gpt_4 = Model(
name = 'gpt-4',
base_provider = 'openai',
best_provider = Bing
best_provider = RetryProvider([
Aivvm, Bing
])
)
# Bard
@@ -165,27 +167,32 @@ gpt_35_turbo_16k = Model(
gpt_35_turbo_16k_0613 = Model(
name = 'gpt-3.5-turbo-16k-0613',
base_provider = 'openai')
base_provider = 'openai',
best_provider = Aivvm
)
gpt_35_turbo_0613 = Model(
name = 'gpt-3.5-turbo-0613',
base_provider = 'openai',
best_provider=Aivvm
best_provider = Aivvm
)
gpt_4_0613 = Model(
name = 'gpt-4-0613',
base_provider = 'openai'
base_provider = 'openai',
best_provider = Aivvm
)
gpt_4_32k = Model(
name = 'gpt-4-32k',
base_provider = 'openai'
base_provider = 'openai',
best_provider = Aivvm
)
gpt_4_32k_0613 = Model(
name = 'gpt-4-32k-0613',
base_provider = 'openai'
base_provider = 'openai',
best_provider = Aivvm
)
text_ada_001 = Model(