gpt-4o (beta)
Commit 1399758a4e (parent 97b2a73cd7) in https://github.com/xtekky/gpt4free.git
README.md
@@ -301,7 +301,7 @@ set G4F_PROXY=http://host:port
 While we wait for gpt-5, here is a list of new models that are at least better than gpt-3.5-turbo. **Some are better than gpt-4**. Expect this list to grow.
 
 | Website | Provider | parameters | better than |
 | ------ | ------- | ------ | ------ |
 | [claude-3-opus](https://anthropic.com/) | `g4f.Provider.You` | ?B | gpt-4-0125-preview |
 | [command-r+](https://txt.cohere.com/command-r-plus-microsoft-azure/) | `g4f.Provider.HuggingChat` | 104B | gpt-4-0314 |
 | [llama-3-70b](https://meta.ai/) | `g4f.Provider.Llama` or `DeepInfra` | 70B | gpt-4-0314 |
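Any row in this table can be exercised by pinning the listed provider explicitly. A minimal sketch using g4f's standard ChatCompletion interface (the prompt text is illustrative):

```python
import g4f

# Pin the provider from the table's "Provider" column for claude-3-opus.
response = g4f.ChatCompletion.create(
    model="claude-3-opus",
    provider=g4f.Provider.You,
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
```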
g4f/Provider/You.py
@@ -1,5 +1,3 @@
 from __future__ import annotations
 
-import re
 import json
-import base64
@@ -24,6 +22,7 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
     image_models = ["dall-e"]
     models = [
         default_model,
+        "gpt-4o",
         "gpt-4",
         "gpt-4-turbo",
         "claude-instant",
@@ -42,7 +41,8 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
         *image_models
     ]
     model_aliases = {
-        "claude-v2": "claude-2"
+        "claude-v2": "claude-2",
+        "gpt-4o": "gpt-4o",
     }
     _cookies = None
     _cookies_used = 0
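The model_aliases table lets callers use alternate names for the same backend model. A minimal sketch of the kind of lookup a ProviderModelMixin-style get_model performs (hypothetical helper, not the repo's exact code):

```python
def resolve_model(model: str, aliases: dict[str, str], models: list[str], default: str) -> str:
    if not model:
        return default                 # no name given: fall back to the default model
    model = aliases.get(model, model)  # map alias -> canonical name
    if model not in models:
        raise ValueError(f"Model not supported: {model}")
    return model

# "claude-v2" resolves to "claude-2"; the identity entry keeps "gpt-4o" valid.
```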
@@ -99,7 +99,9 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
             "selectedChatMode": chat_mode,
         }
         if chat_mode == "custom":
+            # print(f"You model: {model}")
             params["selectedAIModel"] = model.replace("-", "_")
+
         async with (session.post if chat_mode == "default" else session.get)(
             f"{cls.url}/api/streamingSearch",
             data=data,
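For custom chat modes the public model name is rewritten into you.com's internal identifier by swapping hyphens for underscores, so the selectedAIModel parameter is derived like this:

```python
# Public model name -> selectedAIModel value:
assert "gpt-4o".replace("-", "_") == "gpt_4o"
assert "gpt-4-turbo".replace("-", "_") == "gpt_4_turbo"
```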
@@ -183,7 +185,15 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
     @classmethod
     async def create_cookies(cls, client: StreamSession) -> Cookies:
         if not cls._telemetry_ids:
-            cls._telemetry_ids = await get_telemetry_ids()
+            try:
+                cls._telemetry_ids = await get_telemetry_ids()
+            except RuntimeError as e:
+                if str(e) == "Event loop is closed":
+                    if debug.logging:
+                        print("Event loop is closed error occurred in create_cookies.")
+                else:
+                    raise
+
         user_uuid = str(uuid.uuid4())
         telemetry_id = cls._telemetry_ids.pop()
         if debug.logging:
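The RuntimeError matched here is asyncio's shutdown race: something touches an event loop that has already been closed. A self-contained reproduction of the exact error text (illustrative, not repo code):

```python
import asyncio

loop = asyncio.new_event_loop()
loop.close()
try:
    # Any use of a closed loop raises the error matched above.
    loop.run_until_complete(asyncio.sleep(0))
except RuntimeError as e:
    print(e)  # -> Event loop is closed
```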
@@ -212,4 +222,4 @@ class You(AsyncGeneratorProvider, ProviderModelMixin):
             'stytch_session_jwt': session["session_jwt"],
             'ydc_stytch_session': session["session_token"],
             'ydc_stytch_session_jwt': session["session_jwt"],
         }
g4f/Provider/you/har_file.py
@@ -4,11 +4,14 @@ import json
 import os
 import os.path
 import random
+import logging
 
 from ...requests import StreamSession, raise_for_status
 from ...errors import MissingRequirementsError
 from ... import debug
 
+logging.basicConfig(level=logging.ERROR)
+
 class NoValidHarFileError(Exception):
     ...
 
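logging.basicConfig only takes effect on the first call that finds the root logger unconfigured, so this module-level call will not override a host application's earlier setup:

```python
import logging

logging.basicConfig(level=logging.INFO)   # host application configures first...
logging.basicConfig(level=logging.ERROR)  # ...so this later call is a no-op

logging.getLogger("demo").info("still emitted: the root level stays at INFO")
```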
@@ -62,10 +65,16 @@ def parseHAREntry(entry) -> arkReq:
     return tmpArk
 
 async def sendRequest(tmpArk: arkReq, proxy: str = None):
-    async with StreamSession(headers=tmpArk.arkHeaders, cookies=tmpArk.arkCookies, proxy=proxy) as session:
-        async with session.post(tmpArk.arkURL, data=tmpArk.arkBody) as response:
-            await raise_for_status(response)
-            return await response.text()
+    try:
+        async with StreamSession(headers=tmpArk.arkHeaders, cookies=tmpArk.arkCookies, proxy=proxy) as session:
+            async with session.post(tmpArk.arkURL, data=tmpArk.arkBody) as response:
+                await raise_for_status(response)
+                return await response.text()
+    except RuntimeError as e:
+        if str(e) == "Event loop is closed":
+            print("Event loop is closed error occurred in sendRequest.")
+        else:
+            raise
 
 async def create_telemetry_id(proxy: str = None):
     global chatArks
@@ -78,9 +87,9 @@ async def get_telemetry_ids(proxy: str = None) -> list:
         return [await create_telemetry_id(proxy)]
     except NoValidHarFileError as e:
         if debug.logging:
-            print(e)
+            logging.error(e)
         if debug.logging:
-            print('Getting telemetry_id for you.com with nodriver')
+            logging.error('Getting telemetry_id for you.com with nodriver')
         try:
             from nodriver import start
         except ImportError:
@@ -100,6 +109,15 @@ async def get_telemetry_ids(proxy: str = None) -> list:
             )
 
             return [await get_telemetry_id()]
+
         finally:
-            if page is not None:
-                await page.close()
+            try:
+                if page is not None:
+                    await page.close()
+
+                if browser is not None:
+                    await browser.close()
+
+            except Exception as e:
+                if debug.logging:
+                    logging.error(e)
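The finally block wraps shutdown in its own try/except so a failing close() is logged instead of masking the function's result. A generic sketch of that pattern, with hypothetical open_resource/work callables standing in for nodriver's start() and page handling:

```python
import logging

async def run_with_cleanup(open_resource, work):
    resource = None
    try:
        resource = await open_resource()  # e.g. launch a browser
        return await work(resource)       # e.g. fetch a telemetry id
    finally:
        try:
            if resource is not None:
                await resource.close()
        except Exception as e:
            logging.error(e)  # log cleanup failures, never re-raise them
```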
g4f/models.py
@@ -100,6 +100,14 @@ gpt_4 = Model(
     ])
 )
 
+gpt_4o = Model(
+    name = 'gpt-4o',
+    base_provider = 'openai',
+    best_provider = RetryProvider([
+        You
+    ])
+)
+
 gpt_4_turbo = Model(
     name = 'gpt-4-turbo',
     base_provider = 'openai',
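With the Model registered, naming gpt-4o without an explicit provider should route through its best_provider chain (here a RetryProvider over You alone). A hedged smoke test via g4f's standard entry point:

```python
import g4f

# No provider argument: g4f resolves "gpt-4o" to the Model defined above
# and lets its RetryProvider pick the backend (here, You).
response = g4f.ChatCompletion.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(response)
```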
@@ -330,6 +338,7 @@ class ModelUtils:
         'gpt-3.5-long': gpt_35_long,
 
         # gpt-4
+        'gpt-4o' : gpt_4o,
         'gpt-4' : gpt_4,
         'gpt-4-0613' : gpt_4_0613,
         'gpt-4-32k' : gpt_4_32k,
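This mapping is what turns a user-facing model string into a Model instance. Assuming the dict above is ModelUtils.convert (as in g4f.models), resolution looks like:

```python
from g4f.models import ModelUtils

# Assumes the mapping above is ModelUtils.convert, as in g4f.models.
model = ModelUtils.convert["gpt-4o"]
print(model.name, model.base_provider)  # expected: gpt-4o openai
```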
|
Loading…
Reference in New Issue
Block a user