Mirror of https://github.com/xtekky/gpt4free.git, synced 2024-12-23 11:02:40 +03:00
Merge pull request #2490 from hlohaus/ccccc
Fix docker build and fix api_base issue in OpenaiAPI providers
Commit 0332d0d820

.github/workflows/publish-workflow.yaml (vendored, 26 lines changed)
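In summary: the workflow change comments out the armv7 Docker image build step (presumably the failing part of the docker build), and the provider changes replace per-method `api_base` parameter defaults with an `api_base` class attribute that the `OpenaiAPI` base class resolves at call time. A sketch of why the old pattern misbehaved follows the `OpenaiAPI` hunks below.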
@@ -57,19 +57,19 @@ jobs:
           username: ${{ github.repository_owner }}
           password: ${{ secrets.GHCR_PAT }}
 
-      - name: Build and push armv7 image
-        uses: docker/build-push-action@v5
-        with:
-          context: .
-          file: docker/Dockerfile-armv7
-          platforms: linux/arm/v7
-          push: true
-          tags: |
-            hlohaus789/g4f:latest-armv7
-            hlohaus789/g4f:${{ github.ref_name }}-armv7
-          labels: ${{ steps.metadata.outputs.labels }}
-          build-args: |
-            G4F_VERSION=${{ github.ref_name }}
+      # - name: Build and push armv7 image
+      #   uses: docker/build-push-action@v5
+      #   with:
+      #     context: .
+      #     file: docker/Dockerfile-armv7
+      #     platforms: linux/arm/v7
+      #     push: true
+      #     tags: |
+      #       hlohaus789/g4f:latest-armv7
+      #       hlohaus789/g4f:${{ github.ref_name }}-armv7
+      #     labels: ${{ steps.metadata.outputs.labels }}
+      #     build-args: |
+      #       G4F_VERSION=${{ github.ref_name }}
 
       - name: Build and push small images
         uses: docker/build-push-action@v5
@@ -18,7 +18,7 @@ g4f.debug.version_check = False
 GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
 GITHUB_REPOSITORY = os.getenv('GITHUB_REPOSITORY')
 G4F_PROVIDER = os.getenv('G4F_PROVIDER')
-G4F_MODEL = os.getenv('G4F_MODEL') or g4f.models.default
+G4F_MODEL = os.getenv('G4F_MODEL') or g4f.models.gpt_4
 
 def get_pr_details(github: Github) -> PullRequest:
     """
@@ -158,7 +158,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0",
             "Accept": "image/avif,image/webp,image/png,image/svg+xml,image/*;q=0.8,*/*;q=0.5",
             "Accept-Language": "en-US,en;q=0.5",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Encoding": "gzip, deflate, br",
             "Content-Type": "application/json",
             "Authorization": f"Bearer {api_key}",
         }
@@ -192,7 +192,7 @@ class Airforce(AsyncGeneratorProvider, ProviderModelMixin):
             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:133.0) Gecko/20100101 Firefox/133.0",
             "Accept": "application/json, text/event-stream",
             "Accept-Language": "en-US,en;q=0.5",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
+            "Accept-Encoding": "gzip, deflate, br",
             "Content-Type": "application/json",
             "Authorization": f"Bearer {api_key}",
         }
@@ -79,9 +79,9 @@ class Cloudflare(AsyncGeneratorProvider, ProviderModelMixin):
             cls._args["cookies"] = merge_cookies(cls._args["cookies"] , response)
             try:
                 await raise_for_status(response)
-            except ResponseStatusError as e:
+            except ResponseStatusError:
                 cls._args = None
-                raise e
+                raise
             async for line in response.iter_lines():
                 if line.startswith(b'data: '):
                     if line == b'data: [DONE]':
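In the `Cloudflare` hunk, the bare `raise` re-raises the active exception with its original traceback intact, and since the bound name `e` was never used, dropping the binding is the idiomatic form. A minimal illustration with hypothetical names:

    def risky() -> None:
        raise ValueError("boom")

    def fetch() -> None:
        try:
            risky()
        except ValueError:
            print("resetting cached args, then re-raising")
            raise  # bare raise: re-raises the active exception, traceback intact

    try:
        fetch()
    except ValueError as err:
        print(f"caught again at top level: {err}")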
@@ -7,6 +7,7 @@ class DeepInfraChat(OpenaiAPI):
     label = "DeepInfra Chat"
     url = "https://deepinfra.com/chat"
     working = True
+    api_base = "https://api.deepinfra.com/v1/openai"
 
     default_model = 'meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo'
     models = [
@@ -34,7 +35,6 @@ class DeepInfraChat(OpenaiAPI):
         model: str,
         messages: Messages,
         proxy: str = None,
-        api_base: str = "https://api.deepinfra.com/v1/openai",
         **kwargs
     ) -> AsyncResult:
         headers = {
@@ -46,4 +46,4 @@ class DeepInfraChat(OpenaiAPI):
             'X-Deepinfra-Source': 'web-page',
             'accept': 'text/event-stream',
         }
-        return super().create_async_generator(model, messages, proxy, api_base=api_base, headers=headers, **kwargs)
+        return super().create_async_generator(model, messages, proxy, headers=headers, **kwargs)
@@ -15,16 +15,14 @@ from .helper import format_prompt
 class PollinationsAI(OpenaiAPI):
     label = "Pollinations AI"
     url = "https://pollinations.ai"
 
     working = True
     needs_auth = False
     supports_stream = True
+    api_base = "https://text.pollinations.ai/openai"
 
     default_model = "openai"
 
     additional_models_image = ["midjourney", "dall-e-3"]
     additional_models_text = ["sur", "sur-mistral", "claude"]
 
     model_aliases = {
         "gpt-4o": "openai",
         "mistral-nemo": "mistral",
@@ -66,7 +64,6 @@ class PollinationsAI(OpenaiAPI):
         model: str,
         messages: Messages,
         prompt: str = None,
-        api_base: str = "https://text.pollinations.ai/openai",
         api_key: str = None,
         proxy: str = None,
         seed: str = None,
@@ -76,25 +73,28 @@ class PollinationsAI(OpenaiAPI):
     ) -> AsyncResult:
         model = cls.get_model(model)
         if model in cls.image_models:
-            async for response in cls._generate_image(model, messages, prompt, seed, width, height):
+            async for response in cls._generate_image(model, messages, prompt, proxy, seed, width, height):
                 yield response
         elif model in cls.models:
-            async for response in cls._generate_text(model, messages, api_base, api_key, proxy, **kwargs):
+            async for response in cls._generate_text(model, messages, api_key, proxy, **kwargs):
                 yield response
         else:
             raise ValueError(f"Unknown model: {model}")
 
     @classmethod
-    async def _generate_image(cls, model: str, messages: Messages, prompt: str = None, seed: str = None, width: int = 1024, height: int = 1024):
+    async def _generate_image(cls, model: str, messages: Messages, prompt: str = None, proxy: str = None, seed: str = None, width: int = 1024, height: int = 1024):
         if prompt is None:
             prompt = messages[-1]["content"]
         if seed is None:
             seed = random.randint(0, 100000)
         image = f"https://image.pollinations.ai/prompt/{quote(prompt)}?width={width}&height={height}&seed={int(seed)}&nofeed=true&nologo=true&model={quote(model)}"
+        async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session:
+            async with session.get(image) as response:
+                await raise_for_status(response)
         yield ImageResponse(image, prompt)
 
     @classmethod
-    async def _generate_text(cls, model: str, messages: Messages, api_base: str, api_key: str = None, proxy: str = None, **kwargs):
+    async def _generate_text(cls, model: str, messages: Messages, api_key: str = None, proxy: str = None, **kwargs):
         if api_key is None:
             async with ClientSession(connector=get_connector(proxy=proxy), headers=cls.headers) as session:
                 prompt = format_prompt(messages)
@@ -104,6 +104,6 @@ class PollinationsAI(OpenaiAPI):
                     yield line.decode(errors="ignore")
         else:
             async for chunk in super().create_async_generator(
-                model, messages, api_base=api_base, proxy=proxy, **kwargs
+                model, messages, proxy=proxy, **kwargs
             ):
                 yield chunk
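`PollinationsAI` now carries its text endpoint in the `api_base` class attribute like the other `OpenaiAPI` subclasses, and `_generate_image` gained a `proxy` parameter: it issues a GET against the generated image URL and runs `raise_for_status` before yielding the `ImageResponse`, so an unreachable image endpoint fails with a clear error instead of returning a dead link.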
@@ -31,10 +31,10 @@ class Ollama(OpenaiAPI):
         api_base: str = None,
         **kwargs
     ) -> AsyncResult:
-        if not api_base:
+        if api_base is None:
             host = os.getenv("OLLAMA_HOST", "localhost")
             port = os.getenv("OLLAMA_PORT", "11434")
             api_base: str = f"http://{host}:{port}/v1"
         return super().create_async_generator(
             model, messages, api_base=api_base, **kwargs
         )
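The `Ollama` change from `if not api_base` to `if api_base is None` narrows the fallback to the genuinely-unset case: a falsy but non-`None` value such as an empty string is now forwarded to the parent class rather than silently replaced by the `OLLAMA_HOST`/`OLLAMA_PORT` default.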
@@ -25,7 +25,6 @@ class Cerebras(OpenaiAPI):
         cls,
         model: str,
         messages: Messages,
-        api_base: str = api_base,
         api_key: str = None,
         cookies: Cookies = None,
         **kwargs
@@ -41,7 +40,6 @@ class Cerebras(OpenaiAPI):
             api_key = data.get("user", {}).get("demoApiKey")
         async for chunk in super().create_async_generator(
             model, messages,
-            api_base=api_base,
             impersonate="chrome",
             api_key=api_key,
             headers={
@@ -1,9 +1,8 @@
 from __future__ import annotations
 
-from ..base_provider import ProviderModelMixin
 from ..Copilot import Copilot
 
-class CopilotAccount(Copilot, ProviderModelMixin):
+class CopilotAccount(Copilot):
     needs_auth = True
     parent = "Copilot"
     default_model = "Copilot"
@@ -8,6 +8,7 @@ class DeepInfra(OpenaiAPI):
     label = "DeepInfra"
     url = "https://deepinfra.com"
     working = True
+    api_base = "https://api.deepinfra.com/v1/openai",
     needs_auth = True
     supports_stream = True
     supports_message_history = True
@@ -27,7 +28,6 @@ class DeepInfra(OpenaiAPI):
         model: str,
         messages: Messages,
         stream: bool,
-        api_base: str = "https://api.deepinfra.com/v1/openai",
         temperature: float = 0.7,
         max_tokens: int = 1028,
         **kwargs
@@ -50,7 +50,6 @@ class DeepInfra(OpenaiAPI):
         return super().create_async_generator(
             model, messages,
             stream=stream,
-            api_base=api_base,
             temperature=temperature,
             max_tokens=max_tokens,
             headers=headers,
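Note the trailing comma on the added `DeepInfra.api_base` line: in Python, `api_base = "https://api.deepinfra.com/v1/openai",` binds a one-element tuple, not a string. If that comma is really in the committed file rather than an artifact of this diff view, `cls.api_base` would misbehave here compared with the other providers' plain string attributes.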
@@ -1,7 +1,6 @@
 from __future__ import annotations
 
 from .OpenaiAPI import OpenaiAPI
-from ...typing import AsyncResult, Messages
 
 class Groq(OpenaiAPI):
     label = "Groq"
@@ -29,16 +28,4 @@ class Groq(OpenaiAPI):
         "whisper-large-v3",
         "whisper-large-v3-turbo",
     ]
     model_aliases = {"mixtral-8x7b": "mixtral-8x7b-32768", "llama2-70b": "llama2-70b-4096"}
-
-    @classmethod
-    def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        api_base: str = api_base,
-        **kwargs
-    ) -> AsyncResult:
-        return super().create_async_generator(
-            model, messages, api_base=api_base, **kwargs
-        )
@@ -2,7 +2,6 @@ from __future__ import annotations
 
 from .OpenaiAPI import OpenaiAPI
 from .HuggingChat import HuggingChat
-from ...typing import AsyncResult, Messages
 
 class HuggingFaceAPI(OpenaiAPI):
     label = "HuggingFace (Inference API)"
@@ -13,17 +12,4 @@ class HuggingFaceAPI(OpenaiAPI):
     default_vision_model = default_model
     models = [
         *HuggingChat.models
     ]
-
-    @classmethod
-    def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        api_base: str = api_base,
-        max_tokens: int = 500,
-        **kwargs
-    ) -> AsyncResult:
-        return super().create_async_generator(
-            model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs
-        )
@@ -23,10 +23,12 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
     fallback_models = []
 
     @classmethod
-    def get_models(cls, api_key: str = None, api_base: str = api_base) -> list[str]:
+    def get_models(cls, api_key: str = None, api_base: str = None) -> list[str]:
         if not cls.models:
             try:
                 headers = {}
+                if api_base is None:
+                    api_base = cls.api_base
                 if api_key is not None:
                     headers["authorization"] = f"Bearer {api_key}"
                 response = requests.get(f"{api_base}/models", headers=headers)
@@ -48,7 +50,7 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
         timeout: int = 120,
         images: ImagesType = None,
         api_key: str = None,
-        api_base: str = api_base,
+        api_base: str = None,
         temperature: float = None,
         max_tokens: int = None,
         top_p: float = None,
@@ -61,6 +63,8 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
     ) -> AsyncResult:
         if cls.needs_auth and api_key is None:
             raise MissingAuthError('Add a "api_key"')
+        if api_base is None:
+            api_base = cls.api_base
         if images is not None:
             if not model and hasattr(cls, "default_vision_model"):
                 model = cls.default_vision_model
@@ -134,8 +138,10 @@ class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
                 elif "error" in data:
                     if "code" in data["error"]:
                         raise ResponseError(f'Error {data["error"]["code"]}: {data["error"]["message"]}')
-                    else:
+                    elif "message" in data["error"]:
                         raise ResponseError(data["error"]["message"])
+                    else:
+                        raise ResponseError(data["error"])
 
     @classmethod
     def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
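These `OpenaiAPI` hunks contain the actual `api_base` fix. A default like `api_base: str = api_base` is evaluated once, when the method object is created, so it permanently captures whatever `api_base` was in scope at class-definition time; subclasses that set their own `api_base` attribute never saw it unless they redefined the whole method, which is exactly the boilerplate deleted from `Groq`, `HuggingFaceAPI`, `PerplexityApi`, and `xAI`. Resolving `None` to `cls.api_base` at call time picks up the subclass attribute automatically. A minimal sketch of both behaviors, with hypothetical class names and URLs:

    class Base:
        api_base = "https://base.example/v1"

        # Old pattern: the default is captured when the method is defined,
        # so it is Base's URL even when the method is called on a subclass.
        @classmethod
        def models_url_old(cls, api_base: str = api_base) -> str:
            return f"{api_base}/models"

        # New pattern: resolve at call time, so cls.api_base honors overrides.
        @classmethod
        def models_url_new(cls, api_base: str = None) -> str:
            if api_base is None:
                api_base = cls.api_base
            return f"{api_base}/models"

    class Sub(Base):
        api_base = "https://sub.example/v1"

    assert Sub.models_url_old() == "https://base.example/v1/models"  # wrong base
    assert Sub.models_url_new() == "https://sub.example/v1/models"   # correct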
@@ -438,7 +438,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 async for line in response.iter_lines():
                     async for chunk in cls.iter_messages_line(session, line, conversation):
                         yield chunk
-        if not history_disabled and RequestConfig.access_token is not None:
+        if not history_disabled and cls._api_key is not None:
             yield SynthesizeData(cls.__name__, {
                 "conversation_id": conversation.conversation_id,
                 "message_id": conversation.message_id,
@@ -1,12 +1,12 @@
 from __future__ import annotations
 
 from .OpenaiAPI import OpenaiAPI
-from ...typing import AsyncResult, Messages
 
 class PerplexityApi(OpenaiAPI):
     label = "Perplexity API"
     url = "https://www.perplexity.ai"
     working = True
+    api_base = "https://api.perplexity.ai"
     default_model = "llama-3-sonar-large-32k-online"
     models = [
         "llama-3-sonar-small-32k-chat",
@@ -15,16 +15,4 @@ class PerplexityApi(OpenaiAPI):
         "llama-3-sonar-large-32k-online",
         "llama-3-8b-instruct",
         "llama-3-70b-instruct",
     ]
-
-    @classmethod
-    def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        api_base: str = "https://api.perplexity.ai",
-        **kwargs
-    ) -> AsyncResult:
-        return super().create_async_generator(
-            model, messages, api_base=api_base, **kwargs
-        )
@@ -31,6 +31,7 @@ class ThebApi(OpenaiAPI):
     label = "TheB.AI API"
     url = "https://theb.ai"
     working = True
+    api_base = "https://api.theb.ai/v1"
     needs_auth = True
     default_model = "gpt-3.5-turbo"
     models = list(models)
@@ -40,7 +41,6 @@ class ThebApi(OpenaiAPI):
         cls,
         model: str,
         messages: Messages,
-        api_base: str = "https://api.theb.ai/v1",
         temperature: float = 1,
         top_p: float = 1,
         **kwargs
@@ -58,4 +58,4 @@ class ThebApi(OpenaiAPI):
                 "top_p": top_p,
             }
         }
-        return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs)
+        return super().create_async_generator(model, messages, extra_data=data, **kwargs)
@@ -1,22 +1,9 @@
 from __future__ import annotations
 
 from .OpenaiAPI import OpenaiAPI
-from ...typing import AsyncResult, Messages
 
 class xAI(OpenaiAPI):
     label = "xAI"
     url = "https://console.x.ai"
     api_base = "https://api.x.ai/v1"
     working = True
-
-    @classmethod
-    def create_async_generator(
-        cls,
-        model: str,
-        messages: Messages,
-        api_base: str = api_base,
-        **kwargs
-    ) -> AsyncResult:
-        return super().create_async_generator(
-            model, messages, api_base=api_base, **kwargs
-        )
@@ -93,8 +93,9 @@ body {
     height: 100vh;
 }
 
-a:-webkit-any-link {
-    color: var(--accent);
+body:not(.white) a:link,
+body:not(.white) a:visited{
+    color: var(--colour-3);
 }
 
 .row {
@@ -380,7 +381,6 @@ body.white .gradient{
 .message .content_inner a:visited{
     font-size: 15px;
     line-height: 1.3;
-    color: var(--colour-3);
 }
 .message .content_inner pre{
     white-space: pre-wrap;
@@ -513,21 +513,7 @@ async function add_message_chunk(message, message_id) {
         content_map.inner.innerHTML = markdown_render(message.preview);
     } else if (message.type == "content") {
         message_storage[message_id] += message.content;
-        html = markdown_render(message_storage[message_id]);
-        let lastElement, lastIndex = null;
-        for (element of ['</p>', '</code></pre>', '</p>\n</li>\n</ol>', '</li>\n</ol>', '</li>\n</ul>']) {
-            const index = html.lastIndexOf(element)
-            if (index - element.length > lastIndex) {
-                lastElement = element;
-                lastIndex = index;
-            }
-        }
-        if (lastIndex) {
-            html = html.substring(0, lastIndex) + '<span class="cursor"></span>' + lastElement;
-        }
-        content_map.inner.innerHTML = html;
-        content_map.count.innerText = count_words_and_tokens(message_storage[message_id], provider_storage[message_id]?.model);
-        highlight(content_map.inner);
+        update_message(content_map, message_id);
         content_map.inner.style.height = "";
     } else if (message.type == "log") {
         let p = document.createElement("p");
@@ -536,16 +522,6 @@ async function add_message_chunk(message, message_id) {
     } else if (message.type == "synthesize") {
         synthesize_storage[message_id] = message.synthesize;
     }
-    let scroll_down = ()=>{
-        if (message_box.scrollTop >= message_box.scrollHeight - message_box.clientHeight - 100) {
-            window.scrollTo(0, 0);
-            message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
-        }
-    }
-    if (!content_map.container.classList.contains("regenerate")) {
-        scroll_down();
-        setTimeout(scroll_down, 200);
-    }
 }
 
 const ask_gpt = async (message_id, message_index = -1, regenerate = false, provider = null, model = null) => {
@@ -1233,6 +1209,36 @@ function count_words_and_tokens(text, model) {
     return `(${count_words(text)} words, ${count_chars(text)} chars, ${count_tokens(model, text)} tokens)`;
 }
 
+function update_message(content_map, message_id) {
+    content_map.inner.dataset.timeout = setTimeout(() => {
+        html = markdown_render(message_storage[message_id]);
+        let lastElement, lastIndex = null;
+        for (element of ['</p>', '</code></pre>', '</p>\n</li>\n</ol>', '</li>\n</ol>', '</li>\n</ul>']) {
+            const index = html.lastIndexOf(element)
+            if (index - element.length > lastIndex) {
+                lastElement = element;
+                lastIndex = index;
+            }
+        }
+        if (lastIndex) {
+            html = html.substring(0, lastIndex) + '<span class="cursor"></span>' + lastElement;
+        }
+        if (error_storage[message_id]) {
+            content_map.inner.innerHTML += markdown_render(`**An error occured:** ${error_storage[message_id]}`);
+        }
+        content_map.inner.innerHTML = html;
+        content_map.count.innerText = count_words_and_tokens(message_storage[message_id], provider_storage[message_id]?.model);
+        highlight(content_map.inner);
+        if (!content_map.container.classList.contains("regenerate")) {
+            if (message_box.scrollTop >= message_box.scrollHeight - message_box.clientHeight - 200) {
+                window.scrollTo(0, 0);
+                message_box.scrollTo({ top: message_box.scrollHeight, behavior: "auto" });
+            }
+        }
+        if (content_map.inner.dataset.timeout) clearTimeout(content_map.inner.dataset.timeout);
+    }, 100);
+};
+
 let countFocus = messageInput;
 let timeoutId;
 const count_input = async () => {
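On the GUI side, the per-chunk rendering work (markdown render, cursor insertion, word/token count, highlighting, autoscroll) moves out of `add_message_chunk` into a new `update_message` helper that defers it behind a 100 ms `setTimeout`, so a burst of streamed chunks triggers one render rather than one per chunk. Two details worth noting as written: the error markup is appended to `innerHTML` immediately before `innerHTML` is reassigned to `html`, which appears to overwrite the error text, and the autoscroll threshold grew from 100 px to 200 px.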
@@ -24,7 +24,6 @@ from .Provider import (
     HuggingFace,
     Liaobots,
     Airforce,
-    Mhystical,
     MetaAI,
     MicrosoftDesigner,
     OpenaiChat,
@@ -68,7 +67,6 @@ default = Model(
     best_provider = IterListProvider([
         DDG,
         Pizzagpt,
-        ReplicateHome,
         Blackbox2,
         Blackbox,
         Copilot,
@@ -78,7 +76,7 @@ default = Model(
         Cloudflare,
         PollinationsAI,
         ChatGptEs,
-        ChatGpt,
+        OpenaiChat,
     ])
 )
 
@@ -151,14 +151,14 @@ async def get_args_from_nodriver(
     else:
         await browser.cookies.set_all(get_cookie_params_from_dict(cookies, url=url, domain=domain))
     page = await browser.get(url)
-    for c in await browser.cookies.get_all():
-        if c.domain.endswith(domain):
-            cookies[c.name] = c.value
+    for c in await page.send(nodriver.cdp.network.get_cookies([url])):
+        cookies[c.name] = c.value
     user_agent = await page.evaluate("window.navigator.userAgent")
     await page.wait_for("body:not(.no-js)", timeout=timeout)
     await page.close()
     browser.stop()
     return {
+        "impersonate": "chrome",
         "cookies": cookies,
         "headers": {
             **DEFAULT_HEADERS,
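Finally, `get_args_from_nodriver` now reads cookies through the Chrome DevTools Protocol call `network.get_cookies([url])` on the page, which already scopes the result to the target URL and so replaces the manual domain-suffix filter over `browser.cookies.get_all()`. The returned argument dict also pins `"impersonate": "chrome"` for the downstream HTTP session (presumably the curl_cffi-backed one).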