Merge pull request #1807 from hlohaus/satur

Add WhiteRabbitNeo Provider, many tiny improvements in the GUI
H Lohaus 2024-04-09 19:40:42 +02:00 committed by GitHub
commit 4c23b4cad4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
31 changed files with 4987 additions and 162 deletions

2
.github/FUNDING.yml vendored

@ -1,3 +1,3 @@
ko_fi: xtekky ko_fi: xtekky
github: [xtekky] github: [xtekky, hlohaus]
patreon: xtekky patreon: xtekky


@ -188,7 +188,8 @@ image_url = response.data[0].url
**Full Documentation for Python API** **Full Documentation for Python API**
- New Client API like the OpenAI Python library: [/docs/client](/docs/client.md) - New AsyncClient API from G4F: [/docs/async_client](/docs/async_client.md)
- Client API like the OpenAI Python library: [/docs/client](/docs/async_client.md)
- Legacy API with python modules: [/docs/legacy](/docs/legacy.md) - Legacy API with python modules: [/docs/legacy](/docs/legacy.md)
#### Web UI #### Web UI

95
docs/async_client.md Normal file

@ -0,0 +1,95 @@
# How to Use the G4F AsyncClient API
The AsyncClient API is the asynchronous counterpart to the standard G4F Client API. It offers the same functionality as the synchronous API, with the added benefit of non-blocking, concurrent execution.
Designed to maintain compatibility with the existing OpenAI API, the G4F AsyncClient API ensures a seamless transition for users already familiar with the OpenAI client.
## Key Features
The G4F AsyncClient API offers several key features:
- **Custom Providers:** The G4F AsyncClient API allows you to use custom providers instead of the automatic provider selection, which broadens the range of use cases it can serve (see the sketch after this list).
- **ChatCompletion Interface:** The G4F package exposes chat models through the ChatCompletion class, which offers methods for creating both streaming and non-streaming responses.
- **Streaming Responses:** The ChatCompletion.create method can return the response iteratively, chunk by chunk, as it is received when the stream parameter is set to True.
- **Non-Streaming Responses:** The ChatCompletion.create method can also generate non-streaming responses.
- **Image Generation and Vision Models:** The G4F Client API also supports image generation and vision models, expanding its utility beyond text-based interactions.
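A minimal sketch of the custom-provider feature, using the constructor parameters shown in the asyncio.gather example further down (the chosen provider classes are only examples):
```python
from g4f.client import AsyncClient
from g4f.Provider import Bing, Gemini

# Pin the client to specific providers instead of the automatic selection;
# `provider` handles chat completions, `image_provider` handles images.
client = AsyncClient(
    provider=Bing,
    image_provider=Gemini,
)
```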
## Using AsyncClient
### Text Completions:
You can use the ChatCompletions endpoint to generate text completions as follows:
```python
from g4f.client import AsyncClient

client = AsyncClient()

response = await client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "Say this is a test"}],
    # additional parameters can be passed here
)
print(response.choices[0].message.content)
```
Streaming completions are also supported:
```python
stream = client.chat.completions.create(
    model="gpt-4",
    messages=[{"role": "user", "content": "Say this is a test"}],
    stream=True,
    # additional parameters can be passed here
)
async for chunk in stream:
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content or "", end="")
```
### Image Generation:
You can generate images using a specified prompt:
```python
response = await client.images.generate(
    model="dall-e-3",
    prompt="a white siamese cat",
    # additional parameters can be passed here
)
image_url = response.data[0].url
```
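The feature list above also mentions vision models. Their exact parameters are not covered in this document, so the following is only a sketch, assuming the chat completion call accepts an image file object via an `image` keyword argument and a vision-capable model name (both are assumptions, not taken from this changeset):
```python
# Hypothetical vision request: the `image` keyword and the model name
# are assumptions; adjust them to whatever your provider supports.
response = await client.chat.completions.create(
    model="gpt-4-vision",
    messages=[{"role": "user", "content": "What is shown in this picture?"}],
    image=open("example.jpg", "rb"),
)
print(response.choices[0].message.content)
```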
### Example usage with asyncio.gather
Start two tasks at the same time:
```python
import asyncio

from g4f.client import AsyncClient
from g4f.Provider import BingCreateImages, OpenaiChat, Gemini

async def main():
    client = AsyncClient(
        provider=OpenaiChat,
        image_provider=Gemini,
        # other parameters...
    )

    task1 = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say this is a test"}],
    )
    task2 = client.images.generate(
        model="dall-e-3",
        prompt="a white siamese cat",
    )

    responses = await asyncio.gather(task1, task2)
    print(responses)

asyncio.run(main())
```


@ -46,6 +46,7 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
messages: Messages, messages: Messages,
proxy: str = None, proxy: str = None,
timeout: int = 900, timeout: int = 900,
api_key: str = None,
cookies: Cookies = None, cookies: Cookies = None,
connector: BaseConnector = None, connector: BaseConnector = None,
tone: str = None, tone: str = None,
@ -68,6 +69,8 @@ class Bing(AsyncGeneratorProvider, ProviderModelMixin):
:return: An asynchronous result object. :return: An asynchronous result object.
""" """
prompt = messages[-1]["content"] prompt = messages[-1]["content"]
if api_key is not None:
cookies["_U"] = api_key
if context is None: if context is None:
context = create_context(messages[:-1]) if len(messages) > 1 else None context = create_context(messages[:-1]) if len(messages) > 1 else None
if tone is None: if tone is None:
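The hunk above routes an `api_key` argument into Bing's `"_U"` cookie. A minimal usage sketch, assuming extra keyword arguments such as `api_key` are forwarded from the client call to the provider (the model name is a guess and the cookie value is a placeholder):
```python
import asyncio

from g4f.client import AsyncClient
from g4f.Provider import Bing

async def main():
    client = AsyncClient(provider=Bing)
    response = await client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "Hello"}],
        # Forwarded to the provider, which stores it as cookies["_U"].
        api_key="<your Bing _U cookie>",
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```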


@ -0,0 +1,64 @@
from __future__ import annotations
import json
import aiohttp
from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
from ..typing import AsyncResult, Messages
from ..requests.raise_for_status import raise_for_status
class DuckDuckGo(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://duckduckgo.com/duckchat"
working = True
supports_gpt_35_turbo = True
supports_message_history = True
default_model = "gpt-3.5-turbo-0125"
models = ["gpt-3.5-turbo-0125", "claude-instant-1.2"]
model_aliases = {"gpt-3.5-turbo": "gpt-3.5-turbo-0125"}
status_url = "https://duckduckgo.com/duckchat/v1/status"
chat_url = "https://duckduckgo.com/duckchat/v1/chat"
user_agent = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:123.0) Gecko/20100101 Firefox/123.0'
headers = {
'User-Agent': user_agent,
'Accept': 'text/event-stream',
'Accept-Language': 'de,en-US;q=0.7,en;q=0.3',
'Accept-Encoding': 'gzip, deflate, br',
'Referer': 'https://duckduckgo.com/',
'Content-Type': 'application/json',
'Origin': 'https://duckduckgo.com',
'Connection': 'keep-alive',
'Cookie': 'dcm=1',
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'Pragma': 'no-cache',
'TE': 'trailers'
}
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
**kwargs
) -> AsyncResult:
async with aiohttp.ClientSession(headers=cls.headers) as session:
async with session.get(cls.status_url, headers={"x-vqd-accept": "1"}) as response:
await raise_for_status(response)
vqd_4 = response.headers.get("x-vqd-4")
payload = {
'model': cls.get_model(model),
'messages': messages
}
async with session.post(cls.chat_url, json=payload, headers={"x-vqd-4": vqd_4}) as response:
await raise_for_status(response)
async for line in response.content:
if line.startswith(b"data: "):
chunk = line[6:]
if chunk.startswith(b"[DONE]"):
break
data = json.loads(chunk)
if "message" in data:
yield data["message"]
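A hedged usage sketch for the new provider, following the client pattern from docs/async_client.md (the alias resolution comes from the class above):
```python
import asyncio

from g4f.client import AsyncClient
from g4f.Provider import DuckDuckGo

async def main():
    client = AsyncClient(provider=DuckDuckGo)
    response = await client.chat.completions.create(
        # "gpt-3.5-turbo" resolves to "gpt-3.5-turbo-0125" via model_aliases.
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "Say this is a test"}],
    )
    print(response.choices[0].message.content)

asyncio.run(main())
```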


@ -13,6 +13,10 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
url = "https://huggingface.co/chat" url = "https://huggingface.co/chat"
working = True working = True
supports_message_history = True supports_message_history = True
models = [
"mistralai/Mixtral-8x7B-Instruct-v0.1",
"mistralai/Mistral-7B-Instruct-v0.2"
]
default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1" default_model = "mistralai/Mixtral-8x7B-Instruct-v0.1"
@classmethod @classmethod
@ -29,7 +33,7 @@ class HuggingFace(AsyncGeneratorProvider, ProviderModelMixin):
temperature: float = 0.7, temperature: float = 0.7,
**kwargs **kwargs
) -> AsyncResult: ) -> AsyncResult:
model = cls.get_model(model) model = cls.get_model(model) if not model else model
headers = {} headers = {}
if api_key is not None: if api_key is not None:
headers["Authorization"] = f"Bearer {api_key}" headers["Authorization"] = f"Bearer {api_key}"


@ -0,0 +1,57 @@
from __future__ import annotations
from aiohttp import ClientSession, BaseConnector
from ..typing import AsyncResult, Messages, Cookies
from ..requests.raise_for_status import raise_for_status
from .base_provider import AsyncGeneratorProvider
from .helper import get_cookies, get_connector, get_random_string
class WhiteRabbitNeo(AsyncGeneratorProvider):
url = "https://www.whiterabbitneo.com"
working = True
supports_message_history = True
needs_auth = True
@classmethod
async def create_async_generator(
cls,
model: str,
messages: Messages,
cookies: Cookies = None,
connector: BaseConnector = None,
proxy: str = None,
**kwargs
) -> AsyncResult:
if cookies is None:
cookies = get_cookies("www.whiterabbitneo.com")
headers = {
"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:123.0) Gecko/20100101 Firefox/123.0",
"Accept": "*/*",
"Accept-Language": "de,en-US;q=0.7,en;q=0.3",
"Accept-Encoding": "gzip, deflate, br",
"Referer": f"{cls.url}/",
"Content-Type": "text/plain;charset=UTF-8",
"Origin": cls.url,
"Connection": "keep-alive",
"Sec-Fetch-Dest": "empty",
"Sec-Fetch-Mode": "cors",
"Sec-Fetch-Site": "same-origin",
"TE": "trailers"
}
async with ClientSession(
headers=headers,
cookies=cookies,
connector=get_connector(connector, proxy)
) as session:
data = {
"messages": messages,
"id": get_random_string(6),
"enhancePrompt": False,
"useFunctions": False
}
async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
await raise_for_status(response)
async for chunk in response.content.iter_any():
if chunk:
yield chunk.decode(errors="ignore")
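Because the class sets `needs_auth = True` and falls back to `get_cookies("www.whiterabbitneo.com")`, calling it requires cookies from a logged-in browser session. A minimal sketch that drives the generator directly (cookie names and values are placeholders):
```python
import asyncio

from g4f.Provider import WhiteRabbitNeo

async def main():
    # If `cookies` is omitted, the provider reads them from the local browser
    # via get_cookies("www.whiterabbitneo.com"); the entry below is a placeholder.
    async for chunk in WhiteRabbitNeo.create_async_generator(
        model="",  # not used by this provider
        messages=[{"role": "user", "content": "Hello"}],
        cookies={"<auth cookie name>": "<auth cookie value>"},
    ):
        print(chunk, end="")

asyncio.run(main())
```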


@ -21,6 +21,7 @@ from .ChatgptFree import ChatgptFree
from .ChatgptNext import ChatgptNext from .ChatgptNext import ChatgptNext
from .ChatgptX import ChatgptX from .ChatgptX import ChatgptX
from .DeepInfra import DeepInfra from .DeepInfra import DeepInfra
from .DuckDuckGo import DuckDuckGo
from .FlowGpt import FlowGpt from .FlowGpt import FlowGpt
from .FreeChatgpt import FreeChatgpt from .FreeChatgpt import FreeChatgpt
from .FreeGpt import FreeGpt from .FreeGpt import FreeGpt
@ -37,6 +38,7 @@ from .Local import Local
from .PerplexityLabs import PerplexityLabs from .PerplexityLabs import PerplexityLabs
from .Pi import Pi from .Pi import Pi
from .Vercel import Vercel from .Vercel import Vercel
from .WhiteRabbitNeo import WhiteRabbitNeo
from .You import You from .You import You
import sys import sys


@ -37,6 +37,7 @@
import llamaTokenizer from "llama-tokenizer-js" import llamaTokenizer from "llama-tokenizer-js"
</script> </script>
<script src="https://unpkg.com/gpt-tokenizer/dist/cl100k_base.js" async></script> <script src="https://unpkg.com/gpt-tokenizer/dist/cl100k_base.js" async></script>
<script src="/static/js/text_to_speech/index.js" async></script>
<script> <script>
const user_image = '<img src="/static/img/user.png" alt="your avatar">'; const user_image = '<img src="/static/img/user.png" alt="your avatar">';
const gpt_image = '<img src="/static/img/gpt.png" alt="your avatar">'; const gpt_image = '<img src="/static/img/gpt.png" alt="your avatar">';
@ -49,13 +50,93 @@
<body> <body>
<div class="gradient"></div> <div class="gradient"></div>
<div class="row"> <div class="row">
<div class="box conversations hidden"> <div class="box conversations">
<div class="top"> <div class="top">
<button class="new_convo" onclick="new_conversation()"> <button class="new_convo" onclick="new_conversation()">
<i class="fa-regular fa-plus"></i> <i class="fa-regular fa-plus"></i>
<span>New Conversation</span> <span>New Conversation</span>
</button> </button>
</div> </div>
<div class="bottom_buttons">
<button onclick="open_settings();">
<i class="fa-solid fa-toolbox"></i>
<span>Open Settings</span>
</button>
<div class="info">
<i class="fa-brands fa-discord"></i>
<span class="convo-title">discord ~ <a href="https://discord.gg/XfybzPXPH5">discord.gg/XfybzPXPH5</a>
</span>
</div>
<div class="info">
<i class="fa-brands fa-github"></i>
<span class="convo-title">github ~ <a href="https://github.com/xtekky/gpt4free">@xtekky/gpt4free</a>
</span>
</div>
<div class="info">
<i class="fa-solid fa-star"></i>
<span id="version_text" class="convo-title"></span>
</div>
</div>
</div>
<div class="settings hidden">
<div class="paper">
<div class="field">
<span class="label">Web Access</span>
<input type="checkbox" id="switch" />
<label for="switch" class="toogle" title="Add the pages of the first 5 search results to the query."></label>
</div>
<div class="field">
<span class="label">Disable History</span>
<input type="checkbox" id="history" />
<label for="history" class="toogle" title="To improve the reaction time or if you have trouble with large conversations."></label>
</div>
<div class="field">
<span class="label">Hide System prompt</span>
<input type="checkbox" id="hide-systemPrompt" />
<label for="hide-systemPrompt" class="toogle" title="For more space on phones"></label>
</div>
<div class="field">
<span class="label">Auto continue</span>
<input id="auto_continue" type="checkbox" name="auto_continue" checked/>
<label for="auto_continue" class="toogle" title="Continue large responses in OpenaiChat"></label>
</div>
<div class="field box">
<label for="message-input-height" class="label" title="">Input max. grow height</label>
<input type="number" id="message-input-height" value="200"/>
</div>
<div class="field box">
<label for="recognition-language" class="label" title="">Speech recognition lang</label>
<input type="text" id="recognition-language" value="" placeholder="navigator.language"/>
</div>
<div class="field box">
<label for="OpenaiChat-api_key" class="label" title="">OpenaiChat: api_key</label>
<textarea id="OpenaiChat-api_key" name="OpenaiChat[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="Bing-api_key" class="label" title="">Bing: "_U" cookie</label>
<textarea id="Bing-api_key" name="Bing[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="Gemini-api_key" class="label" title="">Gemini: Cookies</label>
<textarea id="Gemini-api_key" name="Gemini[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="Openai-api_key" class="label" title="">Openai: api_key</label>
<textarea id="Openai-api_key" name="Openai[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="Groq-api_key" class="label" title="">Groq: api_key</label>
<textarea id="Groq-api_key" name="Groq[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="GeminiPro-api_key" class="label" title="">GeminiPro: api_key</label>
<textarea id="GeminiPro-api_key" name="GeminiPro[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="HuggingFace-api_key" class="label" title="">HuggingFace: api_key</label>
<textarea id="HuggingFace-api_key" name="HuggingFace[api_key]" placeholder="..."></textarea>
</div>
</div>
<div class="bottom_buttons"> <div class="bottom_buttons">
<button onclick="delete_conversations()"> <button onclick="delete_conversations()">
<i class="fa-regular fa-trash"></i> <i class="fa-regular fa-trash"></i>
@ -65,46 +146,6 @@
<i class="fa-solid fa-download"></i> <i class="fa-solid fa-download"></i>
<a href="" onclick="return false;">Export Conversations</a> <a href="" onclick="return false;">Export Conversations</a>
</button> </button>
<div class="info">
<i class="fa-brands fa-github"></i>
<span class="convo-title">github ~ <a href="https://github.com/xtekky/gpt4free">@gpt4free</a>
</span>
</div>
<div class="info">
<i class="fa-solid fa-star"></i>
<span id="version_text" class="convo-title"></span>
</div>
</div>
</div>
<div class="settings">
<div class="field box">
<label for="OpenaiChat-api_key" class="label" title="">OpenaiChat: access_token</label>
<textarea id="OpenaiChat-api_key" name="OpenaiChat[api_key]" placeholder="..."></textarea>
</div>
<div class="field">
<span class="label">OpenaiChat: Auto continue</span>
<input id="OpenaiChat-auto_continue" type="checkbox" name="OpenaiChat[auto_continue]" checked/>
<label for="OpenaiChat-auto_continue" class="toogle" title=""></label>
</div>
<div class="field box">
<label for="Bing-api_key" class="label" title="">Bing: "_U" cookie</label>
<textarea id="Bing-api_key" name="Bing[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="Gemini-api_key" class="label" title="">Gemini: Auth cookies</label>
<textarea id="Gemini-api_key" name="Gemini[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="Openai-api_key" class="label" title="">Openai: api_key</label>
<textarea id="Openai-api_key" name="Openai[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="GeminiPro-api_key" class="label" title="">GeminiPro: api_key</label>
<textarea id="GeminiPro-api_key" name="GeminiPro[api_key]" placeholder="..."></textarea>
</div>
<div class="field box">
<label for="HuggingFace-api_key" class="label" title="">HuggingFace: api_key</label>
<textarea id="HuggingFace-api_key" name="HuggingFace[api_key]" placeholder="..."></textarea>
</div> </div>
</div> </div>
<div class="conversation"> <div class="conversation">
@ -143,6 +184,9 @@
<input type="file" id="file" name="file" accept="text/plain, text/html, text/xml, application/json, text/javascript, .sh, .py, .php, .css, .yaml, .sql, .log, .csv, .twig, .md" required/> <input type="file" id="file" name="file" accept="text/plain, text/html, text/xml, application/json, text/javascript, .sh, .py, .php, .css, .yaml, .sql, .log, .csv, .twig, .md" required/>
<i class="fa-solid fa-paperclip"></i> <i class="fa-solid fa-paperclip"></i>
</label> </label>
<label class="micro-label" for="micro">
<i class="fa-solid fa-microphone-slash"></i>
</label>
<div id="send-button"> <div id="send-button">
<i class="fa-solid fa-paper-plane-top"></i> <i class="fa-solid fa-paper-plane-top"></i>
</div> </div>
@ -161,43 +205,15 @@
<select name="model2" id="model2" class="hidden"></select> <select name="model2" id="model2" class="hidden"></select>
</div> </div>
<div class="field"> <div class="field">
<select name="jailbreak" id="jailbreak" style="display: none;"> <select name="provider" id="provider">
<option value="default" selected>Set Jailbreak</option> <option value="">Provider: Auto</option>
<option value="gpt-math-1.0">math 1.0</option> <option value="Bing">Bing</option>
<option value="gpt-dude-1.0">dude 1.0</option> <option value="OpenaiChat">OpenaiChat</option>
<option value="gpt-dan-1.0">dan 1.0</option> <option value="Gemini">Gemini</option>
<option value="gpt-dan-2.0">dan 2.0</option> <option value="Liaobots">Liaobots</option>
<option value="gpt-dev-2.0">dev 2.0</option> <option value="You">You</option>
<option value="gpt-evil-1.0">evil 1.0</option> <option value="">----</option>
</select> </select>
<div class="field">
<select name="provider" id="provider">
<option value="">Provider: Auto</option>
<option value="Bing">Bing</option>
<option value="OpenaiChat">OpenaiChat</option>
<option value="Gemini">Gemini</option>
<option value="Liaobots">Liaobots</option>
<option value="You">You</option>
<option value="">----</option>
</select>
</div>
</div>
<div class="field">
<input type="checkbox" id="switch" />
<label for="switch" title="Add the pages of the first 5 search results to the query."></label>
<span class="about">Web Access</span>
</div>
<!--
<div class="field">
<input type="checkbox" id="patch" />
<label for="patch" title="Enable create images with Bing."></label>
<span class="about">Image Generator</span>
</div>
-->
<div class="field">
<input type="checkbox" id="history" />
<label for="history" title="To improve the reaction time or if you have trouble with large conversations."></label>
<span class="about">Disable History</span>
</div> </div>
</div> </div>
</div> </div>


@ -84,15 +84,17 @@ body {
} }
body { body {
padding: var(--section-gap); padding: 10px;
background: var(--colour-1); background: var(--colour-1);
color: var(--colour-3); color: var(--colour-3);
height: 100vh; height: 100vh;
max-width: 1600px;
margin: auto;
} }
.row { .row {
display: flex; display: flex;
gap: var(--section-gap); gap: 10px;
height: 100%; height: 100%;
} }
@ -106,12 +108,8 @@ body {
border: 1px solid var(--blur-border); border: 1px solid var(--blur-border);
} }
.hidden {
display: none;
}
.conversations { .conversations {
max-width: 260px; max-width: 280px;
padding: var(--section-gap); padding: var(--section-gap);
overflow: auto; overflow: auto;
flex-shrink: 0; flex-shrink: 0;
@ -138,8 +136,7 @@ body {
} }
.conversation .user-input { .conversation .user-input {
max-height: 200px; margin-bottom: 4px;
margin-bottom: 10px;
} }
.conversation .user-input input { .conversation .user-input input {
@ -183,8 +180,8 @@ body {
.conversations { .conversations {
display: flex; display: flex;
flex-direction: column; flex-direction: column;
gap: var(--inner-gap); gap: 10px;
padding: var(--inner-gap); padding: 10px;
} }
.conversations .title { .conversations .title {
@ -207,7 +204,12 @@ body {
cursor: pointer; cursor: pointer;
display: flex; display: flex;
align-items: center; align-items: center;
gap: 10px; gap: 4px;
}
.conversations .convo .fa-trash {
position: absolute;
right: 8px;
} }
.conversations .convo .choise { .conversations .convo .choise {
@ -216,7 +218,7 @@ body {
background-color: var(--blur-bg); background-color: var(--blur-bg);
} }
.conversations i { .conversations i, .bottom_buttons i {
color: var(--conversations); color: var(--conversations);
cursor: pointer; cursor: pointer;
} }
@ -229,6 +231,10 @@ body {
white-space: nowrap; white-space: nowrap;
} }
.convo-title .datetime {
font-size: 10px;
}
.message { .message {
width: 100%; width: 100%;
overflow-wrap: break-word; overflow-wrap: break-word;
@ -309,6 +315,7 @@ body {
display: flex; display: flex;
flex-direction: column; flex-direction: column;
gap: 10px; gap: 10px;
flex-wrap: wrap;
} }
.message .content, .message .content,
@ -326,6 +333,10 @@ body {
max-width: 400px; max-width: 400px;
} }
.message .content .audio{
display: flex;
}
.message .user i { .message .user i {
position: absolute; position: absolute;
bottom: -6px; bottom: -6px;
@ -351,10 +362,28 @@ body {
cursor: pointer; cursor: pointer;
} }
.message .count .fa-clipboard,
.message .count .fa-volume-high {
z-index: 1000;
cursor: pointer;
}
.message .user .fa-xmark { .message .user .fa-xmark {
color: var(--colour-1); color: var(--colour-1);
} }
.message .count .fa-clipboard {
color: var(--colour-3);
}
.message .count .fa-clipboard.clicked {
color: var(--accent);
}
.message .count .fa-volume-high.active {
color: var(--accent);
}
.message .assistant:hover .fa-xmark, .message .assistant:hover .fa-xmark,
.message .user:hover .fa-xmark { .message .user:hover .fa-xmark {
display: block; display: block;
@ -482,7 +511,8 @@ body {
display: none; display: none;
} }
.file-label { .file-label,
.micro-label {
cursor: pointer; cursor: pointer;
position: absolute; position: absolute;
top: 10px; top: 10px;
@ -490,7 +520,8 @@ body {
} }
.file-label:has(> input:valid), .file-label:has(> input:valid),
.file-label.selected { .file-label.selected,
.micro-label.recognition {
color: var(--accent); color: var(--accent);
} }
@ -498,11 +529,12 @@ label[for="image"] {
top: 32px; top: 32px;
} }
label[for="camera"] { label[for="micro"] {
top: 54px; top: 54px;
} }
label[for="camera"] { label[for="camera"] {
top: 74px;
display: none; display: none;
} }
@ -556,6 +588,16 @@ label[for="camera"] {
background: var(--accent); background: var(--accent);
} }
.settings .bottom_buttons {
flex-direction: row;
}
.settings .bottom_buttons button {
display: inline-block;
max-width: 210px;
width: 100%;
}
.buttons input:checked+label:after { .buttons input:checked+label:after {
left: calc(100% - 5px - 20px); left: calc(100% - 5px - 20px);
} }
@ -565,6 +607,7 @@ label[for="camera"] {
align-items: center; align-items: center;
justify-content: left; justify-content: left;
width: 100%; width: 100%;
margin-bottom: 4px;
} }
.field { .field {
@ -635,6 +678,7 @@ select {
display: flex; display: flex;
flex-direction: column; flex-direction: column;
gap: 10px; gap: 10px;
margin: 4px 0;
} }
.bottom_buttons button { .bottom_buttons button {
@ -807,7 +851,7 @@ a:-webkit-any-link {
color: var(--colour-3); color: var(--colour-3);
resize: vertical; resize: vertical;
max-height: 150px; max-height: 200px;
min-height: 80px; min-height: 80px;
} }
@ -1026,7 +1070,14 @@ a:-webkit-any-link {
.settings { .settings {
width: 100%; width: 100%;
display: none; min-width: 700px;
display: flex;
flex-direction: column;
}
.settings .paper {
overflow: auto;
flex-direction: column;
} }
.settings .field { .settings .field {
@ -1039,6 +1090,19 @@ a:-webkit-any-link {
padding: var(--inner-gap) 0; padding: var(--inner-gap) 0;
} }
.settings input {
background-color: transparent;
padding: 2px;
border: none;
font-size: 15px;
width: 100%;
color: var(--colour-3);
}
.settings input:focus {
outline: none;
}
.settings .label { .settings .label {
font-size: 15px; font-size: 15px;
padding: var(--inner-gap) 0; padding: var(--inner-gap) 0;
@ -1059,7 +1123,7 @@ a:-webkit-any-link {
border-radius: 5px; border-radius: 5px;
} }
::-webkit-scrollbar-thumb:hover { ::-webkit-scrollbar-thumb:hover {
background: var(--accent) background: var(--accent);
} }
.hljs { .hljs {
@ -1073,8 +1137,23 @@ a:-webkit-any-link {
#message-input { #message-input {
height: 82px; height: 82px;
margin-left: 20px; margin-left: 20px;
max-height: 200px;
} }
#message-input::-webkit-scrollbar { #message-input::-webkit-scrollbar {
width: 5px; width: 5px;
} }
.hidden {
display: none;
}
.blink {
animation: blinker 1s step-start infinite;
}
@keyframes blinker {
50% {
opacity: 0;
}
}


@ -10,18 +10,19 @@ const sendButton = document.getElementById("send-button");
const imageInput = document.getElementById("image"); const imageInput = document.getElementById("image");
const cameraInput = document.getElementById("camera"); const cameraInput = document.getElementById("camera");
const fileInput = document.getElementById("file"); const fileInput = document.getElementById("file");
const microLabel = document.querySelector(".micro-label")
const inputCount = document.getElementById("input-count") const inputCount = document.getElementById("input-count")
const providerSelect = document.getElementById("provider"); const providerSelect = document.getElementById("provider");
const modelSelect = document.getElementById("model"); const modelSelect = document.getElementById("model");
const modelProvider = document.getElementById("model2"); const modelProvider = document.getElementById("model2");
const systemPrompt = document.getElementById("systemPrompt") const systemPrompt = document.getElementById("systemPrompt")
const jailbreak = document.getElementById("jailbreak"); const settings = document.querySelector(".settings")
let prompt_lock = false; let prompt_lock = false;
let content, content_inner, content_count = null; let content, content_inner, content_count = null;
const options = ["switch", "model", "model2", "jailbreak", "patch", "provider", "history"]; const optionElements = document.querySelectorAll(".settings input, .settings textarea, #model, #model2, #provider")
messageInput.addEventListener("blur", () => { messageInput.addEventListener("blur", () => {
window.scrollTo(0, 0); window.scrollTo(0, 0);
@ -63,7 +64,8 @@ const highlight = (container) => {
); );
} }
const register_remove_message = async () => { let stopped = false;
const register_message_buttons = async () => {
document.querySelectorAll(".message .fa-xmark").forEach(async (el) => { document.querySelectorAll(".message .fa-xmark").forEach(async (el) => {
if (!("click" in el.dataset)) { if (!("click" in el.dataset)) {
el.dataset.click = "true"; el.dataset.click = "true";
@ -77,6 +79,86 @@ const register_remove_message = async () => {
}) })
} }
}); });
document.querySelectorAll(".message .fa-clipboard").forEach(async (el) => {
if (!("click" in el.dataset)) {
el.dataset.click = "true";
el.addEventListener("click", async () => {
const message_el = el.parentElement.parentElement.parentElement;
const copyText = await get_message(window.conversation_id, message_el.dataset.index);
navigator.clipboard.writeText(copyText);
el.classList.add("clicked");
setTimeout(() => el.classList.remove("clicked"), 1000);
})
}
});
document.querySelectorAll(".message .fa-volume-high").forEach(async (el) => {
if (!("click" in el.dataset)) {
el.dataset.click = "true";
el.addEventListener("click", async () => {
if ("active" in el.classList || window.doSpeech) {
stopped = true;
return;
}
if (stopped) {
stopped = false;
return;
}
el.classList.add("blink")
el.classList.add("active")
const message_el = el.parentElement.parentElement.parentElement;
const content_el = el.parentElement.parentElement;
let speechText = await get_message(window.conversation_id, message_el.dataset.index);
speechText = speechText.replaceAll(/\[(.+)\]\(.+\)/gm, "($1)");
speechText = speechText.replaceAll("`", "").replaceAll("#", "")
speechText = speechText.replaceAll(
/<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm,
""
)
const lines = speechText.trim().split(/\n|\.|;/);
let ended = true;
window.onSpeechResponse = (url) => {
el.classList.remove("blink")
if (url) {
var sound = document.createElement('audio');
sound.controls = 'controls';
sound.src = url;
sound.type = 'audio/wav';
if (ended) {
sound.autoplay = true;
}
sound.onended = function() {
ended = true;
};
sound.onplay = function() {
ended = false;
};
var container = document.createElement('div');
container.classList.add("audio");
container.appendChild(sound);
content_el.appendChild(container);
}
if (lines.length < 1 || stopped) {
el.classList.remove("active");
return;
}
while (lines.length > 0) {
let line = lines.shift();
var reg = new RegExp('^[0-9]$');
if (line && !reg.test(line)) {
return handleGenerateSpeech(line);
}
}
if (!line) {
el.classList.remove("active")
}
}
let line = lines.shift();
return handleGenerateSpeech(line);
});
}
});
} }
const delete_conversations = async () => { const delete_conversations = async () => {
@ -132,7 +214,11 @@ const handle_ask = async () => {
: '' : ''
} }
</div> </div>
<div class="count">${count_words_and_tokens(message, get_selected_model())}</div> <div class="count">
${count_words_and_tokens(message, get_selected_model())}
<i class="fa-solid fa-volume-high"></i>
<i class="fa-regular fa-clipboard"></i>
</div>
</div> </div>
</div> </div>
`; `;
@ -305,15 +391,22 @@ const ask_gpt = async () => {
try { try {
const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput; const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput;
const file = input && input.files.length > 0 ? input.files[0] : null; const file = input && input.files.length > 0 ? input.files[0] : null;
const provider = providerSelect.options[providerSelect.selectedIndex].value;
const auto_continue = document.getElementById("auto_continue")?.checked;
if (file && !provider)
provider = "Bing";
let api_key = null;
if (provider)
api_key = document.getElementById(`${provider}-api_key`)?.value;
await api("conversation", { await api("conversation", {
id: window.token, id: window.token,
conversation_id: window.conversation_id, conversation_id: window.conversation_id,
model: get_selected_model(), model: get_selected_model(),
jailbreak: jailbreak?.options[jailbreak.selectedIndex].value,
web_search: document.getElementById("switch").checked, web_search: document.getElementById("switch").checked,
provider: providerSelect.options[providerSelect.selectedIndex].value, provider: provider,
patch_provider: document.getElementById("patch")?.checked, messages: messages,
messages: messages auto_continue: auto_continue,
api_key: api_key
}, file); }, file);
if (!error) { if (!error) {
html = markdown_render(text); html = markdown_render(text);
@ -341,7 +434,7 @@ const ask_gpt = async () => {
window.scrollTo(0, 0); window.scrollTo(0, 0);
message_box.scrollTop = message_box.scrollHeight; message_box.scrollTop = message_box.scrollHeight;
await remove_cancel_button(); await remove_cancel_button();
await register_remove_message(); await register_message_buttons();
prompt_lock = false; prompt_lock = false;
await load_conversations(); await load_conversations();
regenerate.classList.remove("regenerate-hidden"); regenerate.classList.remove("regenerate-hidden");
@ -459,7 +552,11 @@ const load_conversation = async (conversation_id, scroll=true) => {
<div class="content"> <div class="content">
${provider} ${provider}
<div class="content_inner">${markdown_render(item.content)}</div> <div class="content_inner">${markdown_render(item.content)}</div>
<div class="count">${count_words_and_tokens(item.content, next_provider?.model)}</div> <div class="count">
${count_words_and_tokens(item.content, next_provider?.model)}
<i class="fa-solid fa-volume-high"></i>
<i class="fa-regular fa-clipboard"></i>
</div>
</div> </div>
</div> </div>
`; `;
@ -475,8 +572,9 @@ const load_conversation = async (conversation_id, scroll=true) => {
} }
message_box.innerHTML = elements; message_box.innerHTML = elements;
register_remove_message(); register_message_buttons();
highlight(message_box); highlight(message_box);
regenerate.classList.remove("regenerate-hidden");
if (scroll) { if (scroll) {
message_box.scrollTo({ top: message_box.scrollHeight, behavior: "smooth" }); message_box.scrollTo({ top: message_box.scrollHeight, behavior: "smooth" });
@ -495,6 +593,7 @@ async function get_conversation(conversation_id) {
} }
async function save_conversation(conversation_id, conversation) { async function save_conversation(conversation_id, conversation) {
conversation.updated = Date.now();
appStorage.setItem( appStorage.setItem(
`conversation:${conversation_id}`, `conversation:${conversation_id}`,
JSON.stringify(conversation) JSON.stringify(conversation)
@ -517,6 +616,7 @@ async function add_conversation(conversation_id, content) {
await save_conversation(conversation_id, { await save_conversation(conversation_id, {
id: conversation_id, id: conversation_id,
title: title, title: title,
added: Date.now(),
system: systemPrompt?.value, system: systemPrompt?.value,
items: [], items: [],
}); });
@ -530,8 +630,10 @@ async function save_system_message() {
return; return;
} }
const conversation = await get_conversation(window.conversation_id); const conversation = await get_conversation(window.conversation_id);
conversation.system = systemPrompt?.value; if (conversation) {
await save_conversation(window.conversation_id, conversation); conversation.system = systemPrompt?.value;
await save_conversation(window.conversation_id, conversation);
}
} }
const hide_last_message = async (conversation_id) => { const hide_last_message = async (conversation_id) => {
@ -563,6 +665,12 @@ const remove_message = async (conversation_id, index) => {
await save_conversation(conversation_id, conversation); await save_conversation(conversation_id, conversation);
}; };
const get_message = async (conversation_id, index) => {
const messages = await get_messages(conversation_id);
if (index in messages)
return messages[index]["content"];
};
const add_message = async (conversation_id, role, content, provider) => { const add_message = async (conversation_id, role, content, provider) => {
const conversation = await get_conversation(conversation_id); const conversation = await get_conversation(conversation_id);
conversation.items.push({ conversation.items.push({
@ -586,11 +694,17 @@ const load_conversations = async () => {
await clear_conversations(); await clear_conversations();
for (conversation of conversations) { for (conversation of conversations) {
let updated = "";
if (conversation.updated) {
const date = new Date(conversation.updated);
updated = date.toLocaleString('en-GB', {dateStyle: 'short', timeStyle: 'short', monthStyle: 'short'});
updated = updated.replace("/" + date.getFullYear(), "")
}
box_conversations.innerHTML += ` box_conversations.innerHTML += `
<div class="convo" id="convo-${conversation.id}"> <div class="convo" id="convo-${conversation.id}">
<div class="left" onclick="set_conversation('${conversation.id}')"> <div class="left" onclick="set_conversation('${conversation.id}')">
<i class="fa-regular fa-comments"></i> <i class="fa-regular fa-comments"></i>
<span class="convo-title">${conversation.title}</span> <span class="convo-title"><span class="datetime">${updated}</span> ${conversation.title}</span>
</div> </div>
<i onclick="show_option('${conversation.id}')" class="fa-regular fa-trash" id="conv-${conversation.id}"></i> <i onclick="show_option('${conversation.id}')" class="fa-regular fa-trash" id="conv-${conversation.id}"></i>
<div id="cho-${conversation.id}" class="choise" style="display:none;"> <div id="cho-${conversation.id}" class="choise" style="display:none;">
@ -642,7 +756,8 @@ const message_id = () => {
async function hide_sidebar() { async function hide_sidebar() {
sidebar.classList.remove("shown"); sidebar.classList.remove("shown");
sidebar_button.classList.remove("rotated"); sidebar_button.classList.remove("rotated");
if (window.location.pathname == "/menu/") { settings.classList.add("hidden");
if (window.location.pathname == "/menu/" || window.location.pathname == "/settings/") {
history.back(); history.back();
} }
} }
@ -650,6 +765,7 @@ async function hide_sidebar() {
window.addEventListener('popstate', hide_sidebar, false); window.addEventListener('popstate', hide_sidebar, false);
sidebar_button.addEventListener("click", (event) => { sidebar_button.addEventListener("click", (event) => {
settings.classList.add("hidden");
if (sidebar.classList.contains("shown")) { if (sidebar.classList.contains("shown")) {
hide_sidebar(); hide_sidebar();
} else { } else {
@ -660,31 +776,45 @@ sidebar_button.addEventListener("click", (event) => {
window.scrollTo(0, 0); window.scrollTo(0, 0);
}); });
function open_settings() {
if (settings.classList.contains("hidden")) {
sidebar.classList.remove("shown");
settings.classList.remove("hidden");
history.pushState({}, null, "/settings/");
} else {
settings.classList.add("hidden");
}
}
const register_settings_storage = async () => { const register_settings_storage = async () => {
options.forEach((id) => { optionElements.forEach((element) => {
element = document.getElementById(id); if (element.type == "textarea") {
if (!element) { element.addEventListener('input', async (event) => {
return; appStorage.setItem(element.id, element.value);
});
} else {
element.addEventListener('change', async (event) => {
switch (element.type) {
case "checkbox":
appStorage.setItem(element.id, element.checked);
break;
case "select-one":
appStorage.setItem(element.id, element.selectedIndex);
break;
case "text":
appStorage.setItem(element.id, element.value);
break;
default:
console.warn("Unresolved element type");
}
});
} }
element.addEventListener('change', async (event) => {
switch (event.target.type) {
case "checkbox":
appStorage.setItem(id, event.target.checked);
break;
case "select-one":
appStorage.setItem(id, event.target.selectedIndex);
break;
default:
console.warn("Unresolved element type");
}
});
}); });
} }
const load_settings_storage = async () => { const load_settings_storage = async () => {
options.forEach((id) => { optionElements.forEach((element) => {
element = document.getElementById(id); if (!(value = appStorage.getItem(element.id))) {
if (!element || !(value = appStorage.getItem(id))) {
return; return;
} }
if (value) { if (value) {
@ -695,6 +825,10 @@ const load_settings_storage = async () => {
case "select-one": case "select-one":
element.selectedIndex = parseInt(value); element.selectedIndex = parseInt(value);
break; break;
case "text":
case "textarea":
element.value = value;
break;
default: default:
console.warn("Unresolved element type"); console.warn("Unresolved element type");
} }
@ -791,7 +925,7 @@ systemPrompt.addEventListener("focus", function() {
countFocus = systemPrompt; countFocus = systemPrompt;
count_input(); count_input();
}); });
systemPrompt.addEventListener("blur", function() { systemPrompt.addEventListener("input", function() {
countFocus = messageInput; countFocus = messageInput;
count_input(); count_input();
}); });
@ -859,6 +993,27 @@ async function on_api() {
await load_provider_models(appStorage.getItem("provider")); await load_provider_models(appStorage.getItem("provider"));
await load_settings_storage() await load_settings_storage()
const hide_systemPrompt = document.getElementById("hide-systemPrompt")
if (hide_systemPrompt.checked) {
systemPrompt.classList.add("hidden");
}
hide_systemPrompt.addEventListener('change', async (event) => {
if (event.target.checked) {
systemPrompt.classList.add("hidden");
} else {
systemPrompt.classList.remove("hidden");
}
});
const messageInputHeight = document.getElementById("message-input-height");
if (messageInputHeight) {
if (messageInputHeight.value) {
messageInput.style.maxHeight = `${messageInputHeight.value}px`;
}
messageInputHeight.addEventListener('change', async () => {
messageInput.style.maxHeight = `${messageInputHeight.value}px`;
});
}
} }
async function load_version() { async function load_version() {
@ -875,7 +1030,7 @@ async function load_version() {
} }
document.getElementById("version_text").innerHTML = text document.getElementById("version_text").innerHTML = text
} }
setTimeout(load_version, 5000); setTimeout(load_version, 2000);
for (const el of [imageInput, cameraInput]) { for (const el of [imageInput, cameraInput]) {
el.addEventListener('click', async () => { el.addEventListener('click', async () => {
@ -928,7 +1083,7 @@ fileInput.addEventListener('change', async (event) => {
} }
}); });
systemPrompt?.addEventListener("blur", async () => { systemPrompt?.addEventListener("input", async () => {
await save_system_message(); await save_system_message();
}); });
@ -1035,12 +1190,12 @@ function save_storage() {
let item = appStorage.getItem(key); let item = appStorage.getItem(key);
if (key.startsWith("conversation:")) { if (key.startsWith("conversation:")) {
data[key] = JSON.parse(item); data[key] = JSON.parse(item);
} else { } else if (!key.includes("api_key")) {
data["options"][key] = item; data["options"][key] = item;
} }
} }
data = JSON.stringify(data, null, 4); data = JSON.stringify(data, null, 4);
const blob = new Blob([data], {type: 'text/csv'}); const blob = new Blob([data], {type: 'application/json'});
if(window.navigator.msSaveOrOpenBlob) { if(window.navigator.msSaveOrOpenBlob) {
window.navigator.msSaveBlob(blob, filename); window.navigator.msSaveBlob(blob, filename);
} else{ } else{
@ -1052,3 +1207,67 @@ function save_storage() {
document.body.removeChild(elem); document.body.removeChild(elem);
} }
} }
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
if (SpeechRecognition) {
const mircoIcon = microLabel.querySelector("i");
mircoIcon.classList.add("fa-microphone");
mircoIcon.classList.remove("fa-microphone-slash");
const recognition = new SpeechRecognition();
recognition.continuous = true;
recognition.interimResults = true;
recognition.maxAlternatives = 1;
function may_stop() {
if (microLabel.classList.contains("recognition")) {
recognition.stop();
}
}
let startValue;
let lastValue;
let timeoutHandle;
recognition.onstart = function() {
microLabel.classList.add("recognition");
startValue = messageInput.value;
lastValue = "";
timeoutHandle = window.setTimeout(may_stop, 8000);
};
recognition.onend = function() {
microLabel.classList.remove("recognition");
};
recognition.onresult = function(event) {
if (!event.results) {
return;
}
window.clearTimeout(timeoutHandle);
let newText;
Array.from(event.results).forEach((result) => {
newText = result[0].transcript;
if (newText && newText != lastValue) {
messageInput.value = `${startValue ? startValue+"\n" : ""}${newText.trim()}`;
if (result.isFinal) {
lastValue = newText;
startValue = messageInput.value;
messageInput.focus();
}
messageInput.style.height = messageInput.scrollHeight + "px";
messageInput.scrollTop = messageInput.scrollHeight;
}
});
window.clearTimeout(timeoutHandle);
timeoutHandle = window.setTimeout(may_stop, newText ? 8000 : 5000);
};
microLabel.addEventListener("click", () => {
if (microLabel.classList.contains("recognition")) {
window.clearTimeout(timeoutHandle);
recognition.stop();
} else {
const lang = document.getElementById("recognition-language")?.value;
recognition.lang = lang || navigator.language;
recognition.start();
}
});
}


@ -0,0 +1 @@
(()=>{var e,t,r,n,a={896:(e,t,r)=>{"use strict";var n=r(900);function a(e,t,r){for(let n=0;n<r.length;++n)e.setUint8(t+n,r.charCodeAt(n))}n._K2.allowLocalModels=!1;class s{static BASE_URL="https://huggingface.co/datasets/Xenova/cmu-arctic-xvectors-extracted/resolve/main/";static model_id="Xenova/speecht5_tts";static vocoder_id="Xenova/speecht5_hifigan";static tokenizer_instance=null;static model_instance=null;static vocoder_instance=null;static async getInstance(e=null){return null===this.tokenizer_instance&&(this.tokenizer=n.v6I.from_pretrained(this.model_id,{progress_callback:e})),null===this.model_instance&&(this.model_instance=n.fqH.from_pretrained(this.model_id,{quantized:!1,progress_callback:e})),null===this.vocoder_instance&&(this.vocoder_instance=n.oJL.from_pretrained(this.vocoder_id,{quantized:!1,progress_callback:e})),new Promise((async(e,t)=>{const r=await Promise.all([this.tokenizer,this.model_instance,this.vocoder_instance]);self.postMessage({status:"ready"}),e(r)}))}static async getSpeakerEmbeddings(e){const t=`${this.BASE_URL}${e}.bin`;return new n.qYS("float32",new Float32Array(await(await fetch(t)).arrayBuffer()),[1,512])}}const o=new Map;self.addEventListener("message",(async e=>{const[t,r,n]=await s.getInstance((e=>{self.postMessage(e)})),{input_ids:i}=t(e.data.text);let c,l=o.get(e.data.speaker_id);void 0===l&&(l=await s.getSpeakerEmbeddings(e.data.speaker_id),o.set(e.data.speaker_id,l));try{c=await r.generate_speech(i,l,{vocoder:n})}catch(e){throw self.postMessage({status:"error",exception:e}),e}const{waveform:d}=c,p=function(e){let t=44;const r=new ArrayBuffer(t+4*e.length),n=new DataView(r);a(n,0,"RIFF"),n.setUint32(4,36+4*e.length,!0),a(n,8,"WAVE"),a(n,12,"fmt "),n.setUint32(16,16,!0),n.setUint16(20,3,!0),n.setUint16(22,1,!0),n.setUint32(24,16e3,!0),n.setUint32(28,64e3,!0),n.setUint16(32,4,!0),n.setUint16(34,32,!0),a(n,36,"data"),n.setUint32(40,4*e.length,!0);for(let r=0;r<e.length;++r,t+=4)n.setFloat32(t,e[r],!0);return r}(d.data);self.postMessage({status:"complete",output:new Blob([p],{type:"audio/wav"})})}))},52:()=>{},143:()=>{},603:()=>{},806:()=>{},853:()=>{},9:()=>{},837:()=>{},499:()=>{}},s={};function o(e){var t=s[e];if(void 0!==t)return t.exports;var r=s[e]={exports:{}};return a[e](r,r.exports,o),r.exports}o.m=a,o.x=()=>{var e=o.O(void 0,[900],(()=>o(896)));return o.O(e)},e=[],o.O=(t,r,n,a)=>{if(!r){var s=1/0;for(d=0;d<e.length;d++){for(var[r,n,a]=e[d],i=!0,c=0;c<r.length;c++)(!1&a||s>=a)&&Object.keys(o.O).every((e=>o.O[e](r[c])))?r.splice(c--,1):(i=!1,a<s&&(s=a));if(i){e.splice(d--,1);var l=n();void 0!==l&&(t=l)}}return t}a=a||0;for(var d=e.length;d>0&&e[d-1][2]>a;d--)e[d]=e[d-1];e[d]=[r,n,a]},r=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,o.t=function(e,n){if(1&n&&(e=this(e)),8&n)return e;if("object"==typeof e&&e){if(4&n&&e.__esModule)return e;if(16&n&&"function"==typeof e.then)return e}var a=Object.create(null);o.r(a);var s={};t=t||[null,r({}),r([]),r(r)];for(var i=2&n&&e;"object"==typeof i&&!~t.indexOf(i);i=r(i))Object.getOwnPropertyNames(i).forEach((t=>s[t]=()=>e[t]));return s.default=()=>e,o.d(a,s),a},o.d=(e,t)=>{for(var r in t)o.o(t,r)&&!o.o(e,r)&&Object.defineProperty(e,r,{enumerable:!0,get:t[r]})},o.f={},o.e=e=>Promise.all(Object.keys(o.f).reduce(((t,r)=>(o.f[r](e,t),t)),[])),o.u=e=>e+".index.js",o.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return 
window}}(),o.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),o.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},(()=>{var e;o.g.importScripts&&(e=o.g.location+"");var t=o.g.document;if(!e&&t&&(t.currentScript&&(e=t.currentScript.src),!e)){var r=t.getElementsByTagName("script");if(r.length)for(var n=r.length-1;n>-1&&(!e||!/^http(s?):/.test(e));)e=r[n--].src}if(!e)throw new Error("Automatic publicPath is not supported in this browser");e=e.replace(/#.*$/,"").replace(/\?.*$/,"").replace(/\/[^\/]+$/,"/"),o.p=e})(),(()=>{var e={630:1};o.f.i=(t,r)=>{e[t]||importScripts(o.p+o.u(t))};var t=self.webpackChunk=self.webpackChunk||[],r=t.push.bind(t);t.push=t=>{var[n,a,s]=t;for(var i in a)o.o(a,i)&&(o.m[i]=a[i]);for(s&&s(o);n.length;)e[n.pop()]=1;r(t)}})(),n=o.x,o.x=()=>o.e(900).then(n),o.x()})();

File diff suppressed because one or more lines are too long


@ -0,0 +1 @@
(()=>{"use strict";var e={m:{},u:e=>e+".index.js"};e.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),e.o=(e,c)=>Object.prototype.hasOwnProperty.call(e,c),(()=>{var c;e.g.importScripts&&(c=e.g.location+"");var t=e.g.document;if(!c&&t&&(t.currentScript&&(c=t.currentScript.src),!c)){var a=t.getElementsByTagName("script");if(a.length)for(var r=a.length-1;r>-1&&(!c||!/^http(s?):/.test(c));)c=a[r--].src}if(!c)throw new Error("Automatic publicPath is not supported in this browser");c=c.replace(/#.*$/,"").replace(/\?.*$/,"").replace(/\/[^\/]+$/,"/"),e.p=c})(),e.b=document.baseURI||self.location.href;const c={};c.current||(c.current=new Worker(new URL(e.p+e.u(630),e.b),{type:void 0})),window.doSpeech=!1,c.current.addEventListener("message",(e=>{switch(e.data.status){case"error":window.onSpeechResponse(null),window.doSpeech=!1;break;case"complete":const c=URL.createObjectURL(e.data.output);window.onSpeechResponse(c),window.doSpeech=!1}})),window.SPEAKERS={"US female 1":"cmu_us_slt_arctic-wav-arctic_a0001","US female 2":"cmu_us_clb_arctic-wav-arctic_a0001","US male 1":"cmu_us_bdl_arctic-wav-arctic_a0003","US male 2":"cmu_us_rms_arctic-wav-arctic_a0003","Canadian male":"cmu_us_jmk_arctic-wav-arctic_a0002","Scottish male":"cmu_us_awb_arctic-wav-arctic_b0002","Indian male":"cmu_us_ksp_arctic-wav-arctic_a0007"},window.handleGenerateSpeech=(e,t="cmu_us_slt_arctic-wav-arctic_a0001")=>{window.doSpeech=!0,c.current.postMessage({text:e,speaker_id:t})},window.onSpeechResponse=e=>console.log(e)})();


@ -80,13 +80,12 @@ class Api():
Returns: Returns:
dict: Arguments prepared for chat completion. dict: Arguments prepared for chat completion.
""" """
provider = json_data.get('provider', None) model = json_data.get('model') or models.default
if "image" in kwargs and provider is None: provider = json_data.get('provider')
provider = "Bing"
if provider == 'OpenaiChat':
kwargs['auto_continue'] = True
messages = json_data['messages'] messages = json_data['messages']
api_key = json_data.get("api_key")
if api_key is not None:
kwargs["api_key"] = api_key
if json_data.get('web_search'): if json_data.get('web_search'):
if provider == "Bing": if provider == "Bing":
kwargs['web_search'] = True kwargs['web_search'] = True
@ -98,8 +97,6 @@ class Api():
if conversation_id and provider in conversations and conversation_id in conversations[provider]: if conversation_id and provider in conversations and conversation_id in conversations[provider]:
kwargs["conversation"] = conversations[provider][conversation_id] kwargs["conversation"] = conversations[provider][conversation_id]
model = json_data.get('model') or models.default
return { return {
"model": model, "model": model,
"provider": provider, "provider": provider,


@ -1,6 +1,5 @@
from __future__ import annotations from __future__ import annotations
import logging
import json import json
import os.path import os.path
from typing import Iterator from typing import Iterator


@ -4,9 +4,11 @@ from flask import render_template, redirect
class Website: class Website:
def __init__(self, app) -> None: def __init__(self, app) -> None:
self.app = app self.app = app
def redirect_home():
return redirect('/chat')
self.routes = { self.routes = {
'/': { '/': {
'function': lambda: redirect('/chat'), 'function': redirect_home,
'methods': ['GET', 'POST'] 'methods': ['GET', 'POST']
}, },
'/chat/': { '/chat/': {
@ -17,6 +19,14 @@ class Website:
'function': self._chat, 'function': self._chat,
'methods': ['GET', 'POST'] 'methods': ['GET', 'POST']
}, },
'/menu/': {
'function': redirect_home,
'methods': ['GET', 'POST']
},
'/settings/': {
'function': redirect_home,
'methods': ['GET', 'POST']
},
} }
def _chat(self, conversation_id): def _chat(self, conversation_id):


@ -0,0 +1,455 @@
[app]
# (str) Title of your application
title = g4f
# (str) Package name
package.name = g4a
# (str) Package domain (needed for android/ios packaging)
package.domain = org.g4f
# (str) Source code where the main.py live
source.dir = .
# (list) Source files to include (let empty to include all the files)
#source.include_exts = py,png,jpg,kv,atlas,html,css,js,png
# (list) List of inclusions using pattern matching
source.include_patterns = main.py
# (list) Source files to exclude (let empty to not exclude anything)
#source.exclude_exts = spec
# (list) List of directory to exclude (let empty to not exclude anything)
source.exclude_dirs = etc, docker, docs, .github
# (list) List of exclusions using pattern matching
# Do not prefix with './'
#source.exclude_patterns = license,images/*/*.jpg
# (str) Application versioning (method 1)
version = 0.1
# (str) Application versioning (method 2)
# version.regex = __version__ = ['"](.*)['"]
# version.filename = %(source.dir)s/main.py
# (list) Application requirements
# comma separated e.g. requirements = sqlite3,kivy
requirements = python3==3.11.7,kivy==2.3.0,aiohttp==3.9.3,aiosignal==1.3.1,attrs==23.2.0,beautifulsoup4==4.12.3,certifi==2024.2.2,charset-normalizer==3.3.2,docutils==0.20.1,frozenlist==1.4.1,idna==3.6,multidict==6.0.5,requests==2.31.0,soupsieve==2.5,Unidecode==1.3.8,urllib3==2.2.0,yarl==1.9.4,pywebview,pillow,platformdirs,python-for-android,plyer,/home/heiner/gpt4free2/,proxy_tools,bottle,typing_extensions,cryptography,brotli,android
# (str) Custom source folders for requirements
# Sets custom source for any requirements with recipes
# requirements.source.g4f = /home/.../gpt4free
# (str) Presplash of the application
#presplash.filename = %(source.dir)s/data/presplash.png
# (str) Icon of the application
icon.filename = %(source.dir)s/g4f/gui/client/static/img/android-chrome-512x512.png
# (list) Supported orientations
# Valid options are: landscape, portrait, portrait-reverse or landscape-reverse
orientation = portrait
# (list) List of service to declare
#services = NAME:ENTRYPOINT_TO_PY,NAME2:ENTRYPOINT2_TO_PY
#
# OSX Specific
#
#
# author = © Copyright Info
# change the major version of python used by the app
osx.python_version = 3
# Kivy version to use
osx.kivy_version = 1.9.1
#
# Android specific
#
# (bool) Indicate if the application should be fullscreen or not
fullscreen = 0
# (string) Presplash background color (for android toolchain)
# Supported formats are: #RRGGBB #AARRGGBB or one of the following names:
# red, blue, green, black, white, gray, cyan, magenta, yellow, lightgray,
# darkgray, grey, lightgrey, darkgrey, aqua, fuchsia, lime, maroon, navy,
# olive, purple, silver, teal.
#android.presplash_color = #FFFFFF
# (string) Presplash animation using Lottie format.
# see https://lottiefiles.com/ for examples and https://airbnb.design/lottie/
# for general documentation.
# Lottie files can be created using various tools, like Adobe After Effect or Synfig.
#android.presplash_lottie = "path/to/lottie/file.json"
# (str) Adaptive icon of the application (used if Android API level is 26+ at runtime)
#icon.adaptive_foreground.filename = %(source.dir)s/data/icon_fg.png
#icon.adaptive_background.filename = %(source.dir)s/data/icon_bg.png
# (list) Permissions
# (See https://python-for-android.readthedocs.io/en/latest/buildoptions/#build-options-1 for all the supported syntaxes and properties)
android.permissions = android.permission.INTERNET, android.permission.CAMERA, android.permission.WRITE_EXTERNAL_STORAGE, android.permission.READ_EXTERNAL_STORAGE
# (list) features (adds uses-feature -tags to manifest)
#android.features = android.hardware.usb.host
# (int) Target Android API, should be as high as possible.
android.api = 21
#31
# (int) Minimum API your APK / AAB will support.
android.minapi = 21
# (int) Android SDK version to use
#android.sdk = 23
# File path exposed error
# (str) Android NDK version to use
#android.ndk = 23b
# (int) Android NDK API to use. This is the minimum API your app will support, it should usually match android.minapi.
#android.ndk_api = 21
# (bool) Use --private data storage (True) or --dir public storage (False)
#android.private_storage = True
# (str) Android NDK directory (if empty, it will be automatically downloaded.)
#android.ndk_path =
# (str) Android SDK directory (if empty, it will be automatically downloaded.)
#android.sdk_path =
# (str) ANT directory (if empty, it will be automatically downloaded.)
#android.ant_path =
# (bool) If True, then skip trying to update the Android sdk
# This can be useful to avoid excess Internet downloads or save time
# when an update is due and you just want to test/build your package
# android.skip_update = False
# (bool) If True, then automatically accept SDK license
# agreements. This is intended for automation only. If set to False,
# the default, you will be shown the license when first running
# buildozer.
# android.accept_sdk_license = False
# (str) Android entry point, default is ok for Kivy-based app
#android.entrypoint = org.kivy.android.PythonActivity
# (str) Full name including package path of the Java class that implements Android Activity
# use that parameter together with android.entrypoint to set custom Java class instead of PythonActivity
#android.activity_class_name = org.kivy.android.PythonActivity
# (str) Extra xml to write directly inside the <manifest> element of AndroidManifest.xml
# use that parameter to provide a filename from where to load your custom XML code
#android.extra_manifest_xml = ./src/android/extra_manifest.xml
# (str) Extra xml to write directly inside the <manifest><application> tag of AndroidManifest.xml
# use that parameter to provide a filename from where to load your custom XML arguments:
#android.extra_manifest_application_arguments = ./src/android/extra_manifest_application_arguments.xml
# (str) Full name including package path of the Java class that implements Python Service
# use that parameter to set custom Java class which extends PythonService
#android.service_class_name = org.kivy.android.PythonService
# (str) Android app theme, default is ok for Kivy-based app
# android.apptheme = "@android:style/Theme.NoTitleBar"
# (list) Pattern to whitelist for the whole project
#android.whitelist =
# (str) Path to a custom whitelist file
#android.whitelist_src =
# (str) Path to a custom blacklist file
#android.blacklist_src =
# (list) List of Java .jar files to add to the libs so that pyjnius can access
# their classes. Don't add jars that you do not need, since extra jars can slow
# down the build process. Allows wildcards matching, for example:
# OUYA-ODK/libs/*.jar
android.add_jars = /home/heiner/.local/lib/python3.10/site-packages/webview/lib/pywebview-android.jar
# (list) List of Java files to add to the android project (can be java or a
# directory containing the files)
#android.add_src =
# (list) Android AAR archives to add
#android.add_aars =
# (list) Put these files or directories in the apk assets directory.
# Either form may be used, and assets need not be in 'source.include_exts'.
# 1) android.add_assets = source_asset_relative_path
# 2) android.add_assets = source_asset_path:destination_asset_relative_path
#android.add_assets =
# (list) Put these files or directories in the apk res directory.
# The option may be used in three ways, the value may contain one or zero ':'
# Some examples:
# 1) A file to add to resources, legal resource names contain ['a-z','0-9','_']
# android.add_resources = my_icons/all-inclusive.png:drawable/all_inclusive.png
# 2) A directory, here 'legal_icons' must contain resources of one kind
# android.add_resources = legal_icons:drawable
# 3) A directory, here 'legal_resources' must contain one or more directories,
# each of a resource kind: drawable, xml, etc...
# android.add_resources = legal_resources
#android.add_resources =
# (list) Gradle dependencies to add
#android.gradle_dependencies =
# (bool) Enable AndroidX support. Enable when 'android.gradle_dependencies'
# contains an 'androidx' package, or any package from Kotlin source.
# android.enable_androidx requires android.api >= 28
#android.enable_androidx = True
# (list) add java compile options
# this can for example be necessary when importing certain java libraries using the 'android.gradle_dependencies' option
# see https://developer.android.com/studio/write/java8-support for further information
# android.add_compile_options = "sourceCompatibility = 1.8", "targetCompatibility = 1.8"
# (list) Gradle repositories to add {can be necessary for some android.gradle_dependencies}
# please enclose in double quotes
# e.g. android.gradle_repositories = "maven { url 'https://kotlin.bintray.com/ktor' }"
#android.add_gradle_repositories =
# (list) packaging options to add
# see https://google.github.io/android-gradle-dsl/current/com.android.build.gradle.internal.dsl.PackagingOptions.html
# can be necessary to solve conflicts in gradle_dependencies
# please enclose in double quotes
# e.g. android.add_packaging_options = "exclude 'META-INF/common.kotlin_module'", "exclude 'META-INF/*.kotlin_module'"
#android.add_packaging_options =
# (list) Java classes to add as activities to the manifest.
#android.add_activities = com.example.ExampleActivity
# (str) OUYA Console category. Should be one of GAME or APP
# If you leave this blank, OUYA support will not be enabled
#android.ouya.category = GAME
# (str) Filename of OUYA Console icon. It must be a 732x412 png image.
#android.ouya.icon.filename = %(source.dir)s/data/ouya_icon.png
# (str) XML file to include as an intent filters in <activity> tag
#android.manifest.intent_filters =
# (list) Copy these files to src/main/res/xml/ (used for example with intent-filters)
#android.res_xml = PATH_TO_FILE,
# (str) launchMode to set for the main activity
#android.manifest.launch_mode = standard
# (str) screenOrientation to set for the main activity.
# Valid values can be found at https://developer.android.com/guide/topics/manifest/activity-element
#android.manifest.orientation = fullSensor
# (list) Android additional libraries to copy into libs/armeabi
#android.add_libs_armeabi = libs/android/*.so
#android.add_libs_armeabi_v7a = libs/android-v7/*.so
#android.add_libs_arm64_v8a = libs/android-v8/*.so
#android.add_libs_x86 = libs/android-x86/*.so
#android.add_libs_mips = libs/android-mips/*.so
# (bool) Indicate whether the screen should stay on
# Don't forget to add the WAKE_LOCK permission if you set this to True
#android.wakelock = False
# (list) Android application meta-data to set (key=value format)
#android.meta_data =
# (list) Android library project to add (will be added in the
# project.properties automatically.)
#android.library_references =
# (list) Android shared libraries which will be added to AndroidManifest.xml using <uses-library> tag
#android.uses_library =
# (str) Android logcat filters to use
#android.logcat_filters = *:S python:D
# (bool) Android logcat only display log for activity's pid
#android.logcat_pid_only = False
# (str) Android additional adb arguments
#android.adb_args = -H host.docker.internal
# (bool) Copy library instead of making a libpymodules.so
#android.copy_libs = 1
# (list) The Android archs to build for, choices: armeabi-v7a, arm64-v8a, x86, x86_64
# In the past this was `android.arch`, as builds for multiple archs at the same time were not supported.
android.archs = arm64-v8a
#, armeabi-v7a
# (int) overrides automatic versionCode computation (used in build.gradle)
# this is not the same as app version and should only be edited if you know what you're doing
# android.numeric_version = 1
# (bool) enables Android auto backup feature (Android API >=23)
android.allow_backup = True
# (str) XML file for custom backup rules (see official auto backup documentation)
# android.backup_rules =
# (str) If you need to insert variables into your AndroidManifest.xml file,
# you can do so with the manifestPlaceholders property.
# This property takes a map of key-value pairs. (via a string)
# Usage example : android.manifest_placeholders = [myCustomUrl:\"org.kivy.customurl\"]
# android.manifest_placeholders = [:]
# (bool) Skip byte compile for .py files
# android.no-byte-compile-python = False
# (str) The format used to package the app for release mode (aab or apk or aar).
# android.release_artifact = aab
# (str) The format used to package the app for debug mode (apk or aar).
# android.debug_artifact = apk
#
# Python for android (p4a) specific
#
# (str) python-for-android URL to use for checkout
#p4a.url =
# (str) python-for-android fork to use if p4a.url is not specified, defaults to upstream (kivy)
#p4a.fork = kivy
# (str) python-for-android branch to use, defaults to master
#p4a.branch = master
# (str) python-for-android specific commit to use, defaults to HEAD, must be within p4a.branch
#p4a.commit = HEAD
# (str) python-for-android git clone directory (if empty, it will be automatically cloned from github)
#p4a.source_dir =
# (str) The directory in which python-for-android should look for your own build recipes (if any)
#p4a.local_recipes =
# (str) Filename to the hook for p4a
#p4a.hook =
# (str) Bootstrap to use for android builds
# p4a.bootstrap = sdl2
# (int) port number to specify an explicit --port= p4a argument (eg for bootstrap flask)
#p4a.port =
# Control passing the --use-setup-py vs --ignore-setup-py to p4a
# "in the future" --use-setup-py is going to be the default behaviour in p4a, right now it is not
# Setting this to false will pass --ignore-setup-py, true will pass --use-setup-py
# NOTE: this is general setuptools integration, having pyproject.toml is enough, no need to generate
# setup.py if you're using Poetry, but you need to add "toml" to source.include_exts.
#p4a.setup_py = false
# (str) extra command line arguments to pass when invoking pythonforandroid.toolchain
#p4a.extra_args =
#
# iOS specific
#
# (str) Path to a custom kivy-ios folder
#ios.kivy_ios_dir = ../kivy-ios
# Alternatively, specify the URL and branch of a git checkout:
ios.kivy_ios_url = https://github.com/kivy/kivy-ios
ios.kivy_ios_branch = master
# Another platform dependency: ios-deploy
# Uncomment to use a custom checkout
#ios.ios_deploy_dir = ../ios_deploy
# Or specify URL and branch
ios.ios_deploy_url = https://github.com/phonegap/ios-deploy
ios.ios_deploy_branch = 1.10.0
# (bool) Whether or not to sign the code
ios.codesign.allowed = false
# (str) Name of the certificate to use for signing the debug version
# Get a list of available identities: buildozer ios list_identities
#ios.codesign.debug = "iPhone Developer: <lastname> <firstname> (<hexstring>)"
# (str) The development team to use for signing the debug version
#ios.codesign.development_team.debug = <hexstring>
# (str) Name of the certificate to use for signing the release version
#ios.codesign.release = %(ios.codesign.debug)s
# (str) The development team to use for signing the release version
#ios.codesign.development_team.release = <hexstring>
# (str) URL pointing to .ipa file to be installed
# This option should be defined along with `display_image_url` and `full_size_image_url` options.
#ios.manifest.app_url =
# (str) URL pointing to an icon (57x57px) to be displayed during download
# This option should be defined along with `app_url` and `full_size_image_url` options.
#ios.manifest.display_image_url =
# (str) URL pointing to a large icon (512x512px) to be used by iTunes
# This option should be defined along with `app_url` and `display_image_url` options.
#ios.manifest.full_size_image_url =
[buildozer]
# (int) Log level (0 = error only, 1 = info, 2 = debug (with command output))
log_level = 2
# (int) Display warning if buildozer is run as root (0 = False, 1 = True)
warn_on_root = 1
# (str) Path to build artifact storage, absolute or relative to spec file
# build_dir = ./.buildozer
# (str) Path to build output (i.e. .apk, .aab, .ipa) storage
# bin_dir = ./bin
# -----------------------------------------------------------------------------
# List as sections
#
# You can define any of the "list" options as a [section:key] section.
# Each line will then be considered as an entry of the list.
# Let's take [app] / source.exclude_patterns.
# Instead of doing:
#
#[app]
#source.exclude_patterns = license,data/audio/*.wav,data/images/original/*
#
# This can be translated into:
#
#[app:source.exclude_patterns]
#license
#data/audio/*.wav
#data/images/original/*
#
# -----------------------------------------------------------------------------
# Profiles
#
# You can extend section / key with a profile
# For example, you want to deploy a demo version of your application without
# HD content. You could first change the title to add "(demo)" in the name
# and extend the excluded directories to remove the HD content.
#
#[app@demo]
#title = My Application (demo)
#
#[app:source.exclude_patterns@demo]
#images/hd/*
#
# Then, invoke the command line with the "demo" profile:
#
#buildozer --profile demo android debug

View File

@@ -0,0 +1,3 @@
Forked from:
https://github.com/xenova/transformers.js/tree/main/examples/text-to-speech-client

View File

@@ -0,0 +1,11 @@
export const SPEAKERS = {
    "US female 1": "cmu_us_slt_arctic-wav-arctic_a0001",
    "US female 2": "cmu_us_clb_arctic-wav-arctic_a0001",
    "US male 1": "cmu_us_bdl_arctic-wav-arctic_a0003",
    "US male 2": "cmu_us_rms_arctic-wav-arctic_a0003",
    "Canadian male": "cmu_us_jmk_arctic-wav-arctic_a0002",
    "Scottish male": "cmu_us_awb_arctic-wav-arctic_b0002",
    "Indian male": "cmu_us_ksp_arctic-wav-arctic_a0007",
}

export const DEFAULT_SPEAKER = "cmu_us_slt_arctic-wav-arctic_a0001";

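These speaker IDs name precomputed x-vector speaker embeddings from the CMU ARCTIC dataset; the worker resolves each ID to a `.bin` file on the Hugging Face Hub. A minimal sketch of that lookup, assuming the same base URL that worker.js defines:

```js
import { SPEAKERS, DEFAULT_SPEAKER } from './constants';

// Same base URL as MyTextToSpeechPipeline.BASE_URL in worker.js.
const BASE_URL = 'https://huggingface.co/datasets/Xenova/cmu-arctic-xvectors-extracted/resolve/main/';

// Each ID names a 512-dimensional speaker embedding stored as a raw .bin file.
const speakerId = SPEAKERS['Scottish male'] ?? DEFAULT_SPEAKER;
console.log(`${BASE_URL}${speakerId}.bin`);
```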
View File

@@ -0,0 +1,38 @@
import { DEFAULT_SPEAKER, SPEAKERS } from './constants';

// Create the web worker that runs the text-to-speech pipeline.
const worker = {};
if (!worker.current) {
    // Create the worker if it does not yet exist.
    worker.current = new Worker(new URL('./worker.js', import.meta.url), {
        type: 'module'
    });
}

// Flag used by the page to know whether a speech request is in flight.
window.doSpeech = false;

// Handle messages coming back from the worker.
const onMessageReceived = (e) => {
    switch (e.data.status) {
        case 'error':
            window.onSpeechResponse(null);
            window.doSpeech = false;
            break;
        case 'complete': {
            // The worker sends the generated audio as a WAV blob.
            const blobUrl = URL.createObjectURL(e.data.output);
            window.onSpeechResponse(blobUrl);
            window.doSpeech = false;
            break;
        }
    }
};
worker.current.addEventListener('message', onMessageReceived);

// Ask the worker to synthesize `text` with the given speaker embedding.
const handleGenerateSpeech = (text, speaker_id = DEFAULT_SPEAKER) => {
    window.doSpeech = true;
    worker.current.postMessage({
        text,
        speaker_id: speaker_id,
    });
};

window.SPEAKERS = SPEAKERS;
window.handleGenerateSpeech = handleGenerateSpeech;
window.onSpeechResponse = (url) => console.log(url);

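The script above exposes everything on `window`, but the default `onSpeechResponse` only logs the blob URL. A hypothetical page-side usage sketch, assuming the bundled script has already been loaded:

```js
// Override the default handler (which only logs the URL) to actually play the audio.
window.onSpeechResponse = (blobUrl) => {
    if (blobUrl === null) {
        console.error('Speech generation failed.');
        return;
    }
    new Audio(blobUrl).play();
};

// Request synthesis with one of the bundled CMU ARCTIC speakers.
window.handleGenerateSpeech(
    'Hello from the text-to-speech demo.',
    window.SPEAKERS['US female 1']
);
```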
3574
projects/text_to_speech/package-lock.json generated Normal file

File diff suppressed because it is too large

View File

@@ -0,0 +1,19 @@
{
    "main": "index.js",
    "dependencies": {
        "@xenova/transformers": "^2.16.1",
        "webpack": "^5.91.0",
        "webpack-node-externals": "^3.0.0"
    },
    "bundledDependencies": [
        "@xenova/transformers"
    ],
    "devDependencies": {
        "pack": "^2.2.0",
        "web": "^0.0.2",
        "webpack-cli": "^5.1.4"
    }
}

View File

@@ -0,0 +1,47 @@
// Adapted from https://www.npmjs.com/package/audiobuffer-to-wav
export function encodeWAV(samples) {
    let offset = 44;
    const buffer = new ArrayBuffer(offset + samples.length * 4);
    const view = new DataView(buffer);
    const sampleRate = 16000;

    /* RIFF identifier */
    writeString(view, 0, 'RIFF')
    /* RIFF chunk length */
    view.setUint32(4, 36 + samples.length * 4, true)
    /* RIFF type */
    writeString(view, 8, 'WAVE')
    /* format chunk identifier */
    writeString(view, 12, 'fmt ')
    /* format chunk length */
    view.setUint32(16, 16, true)
    /* sample format (raw) */
    view.setUint16(20, 3, true)
    /* channel count */
    view.setUint16(22, 1, true)
    /* sample rate */
    view.setUint32(24, sampleRate, true)
    /* byte rate (sample rate * block align) */
    view.setUint32(28, sampleRate * 4, true)
    /* block align (channel count * bytes per sample) */
    view.setUint16(32, 4, true)
    /* bits per sample */
    view.setUint16(34, 32, true)
    /* data chunk identifier */
    writeString(view, 36, 'data')
    /* data chunk length */
    view.setUint32(40, samples.length * 4, true)

    for (let i = 0; i < samples.length; ++i, offset += 4) {
        view.setFloat32(offset, samples[i], true)
    }

    return buffer
}

function writeString(view, offset, string) {
    for (let i = 0; i < string.length; ++i) {
        view.setUint8(offset + i, string.charCodeAt(i))
    }
}

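`encodeWAV` writes a 44-byte RIFF/WAVE header for 32-bit float, mono, 16 kHz audio and appends the raw samples. A minimal sketch of how its output can be played in the browser; the sine-wave input is purely illustrative:

```js
import { encodeWAV } from './utils';

// Illustrative input: one second of a 440 Hz sine wave at 16 kHz,
// matching the sample rate hard-coded in encodeWAV.
const sampleRate = 16000;
const samples = new Float32Array(sampleRate);
for (let i = 0; i < samples.length; ++i) {
    samples[i] = Math.sin(2 * Math.PI * 440 * i / sampleRate);
}

// encodeWAV returns an ArrayBuffer containing a complete WAV file.
const wav = encodeWAV(samples);
const blobUrl = URL.createObjectURL(new Blob([wav], { type: 'audio/wav' }));
new Audio(blobUrl).play();
```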
View File

@@ -0,0 +1,20 @@
const path = require('path');
const webpack = require('webpack');

module.exports = {
    mode: 'production',
    entry: {
        server: './index.js',
    },
    output: {
        path: path.join(__dirname, 'build'),
        filename: 'index.js'
    },
    module: {
        rules: [
            {
                exclude: /node_modules/
            }
        ]
    }
};

View File

@@ -0,0 +1,105 @@
import { env, Tensor, AutoTokenizer, SpeechT5ForTextToSpeech, SpeechT5HifiGan } from '@xenova/transformers';
import { encodeWAV } from './utils';

// Disable local model checks
env.allowLocalModels = false;

// Use the Singleton pattern to enable lazy construction of the pipeline.
class MyTextToSpeechPipeline {
    static BASE_URL = 'https://huggingface.co/datasets/Xenova/cmu-arctic-xvectors-extracted/resolve/main/';

    static model_id = 'Xenova/speecht5_tts';
    static vocoder_id = 'Xenova/speecht5_hifigan';

    static tokenizer_instance = null;
    static model_instance = null;
    static vocoder_instance = null;

    static async getInstance(progress_callback = null) {
        if (this.tokenizer_instance === null) {
            this.tokenizer_instance = AutoTokenizer.from_pretrained(this.model_id, { progress_callback });
        }

        if (this.model_instance === null) {
            this.model_instance = SpeechT5ForTextToSpeech.from_pretrained(this.model_id, {
                quantized: false,
                progress_callback,
            });
        }

        if (this.vocoder_instance === null) {
            this.vocoder_instance = SpeechT5HifiGan.from_pretrained(this.vocoder_id, {
                quantized: false,
                progress_callback,
            });
        }

        // Wait for the tokenizer, model and vocoder to finish loading.
        const result = await Promise.all([
            this.tokenizer_instance,
            this.model_instance,
            this.vocoder_instance,
        ]);

        self.postMessage({
            status: 'ready',
        });

        return result;
    }

    static async getSpeakerEmbeddings(speaker_id) {
        // e.g., `cmu_us_awb_arctic-wav-arctic_a0001`
        const speaker_embeddings_url = `${this.BASE_URL}${speaker_id}.bin`;
        const speaker_embeddings = new Tensor(
            'float32',
            new Float32Array(await (await fetch(speaker_embeddings_url)).arrayBuffer()),
            [1, 512]
        )
        return speaker_embeddings;
    }
}

// Mapping of cached speaker embeddings
const speaker_embeddings_cache = new Map();

// Listen for messages from the main thread
self.addEventListener('message', async (event) => {
    // Load the pipeline
    const [tokenizer, model, vocoder] = await MyTextToSpeechPipeline.getInstance(x => {
        // We also add a progress callback so that we can track model loading.
        self.postMessage(x);
    });

    // Tokenize the input
    const { input_ids } = tokenizer(event.data.text);

    // Load the speaker embeddings
    let speaker_embeddings = speaker_embeddings_cache.get(event.data.speaker_id);
    if (speaker_embeddings === undefined) {
        speaker_embeddings = await MyTextToSpeechPipeline.getSpeakerEmbeddings(event.data.speaker_id);
        speaker_embeddings_cache.set(event.data.speaker_id, speaker_embeddings);
    }

    // Generate the waveform
    let response;
    try {
        response = await model.generate_speech(input_ids, speaker_embeddings, { vocoder });
    } catch(e) {
        self.postMessage({
            status: 'error',
            exception: e,
        });
        throw e;
    }
    const { waveform } = response;

    // Encode the waveform as a WAV file
    const wav = encodeWAV(waveform.data);

    // Send the output back to the main thread
    self.postMessage({
        status: 'complete',
        output: new Blob([wav], { type: 'audio/wav' }),
    });
});

5
projects/windows/copy.sh Executable file
View File

@@ -0,0 +1,5 @@
cp -r * /var/win/shared/
cp -r windows/* /var/win/shared/
cp setup.py /var/win/shared/
cp README.md /var/win/shared/
#git clone https://github.com/pyinstaller/pyinstaller/ /var/win/shared/pyinstaller

View File

(binary image diff omitted: 4.2 KiB before, 4.2 KiB after)

View File

@@ -1,9 +1,6 @@
 # -*- mode: python ; coding: utf-8 -*-
-block_cipher = None
 a = Analysis(
     ['main.py'],
     pathex=[],
@@ -14,29 +11,21 @@ a = Analysis(
     hooksconfig={},
     runtime_hooks=[],
     excludes=[],
-    win_no_prefer_redirects=False,
-    win_private_assemblies=False,
-    cipher=block_cipher,
     noarchive=False,
 )
-pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
+pyz = PYZ(a.pure)
 exe = EXE(
     pyz,
     a.scripts,
-    a.binaries,
-    Tree('C:/Users/Docker/AppData/Local/Packages/PythonSoftwareFoundation.Python.3.11_qbz5n2kfra8p0/LocalCache/local-packages/Python311/site-packages/g4f/gui/client', prefix='client'),
-    a.zipfiles,
-    a.datas,
     [],
+    exclude_binaries=True,
     name='g4f',
     debug=False,
     bootloader_ignore_signals=False,
     strip=False,
     upx=True,
-    upx_exclude=[],
-    runtime_tmpdir=None,
-    console=False,
+    console=True,
     disable_windowed_traceback=False,
     argv_emulation=False,
     target_arch=None,
@@ -44,3 +33,13 @@ exe = EXE(
     entitlements_file=None,
     icon='icon.ico',
 )
+coll = COLLECT(
+    exe,
+    a.binaries,
+    Tree('//host.lan/Data/g4f/gui/client', prefix='client'),
+    a.datas,
+    strip=False,
+    upx=True,
+    upx_exclude=[],
+    name='g4f',
+)