Mirror of https://github.com/xtekky/gpt4free.git, synced 2024-12-27 21:21:41 +03:00

Merge branch 'main' of https://github.com/xtekky/gpt4free

Commit 8f7dbfec63

@@ -234,11 +234,11 @@ set G4F_PROXY=http://host:port

| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |
| ------ | ------- | ------- | ----- | ------ | ------ | ---- |
| [bing.com](https://bing.com/chat) | `g4f.Provider.Bing` | ❌ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [liaobots.site](https://liaobots.site) | `g4f.Provider.Liaobots` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [chat.openai.com](https://chat.openai.com) | `g4f.Provider.OpenaiChat` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ✔️ |
| [raycast.com](https://raycast.com) | `g4f.Provider.Raycast` | ✔️ | ✔️ | ✔️ | ![Unknown](https://img.shields.io/badge/Unknown-grey) | ✔️ |
| [you.com](https://you.com) | `g4f.Provider.You` | ✔️ | ✔️ | ✔️ | ![Active](https://img.shields.io/badge/Active-brightgreen) | ❌ |
| [chat.geekgpt.org](https://chat.geekgpt.org) | `g4f.Provider.GeekGpt` | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
| [liaobots.site](https://liaobots.site) | `g4f.Provider.Liaobots` | ✔️ | ✔️ | ✔️ | ![Inactive](https://img.shields.io/badge/Inactive-red) | ❌ |
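
Each row maps a website to a `g4f.Provider` class that can be passed when creating a completion. A minimal sketch of picking a provider from this table explicitly (the provider and prompt are illustrative; leaving `provider` unset lets g4f choose one automatically):

```python
import g4f

# Choose a provider from the table explicitly; Bing is only an example choice.
response = g4f.ChatCompletion.create(
    model="gpt-4",
    provider=g4f.Provider.Bing,
    messages=[{"role": "user", "content": "Hello, which model are you?"}],
)
print(response)
```
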
### GPT-3.5

@@ -73,7 +73,7 @@ models = {
 
 class Liaobots(AsyncGeneratorProvider, ProviderModelMixin):
     url = "https://liaobots.site"
-    working = True
+    working = False
     supports_message_history = True
     supports_gpt_35_turbo = True
     supports_gpt_4 = True
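
The `working` flag on a provider class is what the README status column reflects. A minimal sketch, using only the public `g4f` API, of skipping a provider whose flag has been switched off (the prompt is illustrative):

```python
import g4f
from g4f.Provider import Liaobots

# Fall back to automatic provider selection when Liaobots is marked as not working.
provider = Liaobots if Liaobots.working else None
response = g4f.ChatCompletion.create(
    model="gpt-3.5-turbo",
    provider=provider,
    messages=[{"role": "user", "content": "Hello"}],
)
print(response)
```
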
@@ -31,12 +31,16 @@
     </script>
     <script id="MathJax-script" src="https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js" async></script>
     <script type="module" src="https://cdn.jsdelivr.net/npm/mistral-tokenizer-js" async>
-        import mistralTokenizer from 'mistral-tokenizer-js'
+        import mistralTokenizer from "mistral-tokenizer-js"
     </script>
     <script type="module" src="https://belladoreai.github.io/llama-tokenizer-js/llama-tokenizer.js" async>
-        import llamaTokenizer from 'llama-tokenizer-js'
+        import llamaTokenizer from "llama-tokenizer-js"
     </script>
     <script src="https://unpkg.com/gpt-tokenizer/dist/cl100k_base.js" async></script>
+    <script type="module" async>
+        import { countWords } from 'https://esm.run/alfaaz';
+        window.countWords = countWords;
+    </script>
     <script>
         const user_image = '<img src="/assets/img/user.png" alt="your avatar">';
         const gpt_image = '<img src="/assets/img/gpt.png" alt="your avatar">';

@@ -66,7 +66,6 @@ const register_remove_message = async () => {
 }
 
 const delete_conversations = async () => {
-    localStorage.clear();
     for (let i = 0; i < localStorage.length; i++){
         let key = localStorage.key(i);
         if (key.startsWith("conversation:")) {

@@ -476,12 +475,6 @@ const load_conversation = async (conversation_id) => {
     }, 500);
 };
 
-// https://stackoverflow.com/questions/20396456/how-to-do-word-counts-for-a-mixture-of-english-and-chinese-in-javascript
-function count_words(str) {
-    var matches = str.match(/[\u00ff-\uffff]|\S+/g);
-    return matches ? matches.length : 0;
-}
-
 function count_tokens(model, text) {
     if (model.startsWith("gpt-3") || model.startsWith("gpt-4")) {
         return GPTTokenizer_cl100k_base?.encode(text).length;
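
The dropped `count_words` helper counted one word per CJK-range character plus one per whitespace-separated token; that role is now filled by `countWords` from alfaaz. For reference only, a hypothetical Python re-expression of the same regex trick (not part of the repository):

```python
import re

def count_words(text: str) -> int:
    # One match per character in the \u00ff-\uffff range (covers CJK and other
    # non-Latin scripts), plus one per whitespace-separated token.
    matches = re.findall(r"[\u00ff-\uffff]|\S+", text)
    return len(matches)

print(count_words("Hello 世界"))  # 3: "Hello" plus two CJK characters
```
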

@@ -497,7 +490,7 @@ function count_tokens(model, text) {
 function count_words_and_tokens(text, model) {
     const tokens_count = model ? count_tokens(model, text) : null;
     const tokens_append = tokens_count ? `, ${tokens_count} tokens` : "";
-    return `(${count_words(text)} words${tokens_append})`
+    return countWords ? `(${countWords(text)} words${tokens_append})` : "";
 }
 
 const get_conversation = async (conversation_id) => {

@@ -101,7 +101,7 @@ async def search(query: str, n_results: int = 5, max_words: int = 2500, add_text
         raise MissingRequirementsError('Install "duckduckgo-search" and "beautifulsoup4" package')
     async with AsyncDDGS() as ddgs:
         results = []
-        async for result in ddgs.text(
+        for result in await ddgs.text(
             query,
             region="wt-wt",
             safesearch="moderate",
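
This follows the duckduckgo-search 5.x API, where `AsyncDDGS.text()` is a coroutine returning a list of result dicts rather than an async generator. A minimal standalone sketch of the new call pattern (the query and `max_results` are illustrative):

```python
import asyncio
from duckduckgo_search import AsyncDDGS

async def main():
    async with AsyncDDGS() as ddgs:
        # In duckduckgo-search>=5.0, text() is awaited and returns a plain list.
        results = await ddgs.text("gpt4free", region="wt-wt", safesearch="moderate", max_results=5)
        for result in results:
            print(result["title"], result["href"])

asyncio.run(main())
```
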

@@ -1,11 +1,11 @@
 requests
 pycryptodome
-curl_cffi>=0.6.0b9
+curl_cffi>=0.6.2
 aiohttp
 certifi
 browser_cookie3
 PyExecJS
-duckduckgo-search>=4.4.3
+duckduckgo-search>=5.0
 nest_asyncio
 werkzeug
 loguru
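
To confirm an environment already satisfies the raised minimums, a small sketch using only the standard library (distribution names taken from the requirements above):

```python
from importlib.metadata import PackageNotFoundError, version

# Raised minimums in this change: curl_cffi>=0.6.2, duckduckgo-search>=5.0.
for package in ("curl_cffi", "duckduckgo-search"):
    try:
        print(package, version(package))
    except PackageNotFoundError:
        print(package, "not installed")
```
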

setup.py

@@ -15,13 +15,13 @@ INSTALL_REQUIRE = [
 
 EXTRA_REQUIRE = {
     'all': [
-        "curl_cffi>=0.6.0b9",
+        "curl_cffi>=0.6.2",
         "certifi",
         "async-property", # openai
         "py-arkose-generator", # openai
         "browser_cookie3", # get_cookies
         "PyExecJS", # GptForLove
-        "duckduckgo-search>=4.4.3",# internet.search
+        "duckduckgo-search>=5.0", # internet.search
         "beautifulsoup4", # internet.search and bing.create_images
         "brotli", # openai
         "platformdirs", # webdriver

@@ -56,7 +56,7 @@ EXTRA_REQUIRE = {
     "gui": [
         "werkzeug", "flask",
         "beautifulsoup4", "pillow",
-        "duckduckgo-search>=4.4.3",
+        "duckduckgo-search>=5.0",
         "browser_cookie3"
     ]
 }