Mirror of https://github.com/xtekky/gpt4free.git

Commit c7d26ed867: Merge branch 'main' of https://github.com/hpsj/gpt4free

README.md (32 lines changed)
@@ -28,11 +28,9 @@ Please note the following:

 3. **Educational Purposes Only**: This repository and its content are provided strictly for educational purposes. By using the information and code provided, users acknowledge that they are using the APIs and models at their own risk and agree to comply with any applicable laws and regulations.

-4. **Copyright**: All content in this repository, including but not limited to code, images, and documentation, is the intellectual property of the repository author, unless otherwise stated. Unauthorized copying, distribution, or use of any content in this repository is strictly prohibited without the express written consent of the repository author.

-5. **Indemnification**: Users agree to indemnify, defend, and hold harmless the author of this repository from and against any and all claims, liabilities, damages, losses, or expenses, including legal fees and costs, arising out of or in any way connected with their use or misuse of this repository, its content, or related third-party APIs.
+4. **Indemnification**: Users agree to indemnify, defend, and hold harmless the author of this repository from and against any and all claims, liabilities, damages, losses, or expenses, including legal fees and costs, arising out of or in any way connected with their use or misuse of this repository, its content, or related third-party APIs.

-6. **Updates and Changes**: The author reserves the right to modify, update, or remove any content, information, or features in this repository at any time without prior notice. Users are responsible for regularly reviewing the content and any changes made to this repository.
+5. **Updates and Changes**: The author reserves the right to modify, update, or remove any content, information, or features in this repository at any time without prior notice. Users are responsible for regularly reviewing the content and any changes made to this repository.

 By using this repository or any code related to it, you agree to these terms. The author is not responsible for any copies, forks, or reuploads made by other users. This is the author's only account and repository. To prevent impersonation or irresponsible actions, you may comply with the GNU GPL license this Repository uses.

@@ -157,6 +155,32 @@ python3 -m venv venv

```sh
pip3 install -r requirements.txt
```

## Install ffmpeg

```sh
sudo apt-get install ffmpeg
```

## Connect VPN if needed and get proxy (Optional)

```sh
echo "$http_proxy" # http://127.0.0.1:8889/
```

## Set proxy in gpt4free/you/__init__.py (Optional)

```
diff --git a/gpt4free/you/__init__.py b/gpt4free/you/__init__.py
index 11847fb..59d1162 100644
--- a/gpt4free/you/__init__.py
+++ b/gpt4free/you/__init__.py
@@ -38,6 +38,7 @@ class Completion:
         if chat is None:
             chat = []
 
+        proxy = '127.0.0.1:8889'
         proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else {}
 
         client = Session(client_identifier='chrome_108')
```
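
Instead of hardcoding the address as in the patch above, a minimal sketch (not part of this commit, and assuming the `http_proxy` variable from the previous step is set) could read the proxy from the environment and hand it to `requests`:

```python
import os
import requests

# Assumed setup: the VPN/proxy client exports $http_proxy, e.g. "http://127.0.0.1:8889/".
proxy = os.environ.get("http_proxy", "")
proxies = {"http": proxy, "https": proxy} if proxy else {}

# Requests made through this session use the proxy only when it is actually set.
session = requests.Session()
session.proxies.update(proxies)
```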

## To start gpt4free GUI <a name="streamlit-gpt4free-gui"></a>

gpt4free/aiassist/README.md (new file, 19 lines)
@@ -0,0 +1,19 @@
aiassist.site

### Example: `aiassist` <a name="example-assist"></a>

```python
import aiassist

question1 = "Who won the world series in 2020?"
req = aiassist.Completion.create(prompt=question1)
answer = req["text"]
message_id = req["parentMessageId"]

question2 = "Where was it played?"
req2 = aiassist.Completion.create(prompt=question2, parentMessageId=message_id)
answer2 = req2["text"]

print(answer)
print(answer2)
```

gpt4free/aiassist/__init__.py (new file, 34 lines)
@@ -0,0 +1,34 @@
import json
import requests


class Completion:
    @staticmethod
    def create(
        systemMessage: str = "You are a helpful assistant",
        prompt: str = "",
        parentMessageId: str = "",
        temperature: float = 0.8,
        top_p: float = 1,
    ):
        json_data = {
            "prompt": prompt,
            "options": {"parentMessageId": parentMessageId},
            "systemMessage": systemMessage,
            "temperature": temperature,
            "top_p": top_p,
        }

        url = "http://43.153.7.56:8080/api/chat-process"
        request = requests.post(url, json=json_data)
        content = request.content

        response = Completion.__load_json(content)
        return response

    @classmethod
    def __load_json(cls, content) -> dict:
        decode_content = str(content.decode("utf-8"))
        split = decode_content.rsplit("\n", 1)[1]
        to_json = json.loads(split)
        return to_json
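
For context on the parsing above: the `chat-process` endpoint appears to stream the reply as newline-separated JSON snapshots, and `__load_json` keeps only the last line, which holds the finished answer. A small illustration with a made-up payload (the field values are hypothetical):

```python
import json

# Hypothetical raw body: each line is a progressively longer snapshot of the same reply.
raw = b'{"text": "Hel", "parentMessageId": "abc"}\n{"text": "Hello!", "parentMessageId": "abc"}'

last_line = raw.decode("utf-8").rsplit("\n", 1)[1]  # keep only the final snapshot
final = json.loads(last_line)
print(final["text"])             # -> Hello!
print(final["parentMessageId"])  # -> abc
```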

gpt4free/italygpt2/README.md (new file, 29 lines)
@@ -0,0 +1,29 @@
# Italygpt2 (Rewrite)
Written by [sife-shuo](https://github.com/sife-shuo/).

## Description
Unlike the `gpt4free.italygpt` module in the PyPI package, `italygpt2` supports streamed calls and changes how requests are sent, so conversations can stay continuous and logical.

Response speed also improves when the conversation is called multiple times.

### Completion
```python
from gpt4free import italygpt2

account_data = italygpt2.Account.create()
for chunk in italygpt2.Completion.create(account_data=account_data, prompt="Who are you?"):
    print(chunk, end="", flush=True)
print()
```

### Chat
Like most ChatGPT-style projects, the usual message format is supported.
Use the same format for the messages as you would for the [official OpenAI API](https://platform.openai.com/docs/guides/chat/introduction).
```python
from gpt4free import italygpt2

messages = [
    {"role": "system", "content": ""},  # ...
    {"role": "user", "content": ""},    # ...
]
account_data = italygpt2.Account.create()
for chunk in italygpt2.Completion.create(account_data=account_data, prompt="Who are you?", message=messages):
    print(chunk, end="", flush=True)
print()
```

gpt4free/italygpt2/__init__.py (new file, 70 lines)
@@ -0,0 +1,70 @@
import re
import requests
import hashlib
from fake_useragent import UserAgent


class Account:
    @staticmethod
    def create():
        r = requests.get("https://italygpt.it/", headers=Account._header)
        f = r.text
        tid = re.search('<input type=\"hidden\" name=\"next_id\" id=\"next_id\" value=\"(\w+)\">', f).group(1)
        if len(tid) == 0:
            raise RuntimeError("NetWorkError:failed to get id.")
        else:
            Account._tid = tid
            Account._raw = "[]"
            return Account

    def next(next_id: str) -> str:
        Account._tid = next_id
        return Account._tid

    def get() -> str:
        return Account._tid

    _header = {
        "Host": "italygpt.it",
        "Referer": "https://italygpt.it/",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",  # UserAgent().random,
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2",
        "Upgrade-Insecure-Requests": "1",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "none",
        "Sec-Fetch-User": "?1",
        "Connection": "keep-alive",
        "Alt-Used": "italygpt.it",
        "Pragma": "no-cache",
        "Cache-Control": "no-cache",
        "TE": "trailers",
    }

    def settraw(raws: str):
        Account._raw = raws
        return Account._raw

    def gettraw():
        return Account._raw


class Completion:
    @staticmethod
    def create(
        account_data,
        prompt: str,
        message=False,
    ):
        param = {
            "prompt": prompt.replace(" ", "+"),
            "creative": "off",
            "internet": "false",
            "detailed": "off",
            "current_id": "0",
            "code": "",
            "gpt4": "false",
            "raw_messages": account_data.gettraw(),
            "hash": hashlib.sha256(account_data.get().encode()).hexdigest(),
        }
        if message:
            param["raw_messages"] = str(message)
        r = requests.get("https://italygpt.it/question", headers=account_data._header, params=param, stream=True)
        account_data.next(r.headers["Next_id"])
        account_data.settraw(r.headers["Raw_messages"])
        for chunk in r.iter_content(chunk_size=None):
            r.raise_for_status()
            yield chunk.decode()
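
Since `Account` stores the rolling `next_id` and `raw_messages` returned in the response headers, reusing the same `account_data` object across calls should carry the conversation forward. A minimal multi-turn sketch under that assumption (the questions are only examples):

```python
from gpt4free import italygpt2

account_data = italygpt2.Account.create()

# Each call streams its answer and refreshes the stored next_id / raw_messages,
# so the second question can refer back to the first one.
for prompt in ["Who won the world series in 2020?", "Where was it played?"]:
    print(f"Q: {prompt}")
    for chunk in italygpt2.Completion.create(account_data=account_data, prompt=prompt):
        print(chunk, end="", flush=True)
    print()
```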

gpt4free/usesless/README.md

@@ -1,22 +1,30 @@
ai.usesless.com

to do:

- use random user agent in header
- make the code better I guess (?)

### Example: `usesless` <a name="example-usesless"></a>

### token generation
<p>this will create accounts.txt that contains mail and token</p>

```python
import usesless

usesless.Account.create(logging=True)
```

### completion
<p>insert token from accounts.txt</p>

```python
import usesless

message_id = ""
token = <TOKENHERE> # usesless.Account.create(logging=True)
while True:
    prompt = input("Question: ")
    if prompt == "!stop":
        break

-   req = usesless.Completion.create(prompt=prompt, parentMessageId=message_id)
+   req = usesless.Completion.create(prompt=prompt, parentMessageId=message_id, token=token)

    print(f"Answer: {req['text']}")
    message_id = req["id"]
```

gpt4free/usesless/__init__.py

@@ -1,22 +1,87 @@
import time
import re
import json

import requests
import fake_useragent
import names

from mailgw_temporary_email import Email
from password_generator import PasswordGenerator


class Account:
    @staticmethod
    def create(logging: bool = False):
        mail_client = Email()
        mail_client.register()
        mail_address = mail_client.address

        pwo = PasswordGenerator()
        pwo.minlen = 8
        password = pwo.generate()

        session = requests.Session()

        if logging:
            print(f"email: {mail_address}")

        register_url = "https://ai.usesless.com/api/cms/auth/local/register"
        register_json = {
            "username": names.get_first_name(),
            "password": password,
            "email": mail_address,
        }
        headers = {
            "authority": "ai.usesless.com",
            "accept": "application/json, text/plain, */*",
            "accept-language": "en-US,en;q=0.5",
            "cache-control": "no-cache",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": fake_useragent.UserAgent().random,
        }
        register = session.post(register_url, json=register_json, headers=headers)
        if logging:
            if register.status_code == 200:
                print("register success")
            else:
                print("there's a problem with account creation, try again")

        if register.status_code != 200:
            quit()

        while True:
            time.sleep(5)
            message_id = mail_client.message_list()[0]["id"]
            message = mail_client.message(message_id)
            verification_url = re.findall(
                r"http:\/\/ai\.usesless\.com\/api\/cms\/auth\/email-confirmation\?confirmation=\w.+\w\w",
                message["text"],
            )[0]
            if verification_url:
                break

        session.get(verification_url)
        login_json = {"identifier": mail_address, "password": password}
        login_request = session.post(
            url="https://ai.usesless.com/api/cms/auth/local", json=login_json
        )
        token = login_request.json()["jwt"]
        if logging:
            print(f"token: {token}")

        with open("accounts.txt", "w") as f:
            f.write(f"{mail_address}\n")
            f.write(f"{token}")

        return token


class Completion:
    headers = {
        "authority": "ai.usesless.com",
        "accept": "application/json, text/plain, */*",
        "accept-language": "en-US,en;q=0.5",
        "cache-control": "no-cache",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin",
        "user-agent": "Mozilla/5.0 (X11; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/112.0",
    }

    @staticmethod
    def create(
        token: str,
        systemMessage: str = "You are a helpful assistant",
        prompt: str = "",
        parentMessageId: str = "",
@@ -24,7 +89,17 @@ class Completion:
        temperature: float = 1,
        model: str = "gpt-3.5-turbo",
    ):
        print(parentMessageId, prompt)
        headers = {
            "authority": "ai.usesless.com",
            "accept": "application/json, text/plain, */*",
            "accept-language": "en-US,en;q=0.5",
            "cache-control": "no-cache",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": fake_useragent.UserAgent().random,
            "Authorization": f"Bearer {token}",
        }

        json_data = {
            "openaiKey": "",
@@ -41,14 +116,14 @@ class Completion:
        }

        url = "https://ai.usesless.com/api/chat-process"
-       request = requests.post(url, headers=Completion.headers, json=json_data)
+       request = requests.post(url, headers=headers, json=json_data)
        content = request.content

        response = Completion.__response_to_json(content)
        return response

    @classmethod
-   def __response_to_json(cls, text) -> dict:
+   def __response_to_json(cls, text) -> str:
        text = str(text.decode("utf-8"))

        split_text = text.rsplit("\n", 1)[1]
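
Putting the pieces together, a small end-to-end sketch could read the token back from the `accounts.txt` file that `Account.create` writes (mail address on the first line, token on the second, as inferred from the `f.write` calls above) and reuse it for every completion, mirroring the README loop:

```python
import usesless

# accounts.txt is produced by usesless.Account.create(logging=True);
# layout inferred from the writes above: line 1 = mail address, line 2 = token.
with open("accounts.txt") as f:
    mail_address, token = f.read().splitlines()[:2]

message_id = ""
while True:
    prompt = input("Question: ")
    if prompt == "!stop":
        break

    req = usesless.Completion.create(prompt=prompt, parentMessageId=message_id, token=token)
    print(f"Answer: {req['text']}")
    message_id = req["id"]
```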

gpt4free/usesless/account_creation.py (new file, 3 lines)
@@ -0,0 +1,3 @@
import usesless

usesless.Account.create(logging=True)

testing/aiassistest.py (new file, 13 lines)
@@ -0,0 +1,13 @@
import aiassist

question1 = "Who won the world series in 2020?"
req = aiassist.Completion.create(prompt=question1)
answer = req["text"]
message_id = req["parentMessageId"]

question2 = "Where was it played?"
req2 = aiassist.Completion.create(prompt=question2, parentMessageId=message_id)
answer2 = req2["text"]

print(answer)
print(answer2)

testing/italygpt2_test.py (new file, 4 lines)
@@ -0,0 +1,4 @@
from gpt4free import italygpt2

account_data = italygpt2.Account.create()
for chunk in italygpt2.Completion.create(account_data=account_data, prompt="Who are you?"):
    print(chunk, end="", flush=True)