Merge branch 'main' into main

Commit e865fcbc5a, authored by t.me/xtekky on 2023-05-04 19:13:58 +01:00, committed by GitHub.
15 changed files with 44 additions and 258 deletions

.vscode/settings.json (new file)

@@ -0,0 +1,4 @@
{
"editor.tabCompletion": "on",
"diffEditor.codeLens": true
}


@@ -51,6 +51,13 @@ Just API's from some language model sites.
<td><a href="https://github.com/xtekky/gpt4free/issues"><img alt="Issues" src="https://img.shields.io/github/issues/xtekky/gpt4free?style=flat-square&labelColor=343b41"/></a></td> <td><a href="https://github.com/xtekky/gpt4free/issues"><img alt="Issues" src="https://img.shields.io/github/issues/xtekky/gpt4free?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/xtekky/gpt4free/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/xtekky/gpt4free?style=flat-square&labelColor=343b41"/></a></td> <td><a href="https://github.com/xtekky/gpt4free/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/xtekky/gpt4free?style=flat-square&labelColor=343b41"/></a></td>
</tr> </tr>
<tr>
<td><a href="https://github.com/xiangsx/gpt4free-ts"><b>gpt4free-ts</b></a></td>
<td><a href="https://github.com/xiangsx/gpt4free-ts/stargazers"><img alt="Stars" src="https://img.shields.io/github/stars/xiangsx/gpt4free-ts?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/xiangsx/gpt4free-ts/network/members"><img alt="Forks" src="https://img.shields.io/github/forks/xiangsx/gpt4free-ts?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/xiangsx/gpt4free-ts/issues"><img alt="Issues" src="https://img.shields.io/github/issues/xiangsx/gpt4free-ts?style=flat-square&labelColor=343b41"/></a></td>
<td><a href="https://github.com/xiangsx/gpt4free-ts/pulls"><img alt="Pull Requests" src="https://img.shields.io/github/issues-pr/xiangsx/gpt4free-ts?style=flat-square&labelColor=343b41"/></a></td>
</tr>
<tr> <tr>
<td><a href="https://github.com/xtekky/chatgpt-clone"><b>ChatGPT-Clone</b></a></td> <td><a href="https://github.com/xtekky/chatgpt-clone"><b>ChatGPT-Clone</b></a></td>
<td><a href="https://github.com/xtekky/chatgpt-clone/stargazers"><img alt="Stars" src="https://img.shields.io/github/stars/xtekky/chatgpt-clone?style=flat-square&labelColor=343b41"/></a></td> <td><a href="https://github.com/xtekky/chatgpt-clone/stargazers"><img alt="Stars" src="https://img.shields.io/github/stars/xtekky/chatgpt-clone?style=flat-square&labelColor=343b41"/></a></td>


@@ -1,49 +1,27 @@
-import os
-import pickle
 from json import loads
+from xtempmail import Email
 from re import findall
+from typing import Optional, Generator
+from faker import Faker
 from time import time, sleep
-from typing import Generator, Optional
 from uuid import uuid4
 from fake_useragent import UserAgent
-from pymailtm import MailTm, Message
 from requests import post
 from tls_client import Session

 from .typing import ForeFrontResponse


 class Account:
-    COOKIES_FILE_NAME = 'cookies.pickle'

     @staticmethod
-    def login(proxy: Optional[str] = None, logging: bool = False) -> str:
-        if not os.path.isfile(Account.COOKIES_FILE_NAME):
-            return Account.create(proxy, logging)
-        with open(Account.COOKIES_FILE_NAME, 'rb') as f:
-            cookies = pickle.load(f)
-        proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
-        client = Session(client_identifier='chrome110')
-        client.proxies = proxies
-        client.cookies.update(cookies)
-        if Account.is_cookie_enabled(client):
-            response = client.get('https://clerk.forefront.ai/v1/client?_clerk_js_version=4.38.4')
-            return response.json()['response']['sessions'][0]['last_active_token']['jwt']
-        else:
-            return Account.create(proxy, logging)
-
-    @staticmethod
-    def create(proxy: Optional[str] = None, logging: bool = False, save_cookies: bool = False) -> str:
+    def create(proxy: Optional[str] = None, logging: bool = False):
         proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
+        faker = Faker()
+        name = (faker.name().replace(' ', '_')).lower()

         start = time()
-        mail_client = MailTm().get_account()
-        mail_address = mail_client.address
+        mail_client = Email(name=name)
+        mail_address = mail_client.email
         client = Session(client_identifier='chrome110')
         client.proxies = proxies
@@ -66,7 +44,10 @@ class Account:
         response = client.post(
             f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.38.4',
-            data={'strategy': 'email_link', 'redirect_url': 'https://accounts.forefront.ai/sign-up/verify'},
+            data={
+                'strategy': 'email_link',
+                'redirect_url': 'https://accounts.forefront.ai/sign-up/verify'
+            },
         )

         if logging:
@@ -74,31 +55,23 @@ class Account:
         if 'sign_up_attempt' not in response.text:
             return 'Failed to create account!'

+        verification_url = None
-        while True:
-            sleep(1)
-            new_message: Message = mail_client.wait_for_message()
-            if logging:
-                print(new_message.data['id'])
-            verification_url = findall(r'https:\/\/clerk\.forefront\.ai\/v1\/verify\?token=\w.+', new_message.text)[0]
+        new_message = mail_client.get_new_message(5)
+        for msg in new_message:
+            verification_url = findall(r'https:\/\/clerk\.forefront\.ai\/v1\/verify\?token=\w.+', msg.text)[0]
             if verification_url:
                 break

+        if verification_url is None or not verification_url:
+            raise RuntimeError('Error while obtaining verfication URL!')
+
         if logging:
             print(verification_url)

         response = client.get(verification_url)
         response = client.get('https://clerk.forefront.ai/v1/client?_clerk_js_version=4.38.4')
         token = response.json()['response']['sessions'][0]['last_active_token']['jwt']
-        if save_cookies:
-            with open(Account.COOKIES_FILE_NAME, 'wb') as f:
-                pickle.dump(client.cookies, f)

         with open('accounts.txt', 'a') as f:
             f.write(f'{mail_address}:{token}\n')
@@ -107,11 +80,6 @@ class Account:
         return token

-    @staticmethod
-    def is_cookie_enabled(client: Session) -> bool:
-        response = client.get('https://chat.forefront.ai/')
-        return 'window.startClerk' in response.text


 class StreamingCompletion:
     @staticmethod
@@ -122,7 +90,7 @@ class StreamingCompletion:
         action_type='new',
         default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
         model='gpt-4',
-        proxy=None,
+        proxy=None
     ) -> Generator[ForeFrontResponse, None, None]:
         if not token:
             raise Exception('Token is required!')
@@ -197,7 +165,7 @@ class Completion:
         action_type='new',
         default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
         model='gpt-4',
-        proxy=None,
+        proxy=None
     ) -> ForeFrontResponse:
         text = ''
         final_response = None
@@ -208,7 +176,7 @@ class Completion:
             action_type=action_type,
             default_persona=default_persona,
             model=model,
-            proxy=proxy,
+            proxy=proxy
         ):
             if response:
                 final_response = response
@@ -220,3 +188,4 @@ class Completion:
             raise Exception('Unable to get the response, Please try again')

         return final_response
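
Aside: a minimal standalone sketch of the new verification flow, assuming xtempmail's Email class behaves exactly as the diff above uses it (Email(name=...) provisions a throwaway inbox, .email holds its address, and .get_new_message(5) yields incoming messages). This is not part of the commit, only an illustration of the swapped-in mail client:

# Sketch only, under the assumptions stated above; not part of the commit.
from re import findall

from faker import Faker
from xtempmail import Email

name = Faker().name().replace(' ', '_').lower()
inbox = Email(name=name)  # throwaway inbox for the sign-up mail
print('inbox address:', inbox.email)

verification_url = None
for msg in inbox.get_new_message(5):  # poll for incoming messages
    links = findall(r'https:\/\/clerk\.forefront\.ai\/v1\/verify\?token=\w.+', msg.text)
    if links:
        verification_url = links[0]
        break

print(verification_url)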


@@ -1,18 +0,0 @@
### Example: `italygpt`

```python
# create an instance
from gpt4free import italygpt
italygpt = italygpt.Completion()

# initialize api
italygpt.init()

# get an answer
italygpt.create(prompt="What is the meaning of life?")
print(italygpt.answer) # html formatted

# keep the old conversation
italygpt.create(prompt="Are you a human?", messages=italygpt.messages)
print(italygpt.answer)
```


@@ -1,28 +0,0 @@
import requests, time, ast, json
from bs4 import BeautifulSoup
from hashlib import sha256


class Completion:
    # answer is returned with html formatting
    next_id = None
    messages = []
    answer = None

    def init(self):
        r = requests.get("https://italygpt.it")
        soup = BeautifulSoup(r.text, "html.parser")
        self.next_id = soup.find("input", {"name": "next_id"})["value"]

    def create(self, prompt: str, messages: list = []):
        try:
            r = requests.get("https://italygpt.it/question", params={"hash": sha256(self.next_id.encode()).hexdigest(), "prompt": prompt, "raw_messages": json.dumps(messages)}).json()
        except:
            r = requests.get("https://italygpt.it/question", params={"hash": sha256(self.next_id.encode()).hexdigest(), "prompt": prompt, "raw_messages": json.dumps(messages)}).text
            if "too many requests" in r.lower():
                # rate limit is 17 requests per 1 minute
                time.sleep(20)
                return self.create(prompt, messages)
        self.next_id = r["next_id"]
        self.messages = ast.literal_eval(r["raw_messages"])
        self.answer = r["response"]
        return self


@@ -104,8 +104,8 @@ class Model:
     def create(
         token: str,
         model: str = 'gpt-3.5-turbo', # claude-instant
-        system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
+        system_prompt: str = 'You are ChatGPT a large language model. Answer as consisely as possible',
-        description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
+        description: str = 'gpt-3.5 language model',
         handle: str = None,
     ) -> ModelResponse:
         if not handle:

gpt4free/test.py (new file)

@@ -0,0 +1,4 @@
import forefront
token = forefront.Account.create()
response = forefront.Completion.create(token=token, prompt='Hello!')
print(response)
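
A streaming variant of this smoke test could look like the sketch below. It assumes StreamingCompletion.create accepts the same token/prompt pair as Completion.create and yields ForeFrontResponse chunks, which is what its Generator signature in the diff above suggests; the prompt keyword is an assumption here:

# Sketch only, under the assumptions stated above.
import forefront

token = forefront.Account.create(logging=True)
for chunk in forefront.StreamingCompletion.create(token=token, prompt='Hello!'):
    print(chunk, end='', flush=True)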


@@ -72,3 +72,5 @@ class Completion:
         for message in Completion.create(prompt, proxy):
             response_list.append(message)
         return ''.join(response_list)
+
+Completion.message_queue.put(response.decode(errors='replace'))


@@ -1,5 +1,5 @@
 [tool.poetry]
-name = "openai-rev"
+name = "gpt4free"
 version = "0.1.0"
 description = ""
 authors = []


@@ -1,8 +0,0 @@
# asyncio.run(gptbz.test())
import requests
image = '/9j/4AAQSkZJRgABAQEAYABgAAD/2wBDAAgGBgcGBQgHBwcJCQgKDBQNDAsLDBkSEw8UHRofHh0aHBwgJC4nICIsIxwcKDcpLDAxNDQ0Hyc5PTgyPC4zNDL/2wBDAQkJCQwLDBgNDRgyIRwhMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjL/wAARCAAoALQDASIAAhEBAxEB/8QAHwAAAQUBAQEBAQEAAAAAAAAAAAECAwQFBgcICQoL/8QAtRAAAgEDAwIEAwUFBAQAAAF9AQIDAAQRBRIhMUEGE1FhByJxFDKBkaEII0KxwRVS0fAkM2JyggkKFhcYGRolJicoKSo0NTY3ODk6Q0RFRkdISUpTVFVWV1hZWmNkZWZnaGlqc3R1dnd4eXqDhIWGh4iJipKTlJWWl5iZmqKjpKWmp6ipqrKztLW2t7i5usLDxMXGx8jJytLT1NXW19jZ2uHi4+Tl5ufo6erx8vP09fb3+Pn6/8QAHwEAAwEBAQEBAQEBAQAAAAAAAAECAwQFBgcICQoL/8QAtREAAgECBAQDBAcFBAQAAQJ3AAECAxEEBSExBhJBUQdhcRMiMoEIFEKRobHBCSMzUvAVYnLRChYkNOEl8RcYGRomJygpKjU2Nzg5OkNERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6goOEhYaHiImKkpOUlZaXmJmaoqOkpaanqKmqsrO0tba3uLm6wsPExcbHyMnK0tPU1dbX2Nna4uPk5ebn6Onq8vP09fb3+Pn6/9oADAMBAAIRAxEAPwD3+iiigDkZP+EhS4W0k1S+VntQPtEWmRsgkNwBu4ZsHYQNvTbls5BA6DS7uW6S6E0VwjQ3UsQM0Pl71DZUrydy4IAbvg8CsTx3DbHQLi4uVs9scWzdd+dsAaWI4PlfNjKjpzkDtmpoNSgbWYpLR7Ty5bq5trw/vd3nIowBxtzti53Y6fKT3z2djra56fNbv07HR1z13ZRX/jDyby0+02f9nfdmsEeHd5o/5anndwPkxjjPWuhrh9Mvra88RLqccmnOHtvLEqfaN+1r1lUcjbg4PbO4H+Cqk+hnRi9ZI29E0uC2N1eG3Am+13DITZRwuqlsYG0ZYEKCGJywwT2AtWTapcW1vcPPCiyrE5ils2SRQV+dW/ecMT/3zgj5utZtpdwL4e190e02W9xeb9vm7FOWY78/NnnJ28f3ahkgtptD8JRlbMos9s8QPnbcrEzDy/4sgDjzOMdeaSZbi23f8vmbfn6hBFuktmuWWPJWCNELNuxgbpcDj1Pbr2qJ9bMVyIZNK1JVLyr5qwB1AjUNu+Uk4bovGSRjAqCTwdoElv5B02MReT5G1HZfk8zzMcEfx81YlsJ7NJX0tolZzNK8dyZJA8jDIwd3yjcBkAHjOAM09SP3b/q36mkjiSNXAYBgCNykH8QeRWdfaw1ldSW66XqN0UgE++3iBRsvt2BiQN/8WPQZqharF9oN5osVml1NLbLqUbmUFY/L4CrgYYKy4yoGM5xjhlnc2OoeMrfULV7aQXGkExyYlErJ5oPQ/Jtye/zZ9qLgqaTba0NyzvPtizH7NcQeVM8OJ49u/acbl9VPY96s1geFjF/xOhF9m41Wfd9n8z73BO7f/Fzzt+X0q7c6mWvRY2DwSXcUsQuUff8Auo2ySflB+YqrYyQOmTyARPQmVP32kLqF1cbmsrJZkuni3rcfZ98UfzKvJJUE4JOM5wpODwDl3Meuf2rHbRatcBJXuj5iachjhUovlBmZudrNkEZ3HIOMGlhREhbS9He2a8MO6a4fzmGDMQ3zAk5yZ8DzMgj0yRuWdha2CzLawrEJpnnkx/G7HLMfc0bl3VNf5pff/kVLS8uxFHHJZ3s5Xyo2mZI4y2VBZyN44B6gDrwAcVZ069Go2EV2Le5t/MBPlXMZjkXnGGU9OlULSdbfTt8LWy5mt0JAkK4YRLjnnODx26Z71TXULEWn/CUWDwmxeDbM4WbkCXJbaB23SnlM5PUDNF7CcObZf12OlpCcDoTz2oVlcZVgRkjIPccGo7hgsSk7ceYg+bP94elUYpamda64915GdH1SESxiTM0KjZmTZtbDHB53Y/u89eK1qw4xD9l0mIC3wLdCg/eYwHh+73x0+9znb71uUkXUSWyCiiimZhRRRQBieL5Hj8LXjxySxuNmGivFtWHzr0lbhfx69O9MvHdZpbKKWYnUluNji+VGikVFULHnkdGbjO05JHPEviyF5/DF7HGkjuQpCx2i3THDA8RNw3Tv069qR0kk0i4uFilF3bSXTwE2a+YGzIAUQnnIPByN46kbjUPc6YNKC9X+SLtjeB9Mt5ZyqzbI1lQzK5R2C/KWGAT8w6dcjHUVzemSyxeCba9e5uWfzIgxl1aOTgXPebGw5BwR3ACdalna8+0R3Kx3nk6jc2MvkjTI2MH97zDnI+4uWOSny4z2Lqxmt/hytvHHIZhFHJsj0yJnyXDEfZ87M9cjPB56ik2y4xSsu7XcnjMsejeJszXBZZrgozaihZAYwQFfGIQM8Bvu9ehrTKuJtOg3y5gKs/8ApAy2Y5B846uMj8Tz/CaqzROH1C3EchW6uHGRZIVx9nHXs4yPvN1PydBV2Lc+u3eUkCJBDtZoAFJzJna/VjgjI/h/4EaaM5PS/wDXRF+iiirOcy7RZE8RanukmKPFA6q9yHVfvg7Y+qfd5J4Y9OhrJ8Nm4FxYJNNdORaXCsJtTS4yVnAyQoG5sfxfw/dPJrUslmGt6rcymQxM0MMStahMALk4cfM65c9cBSGA7mqmi2k9t/ZZuDJJKbSdpHNjHEdzyRvhtv3G5PyjIbBJOVqDpurP5d+zGWtzeLdahZQLNK895PiV7+N/IURKQQMEqNzKAm1tucnggG4Fkhs4INNuJL145oEuHa7BcIAuWOQRkrhiAFzkkEE8rNDJPczWtnG1rG7yfapvsqESsY1AIJPP3hztbPllTjHKvpv2CWKbTUSHdJCk8cVtH+8jUFOSNpGAynOTgJgL1BNRNxf9fmWNGa3fR7U2ty9zDswJZJxMzHvlwSCc5BwccVerBZ3tLf8Atqyguvsxt/n02OyUSsxk3FsHa24bnyM4ycgE9d1WDDIz1I5BHQ471SM6i1uY8cjjSIWLyFjLbDJu1J5Mefn6HryP4snH3hRdmTS5f7T82aS2WBY5Y5LpVjX94Pn+YYzhmydw4UDB4wio/wDY8K+XLuE1qcfY1B4MWfk6DHOT/Bg4+6K1zGkkHlSoroy7WVlGCCOQRSsU5JGUrPo96EZ5p7O7mmmlubm7XFqQoYIobB2fK3Aztwe3TQvX2QKQSMyxDiQJ1dR1P8u/TvWb5bWty2m3KTXlvqMs7Ky2ieVbqVBKSEcHJL4JB3ZwfeLfcQRnTpY7mT7PLZiOdbJSkillzgA44KMScLsBBAOBkuNxu0/6epcQv9s0+LfJzauxBuVJJDRckdXPJ+YcDJH8QrTrN2sNcsxsk2LZyjd9nXaCWj439VPH3R
wcZ/hFaVNGc+gUUUUyAooooAxfFVxZxeG9RS7ltVQ25ytwzbCCQBkJ82MkD5eeah0G7tYLi/sZJrKO4fUbjy4oncM/SQ5D9Ww4J25Xniiis2/eO2FNOhf1/CxmamsEGp2+nzx2CwxajYyWKN9o3KdpX+Ebd2I2287ePm973i3UdMg0W+0y4mtUkNqJPKuBJ5ewuEBYx8gbiBxz+FFFS3ZM1p01OdNN/wBaFfVtU0qHxHplx9qsSkEl2853SvIjxwjdtCZXIX7wbt05q7YJdS6nc6vYxWEtpfi2KS+bKsjQhCSWBBG4bhtAAyCcmiinF3k0RWgqdKMl1VvxZfM2s+VkWFh5nl5x9tfG/djGfK6bec468Y/irN1CeUCeHXbrTItPc3O6GN5PNltxHx0I+YKXLYB42455ooqpaIwo2lO1rE1rZjUYrcCO2Giw/Zp7BYzKrkKu4bh8oAB2EA56HIz0u3uxL+1kbygQpQFt2fmki4GOOuOvfHbNFFPpcTu6nKFpsTU75V8oNJKXIXduOI4hk54zjHTjGO+a0KKKaM59PQxLqNNBMuoQpDFYJEfPQLISp8zcWAXIxh5CcLnOMnHQaFNKkkvtOFoli0k9xqP32Zn24LIFyM7kwRg98c5yUVL3No6xTfV2/IrxyW0vh21kQ2phaexKn97s5aErj+LPTbnj7u7+KujoopxZNZW+9/oQXdpBfWk1rcxiSGVGjdSSMhgQeRyOCRxWOtvbXU0Ol6mIHksJbea0IMoJYISGy3U5ST+JuB83uUUMVJuz121JnaL/AITOBSYPOGnyEA7/ADdvmJnH8G3IHX5s4xxmtmiihdRVFZR9AoooqjI//9k='
response = requests.get('https://ocr.holey.cc/ncku?base64_str=%s' % image) # .split('base64,')[1])
print(response.content)


@@ -1,41 +0,0 @@
import requests


class Completion:
    def create(prompt: str,
               model: str = 'openai:gpt-3.5-turbo',
               temperature: float = 0.7,
               max_tokens: int = 200,
               top_p: float = 1,
               top_k: int = 1,
               frequency_penalty: float = 1,
               presence_penalty: float = 1,
               stopSequences: list = []):

        token = requests.get('https://play.vercel.ai/openai.jpeg', headers={
            'authority': 'play.vercel.ai',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'referer': 'https://play.vercel.ai/',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'}).text.replace('=','')

        print(token)

        headers = {
            'authority': 'play.vercel.ai',
            'custom-encoding': token,
            'origin': 'https://play.vercel.ai',
            'referer': 'https://play.vercel.ai/',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
        }

        for chunk in requests.post('https://play.vercel.ai/api/generate', headers=headers, stream=True, json={
                'prompt': prompt,
                'model': model,
                'temperature': temperature,
                'maxTokens': max_tokens,
                'topK': top_p,
                'topP': top_k,
                'frequencyPenalty': frequency_penalty,
                'presencePenalty': presence_penalty,
                'stopSequences': stopSequences}).iter_lines():
            yield (chunk)


@@ -1,33 +0,0 @@
(async () => {
    let response = await fetch("https://play.vercel.ai/openai.jpeg", {
        "headers": {
            "accept": "*/*",
            "accept-language": "en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3",
            "sec-ch-ua": "\"Chromium\";v=\"112\", \"Google Chrome\";v=\"112\", \"Not:A-Brand\";v=\"99\"",
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": "\"macOS\"",
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin"
        },
        "referrer": "https://play.vercel.ai/",
        "referrerPolicy": "strict-origin-when-cross-origin",
        "body": null,
        "method": "GET",
        "mode": "cors",
        "credentials": "omit"
    });

    let data = JSON.parse(atob(await response.text()))
    let ret = eval("(".concat(data.c, ")(data.a)"));

    botPreventionToken = btoa(JSON.stringify({
        r: ret,
        t: data.t
    }))

    console.log(botPreventionToken);
})()


@@ -1,67 +0,0 @@
import requests
from base64 import b64decode, b64encode
from json import loads
from json import dumps

headers = {
    'Accept': '*/*',
    'Accept-Language': 'en-GB,en-US;q=0.9,en;q=0.8',
    'Connection': 'keep-alive',
    'Referer': 'https://play.vercel.ai/',
    'Sec-Fetch-Dest': 'empty',
    'Sec-Fetch-Mode': 'cors',
    'Sec-Fetch-Site': 'same-origin',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
    'sec-ch-ua': '"Chromium";v="110", "Google Chrome";v="110", "Not:A-Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
}

response = requests.get('https://play.vercel.ai/openai.jpeg', headers=headers)

token_data = loads(b64decode(response.text))
print(token_data)

raw_token = {
    'a': token_data['a'] * .1 * .2,
    't': token_data['t']
}

print(raw_token)

new_token = b64encode(dumps(raw_token, separators=(',', ':')).encode()).decode()
print(new_token)

import requests

headers = {
    'authority': 'play.vercel.ai',
    'accept': '*/*',
    'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
    'content-type': 'application/json',
    'custom-encoding': new_token,
    'origin': 'https://play.vercel.ai',
    'referer': 'https://play.vercel.ai/',
    'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"macOS"',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'same-origin',
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}

json_data = {
    'prompt': 'hello\n',
    'model': 'openai:gpt-3.5-turbo',
    'temperature': 0.7,
    'maxTokens': 200,
    'topK': 1,
    'topP': 1,
    'frequencyPenalty': 1,
    'presencePenalty': 1,
    'stopSequences': [],
}

response = requests.post('https://play.vercel.ai/api/generate', headers=headers, json=json_data)
print(response.text)
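
Read together, the deleted JavaScript and Python files above document the anti-bot handshake: GET /openai.jpeg returns base64-encoded JSON {a, t, c}; the page evaluates the JavaScript in c against a, then sends base64 of the result plus t back in the custom-encoding header. Below is a hedged consolidation of that round trip: make_custom_encoding is a name introduced here, and the a * .1 * .2 line copies the Python file's numeric stand-in rather than the real JS challenge (note the two scripts disagree on the result key, 'r' in the JavaScript versus 'a' in the Python test):

# Sketch only: condenses the two deleted scripts above; assumptions noted inline.
from base64 import b64decode, b64encode
from json import dumps, loads

import requests


def make_custom_encoding() -> str:  # hypothetical helper name
    raw = requests.get('https://play.vercel.ai/openai.jpeg',
                       headers={'Referer': 'https://play.vercel.ai/'}).text
    data = loads(b64decode(raw))  # {'a': ..., 't': ..., 'c': <JS source>}
    result = data['a'] * .1 * .2  # stand-in for evaluating (data['c'])(data['a'])
    payload = {'r': result, 't': data['t']}  # key 'r' per the JS version
    return b64encode(dumps(payload, separators=(',', ':')).encode()).decode()


print(make_custom_encoding())  # value for the custom-encoding request header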


@@ -1,5 +0,0 @@
import vercelai

for token in vercelai.Completion.create('summarize the gnu gpl 1.0'):
    print(token, end='', flush=True)