Commit 396d7e11b2: "made changes"
Author: noptuno, 2023-04-28 06:33:51 -04:00
65 changed files with 592 additions and 560 deletions


@@ -80,14 +80,14 @@ Please note the following:
 - [`/you`](./you/README.md)

 ## Install <a name="install"></a>
-download or clone this GitHub repo
+Download or clone this GitHub repo
 install requirements with:
 ```sh
 pip3 install -r requirements.txt
 ```

 ## To start gpt4free GUI <a name="streamlit-gpt4free-gui"></a>
-move `streamlit_app.py` from `./gui` to the base folder
+Move `streamlit_app.py` from `./gui` to the base folder
 then run:
 `streamlit run streamlit_app.py` or `python3 -m streamlit run streamlit_app.py`


@ -1,47 +1,57 @@
import requests import requests
class Completion:
class Completion: @staticmethod
def create(self, prompt="What is the square root of pi", def create(prompt:str, cookieInput:str) -> str:
system_prompt=("ASSUME I HAVE FULL ACCESS TO COCALC. ENCLOSE MATH IN $. " # Initialize a session with custom headers
"INCLUDE THE LANGUAGE DIRECTLY AFTER THE TRIPLE BACKTICKS " session = Completion._initialize_session(cookieInput)
"IN ALL MARKDOWN CODE BLOCKS. How can I do the following using CoCalc?")) -> str:
# Initialize a session with custom headers # Set the data that will be submitted
session = self._initialize_session() payload = Completion._create_payload(prompt, ("ASSUME I HAVE FULL ACCESS TO COCALC. "))
# Set the data that will be submitted # Submit the request and return the results
payload = self._create_payload(prompt, system_prompt) return Completion._submit_request(session, payload)
# Submit the request and return the results @classmethod
return self._submit_request(session, payload) def _initialize_session(cls, conversationCookie) -> requests.Session:
"""Initialize a session with custom headers for the request."""
def _initialize_session(self) -> requests.Session:
"""Initialize a session with custom headers for the request.""" session = requests.Session()
headers = {
session = requests.Session() 'Accept': '*/*',
headers = { 'Accept-Language': 'en-US,en;q=0.5',
'Accept': '*/*', 'Origin': 'https://cocalc.com',
'Accept-Language': 'en-US,en;q=0.5', 'Referer': 'https://cocalc.com/api/v2/openai/chatgpt',
'Origin': 'https://cocalc.com', 'Cookie': conversationCookie,
'Referer': 'https://cocalc.com/api/v2/openai/chatgpt', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36', }
} session.headers.update(headers)
session.headers.update(headers)
return session
return session
@classmethod
def _create_payload(self, prompt: str, system_prompt: str) -> dict: def _create_payload(
"""Create the payload with the given prompts.""" cls,
prompt: str,
return { system_prompt: str
"input": prompt, ) -> dict:
"system": system_prompt,
"tag": "next:index" return {
} "input": prompt,
"system": system_prompt,
def _submit_request(self, session: requests.Session, payload: dict) -> str: "tag": "next:index"
"""Submit the request to the API and return the response.""" }
response = session.post( @classmethod
"https://cocalc.com/api/v2/openai/chatgpt", json=payload).json() def _submit_request(
return response cls,
session: requests.Session,
payload: dict
) -> str:
response = session.post(
"https://cocalc.com/api/v2/openai/chatgpt", json=payload).json()
return {
"response":response["output"],
"success":response["success"]
}

cocalc/readme.md (new file, 20 lines)

@@ -0,0 +1,20 @@
### Example: `cocalc` <a name="example-cocalc"></a>

```python
# import library
import cocalc

cocalc.Completion.create(prompt="How are you!", cookieInput="<your cookie string>")
```

### How to grab the cookie input

```js
// Run this in your browser's developer-tools console while logged in to cocalc.com,
// and paste the exact output into the cookieInput argument above.
var cookies = document.cookie.split("; ");
var cookieString = "";
for (var i = 0; i < cookies.length; i++) {
    cookieString += cookies[i] + "; ";
}
console.log(cookieString);
```
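For completeness, a minimal sketch (the cookie value below is a hypothetical placeholder) of feeding the string printed by the console snippet into the client defined in `cocalc/__init__.py` above:

```python
import cocalc

# paste the exact string printed by the browser console snippet above
cookie_input = "cocalc_session_id=...; other_cookie=...; "  # hypothetical placeholder

result = cocalc.Completion.create(prompt="What is 2 + 2?", cookieInput=cookie_input)
print(result["response"], result["success"])  # keys returned by _submit_request above
```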

forefront/README.md (new file, 16 lines)

@@ -0,0 +1,16 @@
### Example: `forefront` (use like openai pypi package) <a name="example-forefront"></a>

```python
import forefront

# create an account
token = forefront.Account.create(logging=False)
print(token)

# get a response
for response in forefront.StreamingCompletion.create(token=token,
                                                     prompt='hello world', model='gpt-4'):
    print(response.completion.choices[0].text, end='')
print("")
```

forefront/__init__.py (new file, 154 lines)

@@ -0,0 +1,154 @@
from json import loads
from re import match
from time import time, sleep
from uuid import uuid4

from requests import post
from tls_client import Session

from forefront.mail import Mail
from forefront.typing import ForeFrontResponse


class Account:
    @staticmethod
    def create(proxy=None, logging=False):
        proxies = {
            'http': 'http://' + proxy,
            'https': 'http://' + proxy} if proxy else False

        start = time()

        mail = Mail(proxies)
        mail_token = None
        mail_adress = mail.get_mail()

        # print(mail_adress)

        client = Session(client_identifier='chrome110')
        client.proxies = proxies
        client.headers = {
            "origin": "https://accounts.forefront.ai",
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
        }

        response = client.post('https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
                               data={
                                   "email_address": mail_adress
                               }
                               )

        try:
            trace_token = response.json()['response']['id']
            if logging: print(trace_token)
        except KeyError:
            return 'Failed to create account!'

        response = client.post(
            f"https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6",
            data={
                "strategy": "email_code",
            }
        )

        if logging: print(response.text)

        if not 'sign_up_attempt' in response.text:
            return 'Failed to create account!'

        while True:
            sleep(1)
            for _ in mail.fetch_inbox():
                print(mail.get_message_content(_["id"]))
                mail_token = match(r"(\d){5,6}", mail.get_message_content(_["id"])).group(0)

            if mail_token:
                break

        if logging: print(mail_token)

        response = client.post(
            f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4',
            data={
                'code': mail_token,
                'strategy': 'email_code'
            })

        if logging: print(response.json())

        token = response.json()['client']['sessions'][0]['last_active_token']['jwt']

        with open('accounts.txt', 'a') as f:
            f.write(f'{mail_adress}:{token}\n')

        if logging: print(time() - start)

        return token


class StreamingCompletion:
    @staticmethod
    def create(
            token=None,
            chatId=None,
            prompt='',
            actionType='new',
            defaultPersona='607e41fe-95be-497e-8e97-010a59b2e2c0',  # default
            model='gpt-4') -> ForeFrontResponse:

        if not token: raise Exception('Token is required!')
        if not chatId: chatId = str(uuid4())

        headers = {
            'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
            'accept': '*/*',
            'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
            'authorization': 'Bearer ' + token,
            'cache-control': 'no-cache',
            'content-type': 'application/json',
            'origin': 'https://chat.forefront.ai',
            'pragma': 'no-cache',
            'referer': 'https://chat.forefront.ai/',
            'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
            'sec-ch-ua-mobile': '?0',
            'sec-ch-ua-platform': '"macOS"',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'cross-site',
            'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
        }

        json_data = {
            'text': prompt,
            'action': actionType,
            'parentId': chatId,
            'workspaceId': chatId,
            'messagePersona': defaultPersona,
            'model': model
        }

        for chunk in post('https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
                          headers=headers, json=json_data, stream=True).iter_lines():
            if b'finish_reason":null' in chunk:
                data = loads(chunk.decode('utf-8').split('data: ')[1])
                token = data['choices'][0]['delta'].get('content')

                if token != None:
                    yield ForeFrontResponse({
                        'id': chatId,
                        'object': 'text_completion',
                        'created': int(time()),
                        'model': model,
                        'choices': [{
                            'text': token,
                            'index': 0,
                            'logprobs': None,
                            'finish_reason': 'stop'
                        }],
                        'usage': {
                            'prompt_tokens': len(prompt),
                            'completion_tokens': len(token),
                            'total_tokens': len(prompt) + len(token)
                        }
                    })


@@ -23,17 +23,21 @@ class Mail:
             "sec-fetch-dest": "empty",
             "referer": "https://mail.tm/",
             "accept-encoding": "gzip, deflate, br",
-            "accept-language": "en-GB,en-US;q=0.9,en;q=0.8",
+            "accept-language": "en-GB,en-US;q=0.9,en;q=0.8"
         }

     def get_mail(self) -> str:
         token = ''.join(choices(ascii_letters, k=14)).lower()
-        init = self.client.post(
-            "https://api.mail.tm/accounts", json={"address": f"{token}@bugfoo.com", "password": token}
-        )
+        init = self.client.post("https://api.mail.tm/accounts", json={
+            "address": f"{token}@bugfoo.com",
+            "password": token
+        })
         if init.status_code == 201:
-            resp = self.client.post("https://api.mail.tm/token", json={**init.json(), "password": token})
+            resp = self.client.post("https://api.mail.tm/token", json={
+                **init.json(),
+                "password": token
+            })

             self.client.headers['authorization'] = 'Bearer ' + resp.json()['token']

forefront/typing.py (new file, 36 lines)

@@ -0,0 +1,36 @@
class ForeFrontResponse:
    class Completion:
        class Choices:
            def __init__(self, choice: dict) -> None:
                self.text = choice['text']
                self.content = self.text.encode()
                self.index = choice['index']
                self.logprobs = choice['logprobs']
                self.finish_reason = choice['finish_reason']

            def __repr__(self) -> str:
                return f'''<__main__.APIResponse.Completion.Choices(\n    text = {self.text.encode()},\n    index = {self.index},\n    logprobs = {self.logprobs},\n    finish_reason = {self.finish_reason})object at 0x1337>'''

        def __init__(self, choices: dict) -> None:
            self.choices = [self.Choices(choice) for choice in choices]

    class Usage:
        def __init__(self, usage_dict: dict) -> None:
            self.prompt_tokens = usage_dict['prompt_tokens']
            self.completion_tokens = usage_dict['completion_tokens']
            self.total_tokens = usage_dict['total_tokens']

        def __repr__(self):
            return f'''<__main__.APIResponse.Usage(\n    prompt_tokens = {self.prompt_tokens},\n    completion_tokens = {self.completion_tokens},\n    total_tokens = {self.total_tokens})object at 0x1337>'''

    def __init__(self, response_dict: dict) -> None:
        self.response_dict = response_dict
        self.id = response_dict['id']
        self.object = response_dict['object']
        self.created = response_dict['created']
        self.model = response_dict['model']
        self.completion = self.Completion(response_dict['choices'])
        self.usage = self.Usage(response_dict['usage'])

    def json(self) -> dict:
        return self.response_dict
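As a quick illustration of how this wrapper is consumed (the response dict below is fabricated to match the structure built in `forefront/__init__.py`):

```python
from forefront.typing import ForeFrontResponse

sample = {
    'id': 'abc', 'object': 'text_completion', 'created': 0, 'model': 'gpt-4',
    'choices': [{'text': 'hi', 'index': 0, 'logprobs': None, 'finish_reason': 'stop'}],
    'usage': {'prompt_tokens': 1, 'completion_tokens': 1, 'total_tokens': 2},
}

resp = ForeFrontResponse(sample)
print(resp.completion.choices[0].text)  # 'hi'
print(resp.usage.total_tokens)          # 2
```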


@@ -1,17 +1,24 @@
-import openai_rev
-from openai_rev import forefront, quora, theb, you
+import os
+import sys
+sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
+
+import forefront, quora, theb, you
 import random


 def query_forefront(question: str) -> str:
     # create an account
-    token = forefront.Account.create(logging=True)
+    token = forefront.Account.create(logging=False)
+
+    response = ""
     # get a response
     try:
-        result = forefront.StreamingCompletion.create(token = token, prompt = 'hello world', model='gpt-4')
-
-        return result['response']
+        for i in forefront.StreamingCompletion.create(token = token, prompt = 'hello world', model='gpt-4'):
+            response += i.completion.choices[0].text
+
+        return response
     except Exception as e:
         # Return error message if an exception occurs
@@ -31,11 +38,11 @@ def query_quora(question: str) -> str:

 def query_theb(question: str) -> str:
     # Set cloudflare clearance cookie and get answer from GPT-4 model
+    response = ""
     try:
         result = theb.Completion.create(
             prompt = question)
-
-        return result['response']
+        return result
     except Exception as e:
         # Return error message if an exception occurs
@@ -47,16 +54,14 @@ def query_you(question: str) -> str:
     try:
         result = you.Completion.create(
             prompt = question)
-
-        return result.text
+        return result["response"]
     except Exception as e:
         # Return error message if an exception occurs
         return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'


-def query(user_input: str, selected_method: str = "Random") -> str:
-    # Define a dictionary containing all query methods
-    avail_query_methods = {
+# Define a dictionary containing all query methods
+avail_query_methods = {
         "Forefront": query_forefront,
         "Poe": query_quora,
         "Theb": query_theb,
@@ -67,6 +72,8 @@ def query(user_input: str, selected_method: str = "Random") -> str:
         # "Ora": query_ora,
     }

+
+def query(user_input: str, selected_method: str = "Random") -> str:
     # If a specific query method is selected (not "Random") and the method is in the dictionary, try to call it
     if selected_method != "Random" and selected_method in avail_query_methods:
         try:
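A short usage sketch of the dispatcher above (the provider names are the keys of `avail_query_methods`):

```python
# ask a specific provider, or let the dispatcher pick one at random
print(query("Explain recursion in one sentence.", selected_method="Theb"))
print(query("Explain recursion in one sentence."))  # selected_method defaults to "Random"
```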


@@ -38,9 +38,10 @@ st.header('GPT4free GUI')
 question_text_area = st.text_area('🤖 Ask Any Question :', placeholder='Explain quantum computing in 50 words')
 if st.button('🧠 Think'):
     answer = get_answer(question_text_area)
+    escaped = answer.encode('utf-8').decode('unicode-escape')
     # Display answer
     st.caption("Answer :")
-    st.markdown(answer)
+    st.markdown(escaped)

 # Hide Streamlit footer
 hide_streamlit_style = """
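The `unicode-escape` round trip added above turns literal `\n` escape sequences in a provider's raw output into real newlines before rendering; a standalone illustration:

```python
raw = 'Line one\\nLine two'  # a literal backslash-n, as some providers return
print(raw.encode('utf-8').decode('unicode-escape'))
# Line one
# Line two
```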


@@ -1,13 +1,13 @@
 import os
 import sys
+import atexit

 sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))

 import streamlit as st
 from streamlit_chat import message
-from query_methods import query
+from query_methods import query, avail_query_methods
 import pickle
-import openai_rev

 conversations_file = "conversations.pkl"
@@ -18,6 +18,9 @@ def load_conversations():
             return pickle.load(f)
     except FileNotFoundError:
         return []
+    except EOFError:
+        return []

@@ -28,8 +31,22 @@ def save_conversations(conversations, current_conversation):
             break
     if not updated:
         conversations.append(current_conversation)
-    with open(conversations_file, "wb") as f:
+
+    temp_conversations_file = "temp_" + conversations_file
+    with open(temp_conversations_file, "wb") as f:
         pickle.dump(conversations, f)
+
+    os.replace(temp_conversations_file, conversations_file)
+
+
+def exit_handler():
+    print("Exiting, saving data...")
+    # Perform cleanup operations here, like saving data or closing open files.
+    save_conversations(st.session_state.conversations, st.session_state.current_conversation)
+
+
+# Register the exit_handler function to be called when the program is closing.
+atexit.register(exit_handler)

 st.header("Chat Placeholder")
@@ -74,7 +91,7 @@ if st.sidebar.button("New Conversation"):
 st.session_state['query_method'] = st.sidebar.selectbox(
     "Select API:",
-    options=openai_rev.Provider.__members__.keys(),
+    options=avail_query_methods,
     index=0
 )
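The save path above follows the usual write-to-temp-then-rename pattern, so a crash mid-write cannot truncate the existing pickle; `os.replace` swaps the file in atomically on the same filesystem. A generic sketch of the same idea:

```python
import os
import pickle


def atomic_pickle_dump(obj, path):
    tmp = path + ".tmp"
    with open(tmp, "wb") as f:
        pickle.dump(obj, f)   # write the full payload to a sibling temp file first
    os.replace(tmp, path)     # then atomically swap it into place
```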


@@ -1 +0,0 @@
from .openai_rev import Provider


@@ -1,15 +0,0 @@
### Example: `forefront` (use like openai pypi package) <a name="example-forefront"></a>

```python
from openai_rev import forefront

# create an account
token = forefront.Account.create(logging=True)
print(token)

# get a response
for response in forefront.StreamingCompletion.create(token=token,
                                                     prompt='hello world', model='gpt-4'):
    print(response.completion.choices[0].text, end='')
```


@@ -1,189 +0,0 @@
from json import loads
from re import match
from time import time, sleep
from typing import Generator, Optional
from uuid import uuid4
from fake_useragent import UserAgent
from requests import post
from tls_client import Session
from .mail import Mail
from .models import ForeFrontResponse
class Account:
@staticmethod
def create(proxy: Optional[str] = None, logging: bool = False):
proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
start = time()
mail_client = Mail(proxies)
mail_token = None
mail_address = mail_client.get_mail()
# print(mail_address)
client = Session(client_identifier='chrome110')
client.proxies = proxies
client.headers = {
'origin': 'https://accounts.forefront.ai',
'user-agent': UserAgent().random,
}
response = client.post(
'https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
data={'email_address': mail_address},
)
trace_token = response.json()['response']['id']
if logging:
print(trace_token)
response = client.post(
f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6',
data={
'strategy': 'email_code',
},
)
if logging:
print(response.text)
if 'sign_up_attempt' not in response.text:
return 'Failed to create account!'
while True:
sleep(1)
for _ in mail_client.fetch_inbox():
if logging:
print(mail_client.get_message_content(_['id']))
mail_token = match(r'(\d){5,6}', mail_client.get_message_content(_['id'])).group(0)
if mail_token:
break
if logging:
print(mail_token)
response = client.post(
f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4',
data={'code': mail_token, 'strategy': 'email_code'},
)
if logging:
print(response.json())
token = response.json()['client']['sessions'][0]['last_active_token']['jwt']
with open('accounts.txt', 'a') as f:
f.write(f'{mail_address}:{token}\n')
if logging:
print(time() - start)
return token
class StreamingCompletion:
@staticmethod
def create(
token=None,
chat_id=None,
prompt='',
action_type='new',
default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
model='gpt-4',
) -> Generator[ForeFrontResponse, None, None]:
if not token:
raise Exception('Token is required!')
if not chat_id:
chat_id = str(uuid4())
headers = {
'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
'accept': '*/*',
'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
'authorization': 'Bearer ' + token,
'cache-control': 'no-cache',
'content-type': 'application/json',
'origin': 'https://chat.forefront.ai',
'pragma': 'no-cache',
'referer': 'https://chat.forefront.ai/',
'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
'sec-ch-ua-mobile': '?0',
'sec-ch-ua-platform': '"macOS"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'cross-site',
'user-agent': UserAgent().random,
}
json_data = {
'text': prompt,
'action': action_type,
'parentId': chat_id,
'workspaceId': chat_id,
'messagePersona': default_persona,
'model': model,
}
for chunk in post(
'https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
headers=headers,
json=json_data,
stream=True,
).iter_lines():
if b'finish_reason":null' in chunk:
data = loads(chunk.decode('utf-8').split('data: ')[1])
token = data['choices'][0]['delta'].get('content')
if token is not None:
yield ForeFrontResponse(
**{
'id': chat_id,
'object': 'text_completion',
'created': int(time()),
'text': token,
'model': model,
'choices': [{'text': token, 'index': 0, 'logprobs': None, 'finish_reason': 'stop'}],
'usage': {
'prompt_tokens': len(prompt),
'completion_tokens': len(token),
'total_tokens': len(prompt) + len(token),
},
}
)
class Completion:
@staticmethod
def create(
token=None,
chat_id=None,
prompt='',
action_type='new',
default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
model='gpt-4',
) -> ForeFrontResponse:
text = ''
final_response = None
for response in StreamingCompletion.create(
token=token,
chat_id=chat_id,
prompt=prompt,
action_type=action_type,
default_persona=default_persona,
model=model,
):
if response:
final_response = response
text += response.text
if final_response:
final_response.text = text
else:
raise Exception('Unable to get the response, Please try again')
return final_response


@@ -1,26 +0,0 @@
from typing import Any, List

from pydantic import BaseModel


class Choice(BaseModel):
    text: str
    index: int
    logprobs: Any
    finish_reason: str


class Usage(BaseModel):
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class ForeFrontResponse(BaseModel):
    id: str
    object: str
    created: int
    model: str
    choices: List[Choice]
    usage: Usage
    text: str


@@ -1,42 +0,0 @@
from enum import Enum

from openai_rev import forefront
from openai_rev import quora
from openai_rev import theb
from openai_rev import you


class Provider(Enum):
    You = 'you'
    Poe = 'poe'
    ForeFront = 'fore_front'
    Theb = 'theb'


class Completion:
    @staticmethod
    def create(provider: Provider, prompt: str, **kwargs):
        if provider == Provider.Poe:
            return Completion.__poe_service(prompt, **kwargs)
        elif provider == Provider.You:
            return Completion.__you_service(prompt, **kwargs)
        elif provider == Provider.ForeFront:
            return Completion.__fore_front_service(prompt, **kwargs)
        elif provider == Provider.Theb:
            return Completion.__theb_service(prompt, **kwargs)

    @classmethod
    def __you_service(cls, prompt: str, **kwargs) -> str:
        return you.Completion.create(prompt, **kwargs).text

    @classmethod
    def __poe_service(cls, prompt: str, **kwargs) -> str:
        return quora.Completion.create(prompt=prompt, **kwargs).text

    @classmethod
    def __fore_front_service(cls, prompt: str, **kwargs) -> str:
        return forefront.Completion.create(prompt=prompt, **kwargs).text

    @classmethod
    def __theb_service(cls, prompt: str, **kwargs):
        return ''.join(theb.Completion.create(prompt=prompt))

openaihosted/__init__.py (new file, 60 lines)

@@ -0,0 +1,60 @@
import json
import re

from fake_useragent import UserAgent
import requests


class Completion:
    @staticmethod
    def create(
        systemprompt: str,
        text: str,
        assistantprompt: str
    ):

        data = [
            {"role": "system", "content": systemprompt},
            {"role": "user", "content": "hi"},
            {"role": "assistant", "content": assistantprompt},
            {"role": "user", "content": text},
        ]
        url = f'https://openai.a2hosted.com/chat?q={Completion.__get_query_param(data)}'

        try:
            response = requests.get(url, headers=Completion.__get_headers(), stream=True)
        except:
            return Completion.__get_failure_response()

        sentence = ""

        for message in response.iter_content(chunk_size=1024):
            message = message.decode('utf-8')
            msg_match, num_match = re.search(r'"msg":"([^"]+)"', message), re.search(r'\[DONE\] (\d+)', message)
            if msg_match:
                # Put the captured group into a sentence
                sentence += msg_match.group(1)

        return {
            'response': sentence
        }

    @classmethod
    def __get_headers(cls) -> dict:
        return {
            'authority': 'openai.a2hosted.com',
            'accept': 'text/event-stream',
            'accept-language': 'en-US,en;q=0.9,id;q=0.8,ja;q=0.7',
            'cache-control': 'no-cache',
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'cross-site',
            'user-agent': UserAgent().random
        }

    @classmethod
    def __get_failure_response(cls) -> dict:
        return dict(response='Unable to fetch the response, Please try again.', links=[], extra={})

    @classmethod
    def __get_query_param(cls, conversation) -> str:
        encoded_conversation = json.dumps(conversation)
        return encoded_conversation.replace(" ", "%20").replace('"', '%22').replace("'", "%27")
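For reference, the manual `replace` chain in `__get_query_param` only percent-encodes spaces and quote characters; the standard library's `urllib.parse.quote` produces an equivalent and more complete encoding, as in this minimal sketch:

```python
import json
from urllib.parse import quote

conversation = [{"role": "user", "content": "What is 4+4?"}]
query_string = quote(json.dumps(conversation))  # percent-encodes spaces, quotes and other reserved characters
url = f'https://openai.a2hosted.com/chat?q={query_string}'
```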

openaihosted/readme.md (new file, 10 lines)

@@ -0,0 +1,10 @@
### Example: `openaihosted` <a name="example-openaihosted"></a>

```python
# import library
import openaihosted

res = openaihosted.Completion.create(
    systemprompt="U are ChatGPT",
    text="What is 4+4",
    assistantprompt="U are a helpful assistant."
)['response']
print(res)  # responds with the answer
```


@@ -1,4 +1,5 @@
-#### warning !!!
+> ⚠ Warning !!!
 poe.com added security and can detect if you are making automated requests. You may get your account banned if you are using this api.
+The normal non-driver api is also currently not very stable
@@ -16,44 +17,44 @@ models = {
 }
 ```

-#### !! new: bot creation
+### New: bot creation

 ```python
 # import quora (poe) package
-from openai_rev import quora
+import quora

 # create account
 # make sure to set enable_bot_creation to True
-token = quora.Account.create(logging=True, enable_bot_creation=True)
+token = quora.Account.create(logging = True, enable_bot_creation=True)

 model = quora.Model.create(
-    token=token,
-    model='gpt-3.5-turbo', # or claude-instant-v1.0
-    system_prompt='you are ChatGPT a large language model ...'
+    token = token,
+    model = 'gpt-3.5-turbo', # or claude-instant-v1.0
+    system_prompt = 'you are ChatGPT a large language model ...'
 )

 print(model.name) # gptx....

 # streaming response
 for response in quora.StreamingCompletion.create(
-    custom_model=model.name,
-    prompt='hello world',
-    token=token):
-
-    print(response.text)
+    custom_model = model.name,
+    prompt ='hello world',
+    token = token):
+
+    print(response.completion.choices[0].text)
 ```

-#### Normal Response:
+### Normal Response:
 ```python
+import quora
+
 response = quora.Completion.create(model = 'gpt-4',
                                    prompt = 'hello world',
-                                   token = 'token')
-
-print(response.text)
+                                   token = token)
+
+print(response.completion.choices[0].text)
 ```

-#### Update Use This For Poe
+### Update Use This For Poe
 ```python
 from quora import Poe


@@ -6,12 +6,11 @@ from pathlib import Path
 from random import choice, choices, randint
 from re import search, findall
 from string import ascii_letters, digits
-from typing import Optional, Union, List, Any, Generator
+from typing import Optional, Union
 from urllib.parse import unquote

 import selenium.webdriver.support.expected_conditions as EC
 from fake_useragent import UserAgent
-from pydantic import BaseModel
 from pypasser import reCaptchaV3
 from requests import Session
 from selenium.webdriver import Firefox, Chrome, FirefoxOptions, ChromeOptions
@@ -19,8 +18,8 @@ from selenium.webdriver.common.by import By
 from selenium.webdriver.support.wait import WebDriverWait
 from tls_client import Session as TLS

-from .api import Client as PoeClient
-from .mail import Emailnator
+from quora.api import Client as PoeClient
+from quora.mail import Emailnator

 SELENIUM_WEB_DRIVER_ERROR_MSG = b'''The error message you are receiving is due to the `geckodriver` executable not
 being found in your system\'s PATH. To resolve this issue, you need to download the geckodriver and add its location
@@ -68,27 +67,42 @@ def extract_formkey(html):
     return formkey


-class Choice(BaseModel):
-    text: str
-    index: int
-    logprobs: Any
-    finish_reason: str
-
-
-class Usage(BaseModel):
-    prompt_tokens: int
-    completion_tokens: int
-    total_tokens: int
-
-
-class PoeResponse(BaseModel):
-    id: int
-    object: str
-    created: int
-    model: str
-    choices: List[Choice]
-    usage: Usage
-    text: str
+class PoeResponse:
+    class Completion:
+        class Choices:
+            def __init__(self, choice: dict) -> None:
+                self.text = choice['text']
+                self.content = self.text.encode()
+                self.index = choice['index']
+                self.logprobs = choice['logprobs']
+                self.finish_reason = choice['finish_reason']
+
+            def __repr__(self) -> str:
+                return f'''<__main__.APIResponse.Completion.Choices(\n    text = {self.text.encode()},\n    index = {self.index},\n    logprobs = {self.logprobs},\n    finish_reason = {self.finish_reason})object at 0x1337>'''

+        def __init__(self, choices: dict) -> None:
+            self.choices = [self.Choices(choice) for choice in choices]
+
+    class Usage:
+        def __init__(self, usage_dict: dict) -> None:
+            self.prompt_tokens = usage_dict['prompt_tokens']
+            self.completion_tokens = usage_dict['completion_tokens']
+            self.total_tokens = usage_dict['total_tokens']
+
+        def __repr__(self):
+            return f'''<__main__.APIResponse.Usage(\n    prompt_tokens = {self.prompt_tokens},\n    completion_tokens = {self.completion_tokens},\n    total_tokens = {self.total_tokens})object at 0x1337>'''
+
+    def __init__(self, response_dict: dict) -> None:
+        self.response_dict = response_dict
+        self.id = response_dict['id']
+        self.object = response_dict['object']
+        self.created = response_dict['created']
+        self.model = response_dict['model']
+        self.completion = self.Completion(response_dict['choices'])
+        self.usage = self.Usage(response_dict['usage'])
+
+    def json(self) -> dict:
+        return self.response_dict


 class ModelResponse:
@@ -102,12 +116,17 @@ class ModelResponse:

 class Model:
     @staticmethod
     def create(
         token: str,
         model: str = 'gpt-3.5-turbo',  # claude-instant
         system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
         description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
         handle: str = None,
     ) -> ModelResponse:
+        models = {
+            'gpt-3.5-turbo': 'chinchilla',
+            'claude-instant-v1.0': 'a2',
+            'gpt-4': 'beaver',
+        }
         if not handle:
             handle = f'gptx{randint(1111111, 9999999)}'
@@ -143,7 +162,7 @@ class Model:
             obj={
                 'queryName': 'CreateBotMain_poeBotCreate_Mutation',
                 'variables': {
-                    'model': MODELS[model],
+                    'model': models[model],
                     'handle': handle,
                     'prompt': system_prompt,
                     'isPromptPublic': True,
@@ -183,9 +202,9 @@

 class Account:
     @staticmethod
     def create(
         proxy: Optional[str] = None,
         logging: bool = False,
         enable_bot_creation: bool = False,
     ):
         client = TLS(client_identifier='chrome110')
         client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None
@@ -290,23 +309,22 @@

 class StreamingCompletion:
     @staticmethod
     def create(
         model: str = 'gpt-4',
         custom_model: bool = None,
         prompt: str = 'hello world',
         token: str = '',
-    ) -> Generator[PoeResponse, None, None]:
+    ):
         _model = MODELS[model] if not custom_model else custom_model

         client = PoeClient(token)

         for chunk in client.send_message(_model, prompt):
             yield PoeResponse(
-                **{
+                {
                     'id': chunk['messageId'],
                     'object': 'text_completion',
                     'created': chunk['creationTime'],
                     'model': _model,
-                    'text': chunk['text_new'],
                     'choices': [
                         {
                             'text': chunk['text_new'],
@@ -325,28 +343,33 @@ class StreamingCompletion:

 class Completion:
     @staticmethod
     def create(
         model: str = 'gpt-4',
         custom_model: str = None,
         prompt: str = 'hello world',
         token: str = '',
-    ) -> PoeResponse:
-        _model = MODELS[model] if not custom_model else custom_model
+    ):
+        models = {
+            'sage': 'capybara',
+            'gpt-4': 'beaver',
+            'claude-v1.2': 'a2_2',
+            'claude-instant-v1.0': 'a2',
+            'gpt-3.5-turbo': 'chinchilla',
+        }
+        _model = models[model] if not custom_model else custom_model

         client = PoeClient(token)

-        chunk = None
-        for response in client.send_message(_model, prompt):
-            chunk = response
+        for chunk in client.send_message(_model, prompt):
+            pass

         return PoeResponse(
-            **{
+            {
                 'id': chunk['messageId'],
                 'object': 'text_completion',
                 'created': chunk['creationTime'],
                 'model': _model,
-                'text': chunk['text'],
                 'choices': [
                     {
                         'text': chunk['text'],
@@ -366,12 +389,12 @@ class Completion:

 class Poe:
     def __init__(
         self,
         model: str = 'ChatGPT',
         driver: str = 'firefox',
         download_driver: bool = False,
         driver_path: Optional[str] = None,
         cookie_path: str = './quora/cookie.json',
     ):
         # validating the model
         if model and model not in MODELS:
@@ -450,12 +473,12 @@ class Poe:
         return response

     def create_bot(
         self,
         name: str,
         /,
         prompt: str = '',
         base_model: str = 'ChatGPT',
         description: str = '',
     ) -> None:
         if base_model not in MODELS:
             raise RuntimeError('Sorry, the base_model you provided does not exist. Please check and try again.')


@@ -384,7 +384,7 @@ class Client:
                 continue

             # update info about response
-            message["text_new"] = message["text"][len(last_text) :]
+            message["text_new"] = message["text"][len(last_text):]
             last_text = message["text"]
             message_id = message["messageId"]
@@ -456,21 +456,21 @@
         logger.info(f"No more messages left to delete.")

     def create_bot(
         self,
         handle,
         prompt="",
         base_model="chinchilla",
         description="",
         intro_message="",
         api_key=None,
         api_bot=False,
         api_url=None,
         prompt_public=True,
         pfp_url=None,
         linkification=False,
         markdown_rendering=True,
         suggested_replies=False,
         private=False,
     ):
         result = self.send_query(
             "PoeBotCreateMutation",
@@ -499,21 +499,21 @@
         return data

     def edit_bot(
         self,
         bot_id,
         handle,
         prompt="",
         base_model="chinchilla",
         description="",
         intro_message="",
         api_key=None,
         api_url=None,
         private=False,
         prompt_public=True,
         pfp_url=None,
         linkification=False,
         markdown_rendering=True,
         suggested_replies=False,
     ):
         result = self.send_query(
             "PoeBotEditMutation",


@@ -42,7 +42,9 @@ class Emailnator:
         while True:
             sleep(2)
-            mail_token = self.client.post("https://www.emailnator.com/message-list", json={"email": self.email})
+            mail_token = self.client.post(
+                "https://www.emailnator.com/message-list", json={"email": self.email}
+            )

             mail_token = loads(mail_token.text)["messageData"]


@@ -1,4 +1,5 @@
-from openai_rev import theb
+import theb

 for token in theb.Completion.create('hello world'):
     print(token, end='', flush=True)
+print('asdsos')


@@ -1,10 +1,12 @@
 ### Example: `theb` (use like openai pypi package) <a name="example-theb"></a>

 ```python
 # import library
-from openai_rev import theb
+import theb

 # simple streaming completion

 for token in theb.Completion.create('hello world'):
     print(token, end='', flush=True)
+print("")
 ```


@@ -1,11 +1,9 @@
-from re import findall
 from json import loads
 from queue import Queue, Empty
+from re import findall
 from threading import Thread

 from curl_cffi import requests


 class Completion:
     # experimental
     part1 = '{"role":"assistant","id":"chatcmpl'
@@ -16,8 +14,7 @@ class Completion:
     message_queue = Queue()
     stream_completed = False

-    @classmethod
-    def request(cls, prompt: str):
+    def request(prompt: str):
         headers = {
             'authority': 'chatbot.theb.ai',
             'content-type': 'application/json',
@@ -25,11 +22,12 @@ class Completion:
             'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
         }

-        requests.post(
-            'https://chatbot.theb.ai/api/chat-process',
-            headers=headers,
-            content_callback=Completion.handle_stream_response,
-            json={'prompt': prompt, 'options': {}},
+        requests.post('https://chatbot.theb.ai/api/chat-process', headers=headers,
+                      content_callback = Completion.handle_stream_response,
+                      json = {
+                          'prompt': prompt,
+                          'options': {}
+                      }
         )

         Completion.stream_completed = True
@@ -38,7 +36,7 @@ class Completion:
     def create(prompt: str):
         Thread(target=Completion.request, args=[prompt]).start()

-        while not Completion.stream_completed or not Completion.message_queue.empty():
+        while Completion.stream_completed != True or not Completion.message_queue.empty():
             try:
                 message = Completion.message_queue.get(timeout=0.01)

                 for message in findall(Completion.regex, message):

theb/theb_test.py (new file, 4 lines)

@@ -0,0 +1,4 @@
import theb

for token in theb.Completion.create('hello world'):
    print(token, end='', flush=True)


@@ -1,7 +0,0 @@
import cocalc

response = cocalc.Completion.create(
    prompt='hello world'
)

print(response)


@@ -1,3 +0,0 @@
writegpt.ai
to do:
- code ref


@@ -1,41 +0,0 @@
import json
import re

import requests

headers = {
    'authority': 'openai.a2hosted.com',
    'accept': 'text/event-stream',
    'accept-language': 'en-US,en;q=0.9,id;q=0.8,ja;q=0.7',
    'cache-control': 'no-cache',
    'sec-fetch-dest': 'empty',
    'sec-fetch-mode': 'cors',
    'sec-fetch-site': 'cross-site',
    'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.0.0',
}


def create_query_param(conversation):
    encoded_conversation = json.dumps(conversation)
    return encoded_conversation.replace(" ", "%20").replace('"', '%22').replace("'", "%27")


user_input = input("Enter your message: ")

data = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "hi"},
    {"role": "assistant", "content": "Hello! How can I assist you today?"},
    {"role": "user", "content": user_input},
]

query_param = create_query_param(data)
url = f'https://openai.a2hosted.com/chat?q={query_param}'

response = requests.get(url, headers=headers, stream=True)

for message in response.iter_content(chunk_size=1024):
    message = message.decode('utf-8')
    msg_match, num_match = re.search(r'"msg":"(.*?)"', message), re.search(r'\[DONE\] (\d+)', message)
    if msg_match: print(msg_match.group(1))
    if num_match: print(num_match.group(1))


@@ -1,8 +1,7 @@
 ### Example: `you` (use like openai pypi package) <a name="example-you"></a>

 ```python
-from openai_rev import you
+import you

 # simple request with links and details
 response = you.Completion.create(
@@ -26,7 +25,8 @@ chat = []
 while True:
     prompt = input("You: ")
+    if prompt == 'q':
+        break

     response = you.Completion.create(
         prompt=prompt,
         chat=chat)
@@ -34,4 +34,4 @@ while True:

     print("Bot:", response["response"])
     chat.append({"question": prompt, "answer": response["response"]})
 ```


@@ -1,36 +1,28 @@
-import json
 import re
-from typing import Optional, List, Dict, Any
+from json import loads
 from uuid import uuid4

 from fake_useragent import UserAgent
-from pydantic import BaseModel
 from tls_client import Session


-class PoeResponse(BaseModel):
-    text: Optional[str] = None
-    links: List[str] = []
-    extra: Dict[str, Any] = {}
-
-
 class Completion:
     @staticmethod
     def create(
         prompt: str,
         page: int = 1,
         count: int = 10,
         safe_search: str = 'Moderate',
         on_shopping_page: bool = False,
         mkt: str = '',
         response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
         domain: str = 'youchat',
         query_trace_id: str = None,
         chat: list = None,
         include_links: bool = False,
         detailed: bool = False,
         debug: bool = False,
-    ) -> PoeResponse:
+    ) -> dict:
         if chat is None:
             chat = []
@@ -65,25 +57,23 @@ class Completion:
             r'(?<=event: youChatSerpResults\ndata:)(.*\n)*?(?=event: )', response.text
         ).group()
         third_party_search_results = re.search(
-            r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text
-        ).group()
+            r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text).group()
         # slots = findall(r"slots\ndata: (.*)\n\nevent", response.text)[0]

         text = ''.join(re.findall(r'{\"youChatToken\": \"(.*?)\"}', response.text))

         extra = {
-            'youChatSerpResults': json.loads(you_chat_serp_results),
+            'youChatSerpResults': loads(you_chat_serp_results),
             # 'slots' : loads(slots)
         }

-        response = PoeResponse(text=text.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"'))
-        if include_links:
-            response.links = json.loads(third_party_search_results)['search']['third_party_search_results']
-
-        if detailed:
-            response.extra = extra
-
-        return response
+        return {
+            'response': text.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"'),
+            'links': loads(third_party_search_results)['search']['third_party_search_results']
+            if include_links
+            else None,
+            'extra': extra if detailed else None,
+        }

     @classmethod
     def __get_headers(cls) -> dict:
@@ -104,5 +94,5 @@ class Completion:
         }

     @classmethod
-    def __get_failure_response(cls) -> PoeResponse:
-        return PoeResponse(text='Unable to fetch the response, Please try again.')
+    def __get_failure_response(cls) -> dict:
+        return dict(response='Unable to fetch the response, Please try again.', links=[], extra={})