diff --git a/README.md b/README.md
index 7946cfca..6d6c398c 100644
--- a/README.md
+++ b/README.md
@@ -80,14 +80,14 @@ Please note the following:
- [`/you`](./you/README.md)
## Install
-download or clone this GitHub repo
+Download or clone this GitHub repo
install requirements with:
```sh
pip3 install -r requirements.txt
```
## To start gpt4free GUI
-move `streamlit_app.py` from `./gui` to the base folder
+Move `streamlit_app.py` from `./gui` to the base folder
then run:
`streamlit run streamlit_app.py` or `python3 -m streamlit run streamlit_app.py`
diff --git a/unfinished/cocalc/__init__.py b/cocalc/__init__.py
similarity index 52%
rename from unfinished/cocalc/__init__.py
rename to cocalc/__init__.py
index 2b73fc9e..5a32274c 100644
--- a/unfinished/cocalc/__init__.py
+++ b/cocalc/__init__.py
@@ -1,47 +1,57 @@
-import requests
-
-
-class Completion:
- def create(self, prompt="What is the square root of pi",
- system_prompt=("ASSUME I HAVE FULL ACCESS TO COCALC. ENCLOSE MATH IN $. "
- "INCLUDE THE LANGUAGE DIRECTLY AFTER THE TRIPLE BACKTICKS "
- "IN ALL MARKDOWN CODE BLOCKS. How can I do the following using CoCalc?")) -> str:
- # Initialize a session with custom headers
- session = self._initialize_session()
-
- # Set the data that will be submitted
- payload = self._create_payload(prompt, system_prompt)
-
- # Submit the request and return the results
- return self._submit_request(session, payload)
-
- def _initialize_session(self) -> requests.Session:
- """Initialize a session with custom headers for the request."""
-
- session = requests.Session()
- headers = {
- 'Accept': '*/*',
- 'Accept-Language': 'en-US,en;q=0.5',
- 'Origin': 'https://cocalc.com',
- 'Referer': 'https://cocalc.com/api/v2/openai/chatgpt',
- 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
- }
- session.headers.update(headers)
-
- return session
-
- def _create_payload(self, prompt: str, system_prompt: str) -> dict:
- """Create the payload with the given prompts."""
-
- return {
- "input": prompt,
- "system": system_prompt,
- "tag": "next:index"
- }
-
- def _submit_request(self, session: requests.Session, payload: dict) -> str:
- """Submit the request to the API and return the response."""
-
- response = session.post(
- "https://cocalc.com/api/v2/openai/chatgpt", json=payload).json()
- return response
+import requests
+
+class Completion:
+ @staticmethod
+ def create(prompt:str, cookieInput:str) -> str:
+ # Initialize a session with custom headers
+ session = Completion._initialize_session(cookieInput)
+
+ # Set the data that will be submitted
+ payload = Completion._create_payload(prompt, ("ASSUME I HAVE FULL ACCESS TO COCALC. "))
+
+ # Submit the request and return the results
+ return Completion._submit_request(session, payload)
+
+ @classmethod
+ def _initialize_session(cls, conversationCookie) -> requests.Session:
+ """Initialize a session with custom headers for the request."""
+
+ session = requests.Session()
+ headers = {
+ 'Accept': '*/*',
+ 'Accept-Language': 'en-US,en;q=0.5',
+ 'Origin': 'https://cocalc.com',
+ 'Referer': 'https://cocalc.com/api/v2/openai/chatgpt',
+ 'Cookie': conversationCookie,
+ 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36',
+ }
+ session.headers.update(headers)
+
+ return session
+
+ @classmethod
+ def _create_payload(
+ cls,
+ prompt: str,
+ system_prompt: str
+ ) -> dict:
+
+ return {
+ "input": prompt,
+ "system": system_prompt,
+ "tag": "next:index"
+ }
+
+ @classmethod
+ def _submit_request(
+ cls,
+ session: requests.Session,
+ payload: dict
+ ) -> str:
+
+ response = session.post(
+ "https://cocalc.com/api/v2/openai/chatgpt", json=payload).json()
+ return {
+ "response":response["output"],
+ "success":response["success"]
+ }
\ No newline at end of file
diff --git a/cocalc/readme.md b/cocalc/readme.md
new file mode 100644
index 00000000..04095339
--- /dev/null
+++ b/cocalc/readme.md
@@ -0,0 +1,20 @@
+### Example: `cocalc`
+
+
+```python
+# import library
+import cocalc
+
+cocalc.Completion.create(prompt="How are you!", cookieInput="cookieinput") ## Tutorial
+```
+
+### How to grab cookie input
+```js
+// Paste this into your browser's developer-tools console; the exact string it prints is what you pass as cookieInput!
+var cookies = document.cookie.split("; ");
+var cookieString = "";
+for (var i = 0; i < cookies.length; i++) {
+ cookieString += cookies[i] + "; ";
+}
+console.log(cookieString);
+```
diff --git a/forefront/README.md b/forefront/README.md
new file mode 100644
index 00000000..a2be8187
--- /dev/null
+++ b/forefront/README.md
@@ -0,0 +1,16 @@
+### Example: `forefront` (use like openai pypi package)
+
+```python
+import forefront
+
+# create an account
+token = forefront.Account.create(logging=False)
+print(token)
+
+# get a response
+for response in forefront.StreamingCompletion.create(token = token,
+ prompt = 'hello world', model='gpt-4'):
+
+ print(response.completion.choices[0].text, end = '')
+print("")
+```
diff --git a/forefront/__init__.py b/forefront/__init__.py
new file mode 100644
index 00000000..c8eca7c8
--- /dev/null
+++ b/forefront/__init__.py
@@ -0,0 +1,154 @@
+from json import loads
+from re import match
+from time import time, sleep
+from uuid import uuid4
+
+from requests import post
+from tls_client import Session
+
+from forefront.mail import Mail
+from forefront.typing import ForeFrontResponse
+
+
+class Account:
+ @staticmethod
+ def create(proxy=None, logging=False):
+
+ proxies = {
+ 'http': 'http://' + proxy,
+ 'https': 'http://' + proxy} if proxy else False
+
+ start = time()
+
+ mail = Mail(proxies)
+ mail_token = None
+ mail_adress = mail.get_mail()
+
+ # print(mail_adress)
+
+ client = Session(client_identifier='chrome110')
+ client.proxies = proxies
+ client.headers = {
+ "origin": "https://accounts.forefront.ai",
+ "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
+ }
+
+ response = client.post('https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
+ data={
+ "email_address": mail_adress
+ }
+ )
+ try:
+ trace_token = response.json()['response']['id']
+ if logging: print(trace_token)
+ except KeyError:
+ return 'Failed to create account!'
+
+ response = client.post(
+ f"https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6",
+ data={
+ "strategy": "email_code",
+ }
+ )
+
+ if logging: print(response.text)
+
+ if not 'sign_up_attempt' in response.text:
+ return 'Failed to create account!'
+
+ while True:
+ sleep(1)
+ for _ in mail.fetch_inbox():
+ print(mail.get_message_content(_["id"]))
+ mail_token = match(r"(\d){5,6}", mail.get_message_content(_["id"])).group(0)
+
+ if mail_token:
+ break
+
+ if logging: print(mail_token)
+
+ response = client.post(
+ f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4',
+ data={
+ 'code': mail_token,
+ 'strategy': 'email_code'
+ })
+
+ if logging: print(response.json())
+
+ token = response.json()['client']['sessions'][0]['last_active_token']['jwt']
+
+ with open('accounts.txt', 'a') as f:
+ f.write(f'{mail_adress}:{token}\n')
+
+ if logging: print(time() - start)
+
+ return token
+
+
+class StreamingCompletion:
+ @staticmethod
+ def create(
+ token=None,
+ chatId=None,
+ prompt='',
+ actionType='new',
+ defaultPersona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
+ model='gpt-4') -> ForeFrontResponse:
+
+ if not token: raise Exception('Token is required!')
+ if not chatId: chatId = str(uuid4())
+
+ headers = {
+ 'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'authorization': 'Bearer ' + token,
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json',
+ 'origin': 'https://chat.forefront.ai',
+ 'pragma': 'no-cache',
+ 'referer': 'https://chat.forefront.ai/',
+ 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
+ }
+
+ json_data = {
+ 'text': prompt,
+ 'action': actionType,
+ 'parentId': chatId,
+ 'workspaceId': chatId,
+ 'messagePersona': defaultPersona,
+ 'model': model
+ }
+
+ for chunk in post('https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
+ headers=headers, json=json_data, stream=True).iter_lines():
+
+ if b'finish_reason":null' in chunk:
+ data = loads(chunk.decode('utf-8').split('data: ')[1])
+ token = data['choices'][0]['delta'].get('content')
+
+ if token != None:
+ yield ForeFrontResponse({
+ 'id': chatId,
+ 'object': 'text_completion',
+ 'created': int(time()),
+ 'model': model,
+ 'choices': [{
+ 'text': token,
+ 'index': 0,
+ 'logprobs': None,
+ 'finish_reason': 'stop'
+ }],
+ 'usage': {
+ 'prompt_tokens': len(prompt),
+ 'completion_tokens': len(token),
+ 'total_tokens': len(prompt) + len(token)
+ }
+ })
diff --git a/openai_rev/forefront/mail.py b/forefront/mail.py
similarity index 85%
rename from openai_rev/forefront/mail.py
rename to forefront/mail.py
index 2c00051c..41c2a647 100644
--- a/openai_rev/forefront/mail.py
+++ b/forefront/mail.py
@@ -23,17 +23,21 @@ class Mail:
"sec-fetch-dest": "empty",
"referer": "https://mail.tm/",
"accept-encoding": "gzip, deflate, br",
- "accept-language": "en-GB,en-US;q=0.9,en;q=0.8",
+ "accept-language": "en-GB,en-US;q=0.9,en;q=0.8"
}
def get_mail(self) -> str:
token = ''.join(choices(ascii_letters, k=14)).lower()
- init = self.client.post(
- "https://api.mail.tm/accounts", json={"address": f"{token}@bugfoo.com", "password": token}
- )
+ init = self.client.post("https://api.mail.tm/accounts", json={
+ "address": f"{token}@bugfoo.com",
+ "password": token
+ })
if init.status_code == 201:
- resp = self.client.post("https://api.mail.tm/token", json={**init.json(), "password": token})
+ resp = self.client.post("https://api.mail.tm/token", json={
+ **init.json(),
+ "password": token
+ })
self.client.headers['authorization'] = 'Bearer ' + resp.json()['token']
diff --git a/forefront/typing.py b/forefront/typing.py
new file mode 100644
index 00000000..a11ac49f
--- /dev/null
+++ b/forefront/typing.py
@@ -0,0 +1,36 @@
+class ForeFrontResponse:
+ class Completion:
+ class Choices:
+ def __init__(self, choice: dict) -> None:
+ self.text = choice['text']
+ self.content = self.text.encode()
+ self.index = choice['index']
+ self.logprobs = choice['logprobs']
+ self.finish_reason = choice['finish_reason']
+
+ def __repr__(self) -> str:
+ return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
+
+ def __init__(self, choices: dict) -> None:
+ self.choices = [self.Choices(choice) for choice in choices]
+
+ class Usage:
+ def __init__(self, usage_dict: dict) -> None:
+ self.prompt_tokens = usage_dict['prompt_tokens']
+ self.completion_tokens = usage_dict['completion_tokens']
+ self.total_tokens = usage_dict['total_tokens']
+
+ def __repr__(self):
+ return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
+
+ def __init__(self, response_dict: dict) -> None:
+ self.response_dict = response_dict
+ self.id = response_dict['id']
+ self.object = response_dict['object']
+ self.created = response_dict['created']
+ self.model = response_dict['model']
+ self.completion = self.Completion(response_dict['choices'])
+ self.usage = self.Usage(response_dict['usage'])
+
+ def json(self) -> dict:
+ return self.response_dict
diff --git a/gui/query_methods.py b/gui/query_methods.py
index 49946900..1a4a3402 100644
--- a/gui/query_methods.py
+++ b/gui/query_methods.py
@@ -1,17 +1,24 @@
-import openai_rev
-from openai_rev import forefront, quora, theb, you
+import os
+import sys
+
+sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
+
+import forefront, quora, theb, you
import random
+
def query_forefront(question: str) -> str:
# create an account
- token = forefront.Account.create(logging=True)
-
+ token = forefront.Account.create(logging=False)
+
+ response = ""
# get a response
try:
- result = forefront.StreamingCompletion.create(token = token, prompt = 'hello world', model='gpt-4')
+ for i in forefront.StreamingCompletion.create(token = token, prompt = 'hello world', model='gpt-4'):
+ response += i.completion.choices[0].text
- return result['response']
+ return response
except Exception as e:
# Return error message if an exception occurs
@@ -31,11 +38,11 @@ def query_quora(question: str) -> str:
def query_theb(question: str) -> str:
# Set cloudflare clearance cookie and get answer from GPT-4 model
+ response = ""
try:
result = theb.Completion.create(
prompt = question)
-
- return result['response']
+ return result
except Exception as e:
# Return error message if an exception occurs
@@ -47,16 +54,14 @@ def query_you(question: str) -> str:
try:
result = you.Completion.create(
prompt = question)
- return result.text
+ return result["response"]
except Exception as e:
# Return error message if an exception occurs
return f'An error occurred: {e}. Please make sure you are using a valid cloudflare clearance token and user agent.'
-
-def query(user_input: str, selected_method: str = "Random") -> str:
- # Define a dictionary containing all query methods
- avail_query_methods = {
+# Define a dictionary containing all query methods
+avail_query_methods = {
"Forefront": query_forefront,
"Poe": query_quora,
"Theb": query_theb,
@@ -67,6 +72,8 @@ def query(user_input: str, selected_method: str = "Random") -> str:
# "Ora": query_ora,
}
+def query(user_input: str, selected_method: str = "Random") -> str:
+
# If a specific query method is selected (not "Random") and the method is in the dictionary, try to call it
if selected_method != "Random" and selected_method in avail_query_methods:
try:
diff --git a/gui/streamlit_app.py b/gui/streamlit_app.py
index 2bbf86e0..d1975bbd 100644
--- a/gui/streamlit_app.py
+++ b/gui/streamlit_app.py
@@ -38,9 +38,10 @@ st.header('GPT4free GUI')
question_text_area = st.text_area('🤖 Ask Any Question :', placeholder='Explain quantum computing in 50 words')
if st.button('🧠Think'):
answer = get_answer(question_text_area)
+ escaped = answer.encode('utf-8').decode('unicode-escape')
# Display answer
st.caption("Answer :")
- st.markdown(answer)
+ st.markdown(escaped)
# Hide Streamlit footer
hide_streamlit_style = """
diff --git a/gui/streamlit_chat_app.py b/gui/streamlit_chat_app.py
index aee3563e..dce8ef29 100644
--- a/gui/streamlit_chat_app.py
+++ b/gui/streamlit_chat_app.py
@@ -1,13 +1,13 @@
import os
import sys
+import atexit
sys.path.append(os.path.join(os.path.dirname(__file__), os.path.pardir))
import streamlit as st
from streamlit_chat import message
-from query_methods import query
+from query_methods import query, avail_query_methods
import pickle
-import openai_rev
conversations_file = "conversations.pkl"
@@ -18,6 +18,9 @@ def load_conversations():
return pickle.load(f)
except FileNotFoundError:
return []
+ except EOFError:
+ return []
+
def save_conversations(conversations, current_conversation):
updated = False
@@ -28,8 +31,22 @@ def save_conversations(conversations, current_conversation):
break
if not updated:
conversations.append(current_conversation)
- with open(conversations_file, "wb") as f:
+
+ temp_conversations_file = "temp_" + conversations_file
+ with open(temp_conversations_file, "wb") as f:
pickle.dump(conversations, f)
+
+ os.replace(temp_conversations_file, conversations_file)
+
+
+def exit_handler():
+ print("Exiting, saving data...")
+ # Perform cleanup operations here, like saving data or closing open files.
+ save_conversations(st.session_state.conversations, st.session_state.current_conversation)
+
+# Register the exit_handler function to be called when the program is closing.
+atexit.register(exit_handler)
+
st.header("Chat Placeholder")
@@ -74,7 +91,7 @@ if st.sidebar.button("New Conversation"):
st.session_state['query_method'] = st.sidebar.selectbox(
"Select API:",
- options=openai_rev.Provider.__members__.keys(),
+ options=avail_query_methods,
index=0
)
diff --git a/openai_rev/__init__.py b/openai_rev/__init__.py
deleted file mode 100644
index 9076abe9..00000000
--- a/openai_rev/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-from .openai_rev import Provider
diff --git a/openai_rev/forefront/README.md b/openai_rev/forefront/README.md
deleted file mode 100644
index 94089faa..00000000
--- a/openai_rev/forefront/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-### Example: `forefront` (use like openai pypi package)
-
-```python
-
-from openai_rev import forefront
-
-# create an account
-token = forefront.Account.create(logging=True)
-print(token)
-
-# get a response
-for response in forefront.StreamingCompletion.create(token=token,
- prompt='hello world', model='gpt-4'):
- print(response.completion.choices[0].text, end='')
-```
\ No newline at end of file
diff --git a/openai_rev/forefront/__init__.py b/openai_rev/forefront/__init__.py
deleted file mode 100644
index 6fece65a..00000000
--- a/openai_rev/forefront/__init__.py
+++ /dev/null
@@ -1,189 +0,0 @@
-from json import loads
-from re import match
-from time import time, sleep
-from typing import Generator, Optional
-from uuid import uuid4
-
-from fake_useragent import UserAgent
-from requests import post
-from tls_client import Session
-
-from .mail import Mail
-from .models import ForeFrontResponse
-
-
-class Account:
- @staticmethod
- def create(proxy: Optional[str] = None, logging: bool = False):
- proxies = {'http': 'http://' + proxy, 'https': 'http://' + proxy} if proxy else False
-
- start = time()
-
- mail_client = Mail(proxies)
- mail_token = None
- mail_address = mail_client.get_mail()
-
- # print(mail_address)
-
- client = Session(client_identifier='chrome110')
- client.proxies = proxies
- client.headers = {
- 'origin': 'https://accounts.forefront.ai',
- 'user-agent': UserAgent().random,
- }
-
- response = client.post(
- 'https://clerk.forefront.ai/v1/client/sign_ups?_clerk_js_version=4.32.6',
- data={'email_address': mail_address},
- )
-
- trace_token = response.json()['response']['id']
- if logging:
- print(trace_token)
-
- response = client.post(
- f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/prepare_verification?_clerk_js_version=4.32.6',
- data={
- 'strategy': 'email_code',
- },
- )
-
- if logging:
- print(response.text)
-
- if 'sign_up_attempt' not in response.text:
- return 'Failed to create account!'
-
- while True:
- sleep(1)
- for _ in mail_client.fetch_inbox():
- if logging:
- print(mail_client.get_message_content(_['id']))
- mail_token = match(r'(\d){5,6}', mail_client.get_message_content(_['id'])).group(0)
-
- if mail_token:
- break
-
- if logging:
- print(mail_token)
-
- response = client.post(
- f'https://clerk.forefront.ai/v1/client/sign_ups/{trace_token}/attempt_verification?_clerk_js_version=4.38.4',
- data={'code': mail_token, 'strategy': 'email_code'},
- )
-
- if logging:
- print(response.json())
-
- token = response.json()['client']['sessions'][0]['last_active_token']['jwt']
-
- with open('accounts.txt', 'a') as f:
- f.write(f'{mail_address}:{token}\n')
-
- if logging:
- print(time() - start)
-
- return token
-
-
-class StreamingCompletion:
- @staticmethod
- def create(
- token=None,
- chat_id=None,
- prompt='',
- action_type='new',
- default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
- model='gpt-4',
- ) -> Generator[ForeFrontResponse, None, None]:
- if not token:
- raise Exception('Token is required!')
- if not chat_id:
- chat_id = str(uuid4())
-
- headers = {
- 'authority': 'chat-server.tenant-forefront-default.knative.chi.coreweave.com',
- 'accept': '*/*',
- 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
- 'authorization': 'Bearer ' + token,
- 'cache-control': 'no-cache',
- 'content-type': 'application/json',
- 'origin': 'https://chat.forefront.ai',
- 'pragma': 'no-cache',
- 'referer': 'https://chat.forefront.ai/',
- 'sec-ch-ua': '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"',
- 'sec-ch-ua-mobile': '?0',
- 'sec-ch-ua-platform': '"macOS"',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'cross-site',
- 'user-agent': UserAgent().random,
- }
-
- json_data = {
- 'text': prompt,
- 'action': action_type,
- 'parentId': chat_id,
- 'workspaceId': chat_id,
- 'messagePersona': default_persona,
- 'model': model,
- }
-
- for chunk in post(
- 'https://chat-server.tenant-forefront-default.knative.chi.coreweave.com/chat',
- headers=headers,
- json=json_data,
- stream=True,
- ).iter_lines():
- if b'finish_reason":null' in chunk:
- data = loads(chunk.decode('utf-8').split('data: ')[1])
- token = data['choices'][0]['delta'].get('content')
-
- if token is not None:
- yield ForeFrontResponse(
- **{
- 'id': chat_id,
- 'object': 'text_completion',
- 'created': int(time()),
- 'text': token,
- 'model': model,
- 'choices': [{'text': token, 'index': 0, 'logprobs': None, 'finish_reason': 'stop'}],
- 'usage': {
- 'prompt_tokens': len(prompt),
- 'completion_tokens': len(token),
- 'total_tokens': len(prompt) + len(token),
- },
- }
- )
-
-
-class Completion:
- @staticmethod
- def create(
- token=None,
- chat_id=None,
- prompt='',
- action_type='new',
- default_persona='607e41fe-95be-497e-8e97-010a59b2e2c0', # default
- model='gpt-4',
- ) -> ForeFrontResponse:
- text = ''
- final_response = None
- for response in StreamingCompletion.create(
- token=token,
- chat_id=chat_id,
- prompt=prompt,
- action_type=action_type,
- default_persona=default_persona,
- model=model,
- ):
- if response:
- final_response = response
- text += response.text
-
- if final_response:
- final_response.text = text
- else:
- raise Exception('Unable to get the response, Please try again')
-
- return final_response
diff --git a/openai_rev/forefront/models.py b/openai_rev/forefront/models.py
deleted file mode 100644
index 23e90903..00000000
--- a/openai_rev/forefront/models.py
+++ /dev/null
@@ -1,26 +0,0 @@
-from typing import Any, List
-
-from pydantic import BaseModel
-
-
-class Choice(BaseModel):
- text: str
- index: int
- logprobs: Any
- finish_reason: str
-
-
-class Usage(BaseModel):
- prompt_tokens: int
- completion_tokens: int
- total_tokens: int
-
-
-class ForeFrontResponse(BaseModel):
- id: str
- object: str
- created: int
- model: str
- choices: List[Choice]
- usage: Usage
- text: str
diff --git a/openai_rev/openai_rev.py b/openai_rev/openai_rev.py
deleted file mode 100644
index 6b88298b..00000000
--- a/openai_rev/openai_rev.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from enum import Enum
-
-from openai_rev import forefront
-from openai_rev import quora
-from openai_rev import theb
-from openai_rev import you
-
-
-class Provider(Enum):
- You = 'you'
- Poe = 'poe'
- ForeFront = 'fore_front'
- Theb = 'theb'
-
-
-class Completion:
- @staticmethod
- def create(provider: Provider, prompt: str, **kwargs):
- if provider == Provider.Poe:
- return Completion.__poe_service(prompt, **kwargs)
- elif provider == Provider.You:
- return Completion.__you_service(prompt, **kwargs)
- elif provider == Provider.ForeFront:
- return Completion.__fore_front_service(prompt, **kwargs)
- elif provider == Provider.Theb:
- return Completion.__theb_service(prompt, **kwargs)
-
- @classmethod
- def __you_service(cls, prompt: str, **kwargs) -> str:
- return you.Completion.create(prompt, **kwargs).text
-
- @classmethod
- def __poe_service(cls, prompt: str, **kwargs) -> str:
- return quora.Completion.create(prompt=prompt, **kwargs).text
-
- @classmethod
- def __fore_front_service(cls, prompt: str, **kwargs) -> str:
- return forefront.Completion.create(prompt=prompt, **kwargs).text
-
- @classmethod
- def __theb_service(cls, prompt: str, **kwargs):
- return ''.join(theb.Completion.create(prompt=prompt))
diff --git a/openaihosted/__init__.py b/openaihosted/__init__.py
new file mode 100644
index 00000000..c773b3f9
--- /dev/null
+++ b/openaihosted/__init__.py
@@ -0,0 +1,60 @@
+import json
+import re
+from fake_useragent import UserAgent
+
+import requests
+
+class Completion:
+ @staticmethod
+ def create(
+ systemprompt:str,
+ text:str,
+ assistantprompt:str
+ ):
+
+ data = [
+ {"role": "system", "content": systemprompt},
+ {"role": "user", "content": "hi"},
+ {"role": "assistant", "content": assistantprompt},
+ {"role": "user", "content": text},
+ ]
+ url = f'https://openai.a2hosted.com/chat?q={Completion.__get_query_param(data)}'
+
+ try:
+ response = requests.get(url, headers=Completion.__get_headers(), stream=True)
+ except:
+ return Completion.__get_failure_response()
+
+ sentence = ""
+
+ for message in response.iter_content(chunk_size=1024):
+ message = message.decode('utf-8')
+ msg_match, num_match = re.search(r'"msg":"([^"]+)"', message), re.search(r'\[DONE\] (\d+)', message)
+ if msg_match:
+ # Put the captured group into a sentence
+ sentence += msg_match.group(1)
+ return {
+ 'response': sentence
+ }
+
+ @classmethod
+ def __get_headers(cls) -> dict:
+ return {
+ 'authority': 'openai.a2hosted.com',
+ 'accept': 'text/event-stream',
+ 'accept-language': 'en-US,en;q=0.9,id;q=0.8,ja;q=0.7',
+ 'cache-control': 'no-cache',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'cross-site',
+ 'user-agent': UserAgent().random
+ }
+
+ @classmethod
+ def __get_failure_response(cls) -> dict:
+ return dict(response='Unable to fetch the response, Please try again.', links=[], extra={})
+
+ @classmethod
+ def __get_query_param(cls, conversation) -> str:
+ encoded_conversation = json.dumps(conversation)
+ return encoded_conversation.replace(" ", "%20").replace('"', '%22').replace("'", "%27")
\ No newline at end of file
diff --git a/openaihosted/readme.md b/openaihosted/readme.md
new file mode 100644
index 00000000..acd60bab
--- /dev/null
+++ b/openaihosted/readme.md
@@ -0,0 +1,10 @@
+### Example: `openaihosted`
+
+
+```python
+# import library
+import openaihosted
+
+res = openaihosted.Completion.create(systemprompt="U are ChatGPT", text="What is 4+4", assistantprompt="U are a helpful assistant.")['response']
+print(res) ## Responds with the answer
+```
diff --git a/openai_rev/quora/README.md b/quora/README.md
similarity index 70%
rename from openai_rev/quora/README.md
rename to quora/README.md
index dc2bb32d..24679277 100644
--- a/openai_rev/quora/README.md
+++ b/quora/README.md
@@ -1,4 +1,5 @@
-#### warning !!!
+
+> ⚠️ Warning !!!
poe.com added security and can detect if you are making automated requests. You may get your account banned if you are using this api.
The normal non-driver api is also currently not very stable
@@ -16,44 +17,44 @@ models = {
}
```
-#### !! new: bot creation
+### New: bot creation
```python
# import quora (poe) package
-from openai_rev import quora
+import quora
# create account
# make sure to set enable_bot_creation to True
-token = quora.Account.create(logging=True, enable_bot_creation=True)
+token = quora.Account.create(logging = True, enable_bot_creation=True)
model = quora.Model.create(
- token=token,
- model='gpt-3.5-turbo', # or claude-instant-v1.0
- system_prompt='you are ChatGPT a large language model ...'
+ token = token,
+ model = 'gpt-3.5-turbo', # or claude-instant-v1.0
+ system_prompt = 'you are ChatGPT a large language model ...'
)
-print(model.name) # gptx....
+print(model.name) # gptx....
# streaming response
for response in quora.StreamingCompletion.create(
- custom_model=model.name,
- prompt='hello world',
- token=token):
- print(response.text)
+ custom_model = model.name,
+ prompt ='hello world',
+ token = token):
+
+ print(response.completion.choices[0].text)
```
-#### Normal Response:
+### Normal Response:
```python
-import quora
response = quora.Completion.create(model = 'gpt-4',
prompt = 'hello world',
- token = 'token')
+ token = token)
-print(response.text)
+print(response.completion.choices[0].text)
```
-#### Update Use This For Poe
+### Update Use This For Poe
```python
from quora import Poe
diff --git a/openai_rev/quora/__init__.py b/quora/__init__.py
similarity index 83%
rename from openai_rev/quora/__init__.py
rename to quora/__init__.py
index 5303f206..cd5ec8f9 100644
--- a/openai_rev/quora/__init__.py
+++ b/quora/__init__.py
@@ -6,12 +6,11 @@ from pathlib import Path
from random import choice, choices, randint
from re import search, findall
from string import ascii_letters, digits
-from typing import Optional, Union, List, Any, Generator
+from typing import Optional, Union
from urllib.parse import unquote
import selenium.webdriver.support.expected_conditions as EC
from fake_useragent import UserAgent
-from pydantic import BaseModel
from pypasser import reCaptchaV3
from requests import Session
from selenium.webdriver import Firefox, Chrome, FirefoxOptions, ChromeOptions
@@ -19,8 +18,8 @@ from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from tls_client import Session as TLS
-from .api import Client as PoeClient
-from .mail import Emailnator
+from quora.api import Client as PoeClient
+from quora.mail import Emailnator
SELENIUM_WEB_DRIVER_ERROR_MSG = b'''The error message you are receiving is due to the `geckodriver` executable not
being found in your system\'s PATH. To resolve this issue, you need to download the geckodriver and add its location
@@ -68,27 +67,42 @@ def extract_formkey(html):
return formkey
-class Choice(BaseModel):
- text: str
- index: int
- logprobs: Any
- finish_reason: str
+class PoeResponse:
+ class Completion:
+ class Choices:
+ def __init__(self, choice: dict) -> None:
+ self.text = choice['text']
+ self.content = self.text.encode()
+ self.index = choice['index']
+ self.logprobs = choice['logprobs']
+ self.finish_reason = choice['finish_reason']
+ def __repr__(self) -> str:
+ return f'''<__main__.APIResponse.Completion.Choices(\n text = {self.text.encode()},\n index = {self.index},\n logprobs = {self.logprobs},\n finish_reason = {self.finish_reason})object at 0x1337>'''
-class Usage(BaseModel):
- prompt_tokens: int
- completion_tokens: int
- total_tokens: int
+ def __init__(self, choices: dict) -> None:
+ self.choices = [self.Choices(choice) for choice in choices]
+ class Usage:
+ def __init__(self, usage_dict: dict) -> None:
+ self.prompt_tokens = usage_dict['prompt_tokens']
+ self.completion_tokens = usage_dict['completion_tokens']
+ self.total_tokens = usage_dict['total_tokens']
-class PoeResponse(BaseModel):
- id: int
- object: str
- created: int
- model: str
- choices: List[Choice]
- usage: Usage
- text: str
+ def __repr__(self):
+ return f'''<__main__.APIResponse.Usage(\n prompt_tokens = {self.prompt_tokens},\n completion_tokens = {self.completion_tokens},\n total_tokens = {self.total_tokens})object at 0x1337>'''
+
+ def __init__(self, response_dict: dict) -> None:
+ self.response_dict = response_dict
+ self.id = response_dict['id']
+ self.object = response_dict['object']
+ self.created = response_dict['created']
+ self.model = response_dict['model']
+ self.completion = self.Completion(response_dict['choices'])
+ self.usage = self.Usage(response_dict['usage'])
+
+ def json(self) -> dict:
+ return self.response_dict
class ModelResponse:
@@ -102,12 +116,17 @@ class ModelResponse:
class Model:
@staticmethod
def create(
- token: str,
- model: str = 'gpt-3.5-turbo', # claude-instant
- system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
- description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
- handle: str = None,
+ token: str,
+ model: str = 'gpt-3.5-turbo', # claude-instant
+ system_prompt: str = 'You are ChatGPT a large language model developed by Openai. Answer as consisely as possible',
+ description: str = 'gpt-3.5 language model from openai, skidded by poe.com',
+ handle: str = None,
) -> ModelResponse:
+ models = {
+ 'gpt-3.5-turbo': 'chinchilla',
+ 'claude-instant-v1.0': 'a2',
+ 'gpt-4': 'beaver',
+ }
if not handle:
handle = f'gptx{randint(1111111, 9999999)}'
@@ -143,7 +162,7 @@ class Model:
obj={
'queryName': 'CreateBotMain_poeBotCreate_Mutation',
'variables': {
- 'model': MODELS[model],
+ 'model': models[model],
'handle': handle,
'prompt': system_prompt,
'isPromptPublic': True,
@@ -183,9 +202,9 @@ class Model:
class Account:
@staticmethod
def create(
- proxy: Optional[str] = None,
- logging: bool = False,
- enable_bot_creation: bool = False,
+ proxy: Optional[str] = None,
+ logging: bool = False,
+ enable_bot_creation: bool = False,
):
client = TLS(client_identifier='chrome110')
client.proxies = {'http': f'http://{proxy}', 'https': f'http://{proxy}'} if proxy else None
@@ -290,23 +309,22 @@ class Account:
class StreamingCompletion:
@staticmethod
def create(
- model: str = 'gpt-4',
- custom_model: bool = None,
- prompt: str = 'hello world',
- token: str = '',
- ) -> Generator[PoeResponse, None, None]:
+ model: str = 'gpt-4',
+ custom_model: bool = None,
+ prompt: str = 'hello world',
+ token: str = '',
+ ):
_model = MODELS[model] if not custom_model else custom_model
client = PoeClient(token)
for chunk in client.send_message(_model, prompt):
yield PoeResponse(
- **{
+ {
'id': chunk['messageId'],
'object': 'text_completion',
'created': chunk['creationTime'],
'model': _model,
- 'text': chunk['text_new'],
'choices': [
{
'text': chunk['text_new'],
@@ -325,28 +343,33 @@ class StreamingCompletion:
class Completion:
- @staticmethod
def create(
- model: str = 'gpt-4',
- custom_model: str = None,
- prompt: str = 'hello world',
- token: str = '',
- ) -> PoeResponse:
- _model = MODELS[model] if not custom_model else custom_model
+ model: str = 'gpt-4',
+ custom_model: str = None,
+ prompt: str = 'hello world',
+ token: str = '',
+ ):
+ models = {
+ 'sage': 'capybara',
+ 'gpt-4': 'beaver',
+ 'claude-v1.2': 'a2_2',
+ 'claude-instant-v1.0': 'a2',
+ 'gpt-3.5-turbo': 'chinchilla',
+ }
+
+ _model = models[model] if not custom_model else custom_model
client = PoeClient(token)
- chunk = None
- for response in client.send_message(_model, prompt):
- chunk = response
+ for chunk in client.send_message(_model, prompt):
+ pass
return PoeResponse(
- **{
+ {
'id': chunk['messageId'],
'object': 'text_completion',
'created': chunk['creationTime'],
'model': _model,
- 'text': chunk['text'],
'choices': [
{
'text': chunk['text'],
@@ -366,12 +389,12 @@ class Completion:
class Poe:
def __init__(
- self,
- model: str = 'ChatGPT',
- driver: str = 'firefox',
- download_driver: bool = False,
- driver_path: Optional[str] = None,
- cookie_path: str = './quora/cookie.json',
+ self,
+ model: str = 'ChatGPT',
+ driver: str = 'firefox',
+ download_driver: bool = False,
+ driver_path: Optional[str] = None,
+ cookie_path: str = './quora/cookie.json',
):
# validating the model
if model and model not in MODELS:
@@ -450,12 +473,12 @@ class Poe:
return response
def create_bot(
- self,
- name: str,
- /,
- prompt: str = '',
- base_model: str = 'ChatGPT',
- description: str = '',
+ self,
+ name: str,
+ /,
+ prompt: str = '',
+ base_model: str = 'ChatGPT',
+ description: str = '',
) -> None:
if base_model not in MODELS:
raise RuntimeError('Sorry, the base_model you provided does not exist. Please check and try again.')
diff --git a/openai_rev/quora/api.py b/quora/api.py
similarity index 95%
rename from openai_rev/quora/api.py
rename to quora/api.py
index 42814f2c..697f6663 100644
--- a/openai_rev/quora/api.py
+++ b/quora/api.py
@@ -384,7 +384,7 @@ class Client:
continue
# update info about response
- message["text_new"] = message["text"][len(last_text) :]
+ message["text_new"] = message["text"][len(last_text):]
last_text = message["text"]
message_id = message["messageId"]
@@ -456,21 +456,21 @@ class Client:
logger.info(f"No more messages left to delete.")
def create_bot(
- self,
- handle,
- prompt="",
- base_model="chinchilla",
- description="",
- intro_message="",
- api_key=None,
- api_bot=False,
- api_url=None,
- prompt_public=True,
- pfp_url=None,
- linkification=False,
- markdown_rendering=True,
- suggested_replies=False,
- private=False,
+ self,
+ handle,
+ prompt="",
+ base_model="chinchilla",
+ description="",
+ intro_message="",
+ api_key=None,
+ api_bot=False,
+ api_url=None,
+ prompt_public=True,
+ pfp_url=None,
+ linkification=False,
+ markdown_rendering=True,
+ suggested_replies=False,
+ private=False,
):
result = self.send_query(
"PoeBotCreateMutation",
@@ -499,21 +499,21 @@ class Client:
return data
def edit_bot(
- self,
- bot_id,
- handle,
- prompt="",
- base_model="chinchilla",
- description="",
- intro_message="",
- api_key=None,
- api_url=None,
- private=False,
- prompt_public=True,
- pfp_url=None,
- linkification=False,
- markdown_rendering=True,
- suggested_replies=False,
+ self,
+ bot_id,
+ handle,
+ prompt="",
+ base_model="chinchilla",
+ description="",
+ intro_message="",
+ api_key=None,
+ api_url=None,
+ private=False,
+ prompt_public=True,
+ pfp_url=None,
+ linkification=False,
+ markdown_rendering=True,
+ suggested_replies=False,
):
result = self.send_query(
"PoeBotEditMutation",
diff --git a/openai_rev/quora/cookies.txt b/quora/cookies.txt
similarity index 100%
rename from openai_rev/quora/cookies.txt
rename to quora/cookies.txt
diff --git a/openai_rev/quora/graphql/AddHumanMessageMutation.graphql b/quora/graphql/AddHumanMessageMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/AddHumanMessageMutation.graphql
rename to quora/graphql/AddHumanMessageMutation.graphql
diff --git a/openai_rev/quora/graphql/AddMessageBreakMutation.graphql b/quora/graphql/AddMessageBreakMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/AddMessageBreakMutation.graphql
rename to quora/graphql/AddMessageBreakMutation.graphql
diff --git a/openai_rev/quora/graphql/AutoSubscriptionMutation.graphql b/quora/graphql/AutoSubscriptionMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/AutoSubscriptionMutation.graphql
rename to quora/graphql/AutoSubscriptionMutation.graphql
diff --git a/openai_rev/quora/graphql/BioFragment.graphql b/quora/graphql/BioFragment.graphql
similarity index 100%
rename from openai_rev/quora/graphql/BioFragment.graphql
rename to quora/graphql/BioFragment.graphql
diff --git a/openai_rev/quora/graphql/ChatAddedSubscription.graphql b/quora/graphql/ChatAddedSubscription.graphql
similarity index 100%
rename from openai_rev/quora/graphql/ChatAddedSubscription.graphql
rename to quora/graphql/ChatAddedSubscription.graphql
diff --git a/openai_rev/quora/graphql/ChatFragment.graphql b/quora/graphql/ChatFragment.graphql
similarity index 100%
rename from openai_rev/quora/graphql/ChatFragment.graphql
rename to quora/graphql/ChatFragment.graphql
diff --git a/openai_rev/quora/graphql/ChatListPaginationQuery.graphql b/quora/graphql/ChatListPaginationQuery.graphql
similarity index 100%
rename from openai_rev/quora/graphql/ChatListPaginationQuery.graphql
rename to quora/graphql/ChatListPaginationQuery.graphql
diff --git a/openai_rev/quora/graphql/ChatPaginationQuery.graphql b/quora/graphql/ChatPaginationQuery.graphql
similarity index 100%
rename from openai_rev/quora/graphql/ChatPaginationQuery.graphql
rename to quora/graphql/ChatPaginationQuery.graphql
diff --git a/openai_rev/quora/graphql/ChatViewQuery.graphql b/quora/graphql/ChatViewQuery.graphql
similarity index 100%
rename from openai_rev/quora/graphql/ChatViewQuery.graphql
rename to quora/graphql/ChatViewQuery.graphql
diff --git a/openai_rev/quora/graphql/DeleteHumanMessagesMutation.graphql b/quora/graphql/DeleteHumanMessagesMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/DeleteHumanMessagesMutation.graphql
rename to quora/graphql/DeleteHumanMessagesMutation.graphql
diff --git a/openai_rev/quora/graphql/DeleteMessageMutation.graphql b/quora/graphql/DeleteMessageMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/DeleteMessageMutation.graphql
rename to quora/graphql/DeleteMessageMutation.graphql
diff --git a/openai_rev/quora/graphql/HandleFragment.graphql b/quora/graphql/HandleFragment.graphql
similarity index 100%
rename from openai_rev/quora/graphql/HandleFragment.graphql
rename to quora/graphql/HandleFragment.graphql
diff --git a/openai_rev/quora/graphql/LoginWithVerificationCodeMutation.graphql b/quora/graphql/LoginWithVerificationCodeMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/LoginWithVerificationCodeMutation.graphql
rename to quora/graphql/LoginWithVerificationCodeMutation.graphql
diff --git a/openai_rev/quora/graphql/MessageAddedSubscription.graphql b/quora/graphql/MessageAddedSubscription.graphql
similarity index 100%
rename from openai_rev/quora/graphql/MessageAddedSubscription.graphql
rename to quora/graphql/MessageAddedSubscription.graphql
diff --git a/openai_rev/quora/graphql/MessageDeletedSubscription.graphql b/quora/graphql/MessageDeletedSubscription.graphql
similarity index 100%
rename from openai_rev/quora/graphql/MessageDeletedSubscription.graphql
rename to quora/graphql/MessageDeletedSubscription.graphql
diff --git a/openai_rev/quora/graphql/MessageFragment.graphql b/quora/graphql/MessageFragment.graphql
similarity index 100%
rename from openai_rev/quora/graphql/MessageFragment.graphql
rename to quora/graphql/MessageFragment.graphql
diff --git a/openai_rev/quora/graphql/MessageRemoveVoteMutation.graphql b/quora/graphql/MessageRemoveVoteMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/MessageRemoveVoteMutation.graphql
rename to quora/graphql/MessageRemoveVoteMutation.graphql
diff --git a/openai_rev/quora/graphql/MessageSetVoteMutation.graphql b/quora/graphql/MessageSetVoteMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/MessageSetVoteMutation.graphql
rename to quora/graphql/MessageSetVoteMutation.graphql
diff --git a/openai_rev/quora/graphql/PoeBotCreateMutation.graphql b/quora/graphql/PoeBotCreateMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/PoeBotCreateMutation.graphql
rename to quora/graphql/PoeBotCreateMutation.graphql
diff --git a/openai_rev/quora/graphql/PoeBotEditMutation.graphql b/quora/graphql/PoeBotEditMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/PoeBotEditMutation.graphql
rename to quora/graphql/PoeBotEditMutation.graphql
diff --git a/openai_rev/quora/graphql/SendMessageMutation.graphql b/quora/graphql/SendMessageMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/SendMessageMutation.graphql
rename to quora/graphql/SendMessageMutation.graphql
diff --git a/openai_rev/quora/graphql/SendVerificationCodeForLoginMutation.graphql b/quora/graphql/SendVerificationCodeForLoginMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/SendVerificationCodeForLoginMutation.graphql
rename to quora/graphql/SendVerificationCodeForLoginMutation.graphql
diff --git a/openai_rev/quora/graphql/ShareMessagesMutation.graphql b/quora/graphql/ShareMessagesMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/ShareMessagesMutation.graphql
rename to quora/graphql/ShareMessagesMutation.graphql
diff --git a/openai_rev/quora/graphql/SignupWithVerificationCodeMutation.graphql b/quora/graphql/SignupWithVerificationCodeMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/SignupWithVerificationCodeMutation.graphql
rename to quora/graphql/SignupWithVerificationCodeMutation.graphql
diff --git a/openai_rev/quora/graphql/StaleChatUpdateMutation.graphql b/quora/graphql/StaleChatUpdateMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/StaleChatUpdateMutation.graphql
rename to quora/graphql/StaleChatUpdateMutation.graphql
diff --git a/openai_rev/quora/graphql/SubscriptionsMutation.graphql b/quora/graphql/SubscriptionsMutation.graphql
similarity index 100%
rename from openai_rev/quora/graphql/SubscriptionsMutation.graphql
rename to quora/graphql/SubscriptionsMutation.graphql
diff --git a/openai_rev/quora/graphql/SummarizePlainPostQuery.graphql b/quora/graphql/SummarizePlainPostQuery.graphql
similarity index 100%
rename from openai_rev/quora/graphql/SummarizePlainPostQuery.graphql
rename to quora/graphql/SummarizePlainPostQuery.graphql
diff --git a/openai_rev/quora/graphql/SummarizeQuotePostQuery.graphql b/quora/graphql/SummarizeQuotePostQuery.graphql
similarity index 100%
rename from openai_rev/quora/graphql/SummarizeQuotePostQuery.graphql
rename to quora/graphql/SummarizeQuotePostQuery.graphql
diff --git a/openai_rev/quora/graphql/SummarizeSharePostQuery.graphql b/quora/graphql/SummarizeSharePostQuery.graphql
similarity index 100%
rename from openai_rev/quora/graphql/SummarizeSharePostQuery.graphql
rename to quora/graphql/SummarizeSharePostQuery.graphql
diff --git a/openai_rev/quora/graphql/UserSnippetFragment.graphql b/quora/graphql/UserSnippetFragment.graphql
similarity index 100%
rename from openai_rev/quora/graphql/UserSnippetFragment.graphql
rename to quora/graphql/UserSnippetFragment.graphql
diff --git a/openai_rev/quora/graphql/ViewerInfoQuery.graphql b/quora/graphql/ViewerInfoQuery.graphql
similarity index 100%
rename from openai_rev/quora/graphql/ViewerInfoQuery.graphql
rename to quora/graphql/ViewerInfoQuery.graphql
diff --git a/openai_rev/quora/graphql/ViewerStateFragment.graphql b/quora/graphql/ViewerStateFragment.graphql
similarity index 100%
rename from openai_rev/quora/graphql/ViewerStateFragment.graphql
rename to quora/graphql/ViewerStateFragment.graphql
diff --git a/openai_rev/quora/graphql/ViewerStateUpdatedSubscription.graphql b/quora/graphql/ViewerStateUpdatedSubscription.graphql
similarity index 100%
rename from openai_rev/quora/graphql/ViewerStateUpdatedSubscription.graphql
rename to quora/graphql/ViewerStateUpdatedSubscription.graphql
diff --git a/openai_rev/quora/graphql/__init__.py b/quora/graphql/__init__.py
similarity index 100%
rename from openai_rev/quora/graphql/__init__.py
rename to quora/graphql/__init__.py
diff --git a/openai_rev/quora/mail.py b/quora/mail.py
similarity index 93%
rename from openai_rev/quora/mail.py
rename to quora/mail.py
index 864d9568..e6ce96c2 100644
--- a/openai_rev/quora/mail.py
+++ b/quora/mail.py
@@ -42,7 +42,9 @@ class Emailnator:
while True:
sleep(2)
- mail_token = self.client.post("https://www.emailnator.com/message-list", json={"email": self.email})
+ mail_token = self.client.post(
+ "https://www.emailnator.com/message-list", json={"email": self.email}
+ )
mail_token = loads(mail_token.text)["messageData"]
diff --git a/openai_rev/theb/theb_test.py b/test.py
similarity index 74%
rename from openai_rev/theb/theb_test.py
rename to test.py
index 805ef094..4b39bd0a 100644
--- a/openai_rev/theb/theb_test.py
+++ b/test.py
@@ -1,4 +1,5 @@
-from openai_rev import theb
+import theb
for token in theb.Completion.create('hello world'):
print(token, end='', flush=True)
+ print('asdsos')
\ No newline at end of file
diff --git a/openai_rev/theb/README.md b/theb/README.md
similarity index 87%
rename from openai_rev/theb/README.md
rename to theb/README.md
index bd37ba3f..ca978fce 100644
--- a/openai_rev/theb/README.md
+++ b/theb/README.md
@@ -1,10 +1,12 @@
### Example: `theb` (use like openai pypi package)
+
```python
# import library
-from openai_rev import theb
+import theb
# simple streaming completion
for token in theb.Completion.create('hello world'):
print(token, end='', flush=True)
-```
\ No newline at end of file
+print("")
+```
diff --git a/openai_rev/theb/__init__.py b/theb/__init__.py
similarity index 77%
rename from openai_rev/theb/__init__.py
rename to theb/__init__.py
index fa79fdd9..726e025e 100644
--- a/openai_rev/theb/__init__.py
+++ b/theb/__init__.py
@@ -1,11 +1,9 @@
+from re import findall
from json import loads
from queue import Queue, Empty
-from re import findall
from threading import Thread
-
from curl_cffi import requests
-
class Completion:
# experimental
part1 = '{"role":"assistant","id":"chatcmpl'
@@ -16,8 +14,7 @@ class Completion:
message_queue = Queue()
stream_completed = False
- @classmethod
- def request(cls, prompt: str):
+ def request(prompt: str):
headers = {
'authority': 'chatbot.theb.ai',
'content-type': 'application/json',
@@ -25,11 +22,12 @@ class Completion:
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
}
- requests.post(
- 'https://chatbot.theb.ai/api/chat-process',
- headers=headers,
- content_callback=Completion.handle_stream_response,
- json={'prompt': prompt, 'options': {}},
+ requests.post('https://chatbot.theb.ai/api/chat-process', headers=headers,
+ content_callback = Completion.handle_stream_response,
+ json = {
+ 'prompt': prompt,
+ 'options': {}
+ }
)
Completion.stream_completed = True
@@ -38,7 +36,7 @@ class Completion:
def create(prompt: str):
Thread(target=Completion.request, args=[prompt]).start()
- while not Completion.stream_completed or not Completion.message_queue.empty():
+ while Completion.stream_completed != True or not Completion.message_queue.empty():
try:
message = Completion.message_queue.get(timeout=0.01)
for message in findall(Completion.regex, message):
diff --git a/theb/theb_test.py b/theb/theb_test.py
new file mode 100644
index 00000000..177c970a
--- /dev/null
+++ b/theb/theb_test.py
@@ -0,0 +1,4 @@
+import theb
+
+for token in theb.Completion.create('hello world'):
+ print(token, end='', flush=True)
\ No newline at end of file
diff --git a/unfinished/cocalc/cocalc_test.py b/unfinished/cocalc/cocalc_test.py
deleted file mode 100644
index d6266518..00000000
--- a/unfinished/cocalc/cocalc_test.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import cocalc
-
-response = cocalc.Completion.create(
- prompt='hello world'
-)
-
-print(response)
diff --git a/unfinished/openaihosted/README.md b/unfinished/openaihosted/README.md
deleted file mode 100644
index 60b5ea7a..00000000
--- a/unfinished/openaihosted/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-writegpt.ai
-to do:
-- code ref
diff --git a/unfinished/openaihosted/__init__.py b/unfinished/openaihosted/__init__.py
deleted file mode 100644
index ba4d3982..00000000
--- a/unfinished/openaihosted/__init__.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import json
-import re
-
-import requests
-
-headers = {
- 'authority': 'openai.a2hosted.com',
- 'accept': 'text/event-stream',
- 'accept-language': 'en-US,en;q=0.9,id;q=0.8,ja;q=0.7',
- 'cache-control': 'no-cache',
- 'sec-fetch-dest': 'empty',
- 'sec-fetch-mode': 'cors',
- 'sec-fetch-site': 'cross-site',
- 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.0.0',
-}
-
-
-def create_query_param(conversation):
- encoded_conversation = json.dumps(conversation)
- return encoded_conversation.replace(" ", "%20").replace('"', '%22').replace("'", "%27")
-
-
-user_input = input("Enter your message: ")
-
-data = [
- {"role": "system", "content": "You are a helpful assistant."},
- {"role": "user", "content": "hi"},
- {"role": "assistant", "content": "Hello! How can I assist you today?"},
- {"role": "user", "content": user_input},
-]
-
-query_param = create_query_param(data)
-url = f'https://openai.a2hosted.com/chat?q={query_param}'
-
-response = requests.get(url, headers=headers, stream=True)
-
-for message in response.iter_content(chunk_size=1024):
- message = message.decode('utf-8')
- msg_match, num_match = re.search(r'"msg":"(.*?)"', message), re.search(r'\[DONE\] (\d+)', message)
- if msg_match: print(msg_match.group(1))
- if num_match: print(num_match.group(1))
diff --git a/openai_rev/you/README.md b/you/README.md
similarity index 92%
rename from openai_rev/you/README.md
rename to you/README.md
index f759c27c..25c20085 100644
--- a/openai_rev/you/README.md
+++ b/you/README.md
@@ -1,8 +1,7 @@
### Example: `you` (use like openai pypi package)
```python
-
-from openai_rev import you
+import you
# simple request with links and details
response = you.Completion.create(
@@ -26,7 +25,8 @@ chat = []
while True:
prompt = input("You: ")
-
+ if prompt == 'q':
+ break
response = you.Completion.create(
prompt=prompt,
chat=chat)
@@ -34,4 +34,4 @@ while True:
print("Bot:", response["response"])
chat.append({"question": prompt, "answer": response["response"]})
-```
\ No newline at end of file
+```
diff --git a/openai_rev/you/__init__.py b/you/__init__.py
similarity index 66%
rename from openai_rev/you/__init__.py
rename to you/__init__.py
index 50d74152..8bf31f0d 100644
--- a/openai_rev/you/__init__.py
+++ b/you/__init__.py
@@ -1,36 +1,28 @@
-import json
import re
-from typing import Optional, List, Dict, Any
+from json import loads
from uuid import uuid4
from fake_useragent import UserAgent
-from pydantic import BaseModel
from tls_client import Session
-class PoeResponse(BaseModel):
- text: Optional[str] = None
- links: List[str] = []
- extra: Dict[str, Any] = {}
-
-
class Completion:
@staticmethod
def create(
- prompt: str,
- page: int = 1,
- count: int = 10,
- safe_search: str = 'Moderate',
- on_shopping_page: bool = False,
- mkt: str = '',
- response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
- domain: str = 'youchat',
- query_trace_id: str = None,
- chat: list = None,
- include_links: bool = False,
- detailed: bool = False,
- debug: bool = False,
- ) -> PoeResponse:
+ prompt: str,
+ page: int = 1,
+ count: int = 10,
+ safe_search: str = 'Moderate',
+ on_shopping_page: bool = False,
+ mkt: str = '',
+ response_filter: str = 'WebPages,Translations,TimeZone,Computation,RelatedSearches',
+ domain: str = 'youchat',
+ query_trace_id: str = None,
+ chat: list = None,
+ include_links: bool = False,
+ detailed: bool = False,
+ debug: bool = False,
+ ) -> dict:
if chat is None:
chat = []
@@ -65,25 +57,23 @@ class Completion:
r'(?<=event: youChatSerpResults\ndata:)(.*\n)*?(?=event: )', response.text
).group()
third_party_search_results = re.search(
- r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text
- ).group()
+ r'(?<=event: thirdPartySearchResults\ndata:)(.*\n)*?(?=event: )', response.text).group()
# slots = findall(r"slots\ndata: (.*)\n\nevent", response.text)[0]
text = ''.join(re.findall(r'{\"youChatToken\": \"(.*?)\"}', response.text))
extra = {
- 'youChatSerpResults': json.loads(you_chat_serp_results),
+ 'youChatSerpResults': loads(you_chat_serp_results),
# 'slots' : loads(slots)
}
- response = PoeResponse(text=text.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"'))
- if include_links:
- response.links = json.loads(third_party_search_results)['search']['third_party_search_results']
-
- if detailed:
- response.extra = extra
-
- return response
+ return {
+ 'response': text.replace('\\n', '\n').replace('\\\\', '\\').replace('\\"', '"'),
+ 'links': loads(third_party_search_results)['search']['third_party_search_results']
+ if include_links
+ else None,
+ 'extra': extra if detailed else None,
+ }
@classmethod
def __get_headers(cls) -> dict:
@@ -104,5 +94,5 @@ class Completion:
}
@classmethod
- def __get_failure_response(cls) -> PoeResponse:
- return PoeResponse(text='Unable to fetch the response, Please try again.')
+ def __get_failure_response(cls) -> dict:
+ return dict(response='Unable to fetch the response, Please try again.', links=[], extra={})