Add system message input to gui

Improve OpenaiChat provider
Heiner Lohaus 2024-03-09 03:10:59 +01:00
parent cfa45e7016
commit 14167671d4
4 changed files with 134 additions and 103 deletions

View File

@@ -10,11 +10,10 @@ from aiohttp import ClientWebSocketResponse
 try:
     from py_arkose_generator.arkose import get_values_for_request
-    from async_property import async_cached_property
-    has_requirements = True
+    has_arkose_generator = True
 except ImportError:
-    async_cached_property = property
-    has_requirements = False
+    has_arkose_generator = False
 try:
     from selenium.webdriver.common.by import By
     from selenium.webdriver.support.ui import WebDriverWait
@@ -34,7 +33,7 @@ from ... import debug
 class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     """A class for creating and managing conversations with OpenAI chat service"""
     url = "https://chat.openai.com"
     working = True
     needs_auth = True
@@ -81,7 +80,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             A Response object that contains the generator, action, messages, and options
         """
         # Add the user input to the messages list
-        if prompt:
+        if prompt is not None:
             messages.append({
                 "role": "user",
                 "content": prompt
@@ -103,7 +102,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             messages,
             kwargs
         )
     @classmethod
     async def upload_image(
         cls,
@@ -163,7 +162,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             response.raise_for_status()
             image_data["download_url"] = (await response.json())["download_url"]
         return ImageRequest(image_data)
     @classmethod
     async def get_default_model(cls, session: StreamSession, headers: dict):
         """
@@ -186,7 +185,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     return cls.default_model
                 raise RuntimeError(f"Response: {data}")
         return cls.default_model
     @classmethod
     def create_messages(cls, messages: Messages, image_request: ImageRequest = None):
         """
@@ -335,9 +334,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         Raises:
             RuntimeError: If an error occurs during processing.
         """
-        if not has_requirements:
-            raise MissingRequirementsError('Install "py-arkose-generator" and "async_property" package')
-        if not parent_id:
+        if parent_id is None:
             parent_id = str(uuid.uuid4())
         # Read api_key from arguments
@@ -349,7 +346,7 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             timeout=timeout
         ) as session:
             # Read api_key and cookies from cache / browser config
-            if cls._headers is None or time.time() > cls._expires:
+            if cls._headers is None or cls._expires is None or time.time() > cls._expires:
                 if api_key is None:
                     # Read api_key from cookies
                     cookies = get_cookies("chat.openai.com", False) if cookies is None else cookies
@@ -358,8 +355,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                 else:
                     api_key = cls._api_key if api_key is None else api_key
                 # Read api_key with session cookies
-                if api_key is None and cookies:
-                    api_key = await cls.fetch_access_token(session, cls._headers)
+                #if api_key is None and cookies:
+                #    api_key = await cls.fetch_access_token(session, cls._headers)
                 # Load default model
                 if cls.default_model is None and api_key is not None:
                     try:
@@ -385,6 +382,19 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             else:
                 cls._set_api_key(api_key)
+            async with session.post(
+                f"{cls.url}/backend-api/sentinel/chat-requirements",
+                json={"conversation_mode_kind": "primary_assistant"},
+                headers=cls._headers
+            ) as response:
+                response.raise_for_status()
+                data = await response.json()
+                need_arkose = data["arkose"]["required"]
+                chat_token = data["token"]
+            if need_arkose and not has_arkose_generator:
+                raise MissingRequirementsError('Install "py-arkose-generator" package')
             try:
                 image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
             except Exception as e:
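
Note on the new flow: instead of unconditionally generating an Arkose token for every request, the provider now performs a sentinel handshake first. The backend reports whether Arkose is required and returns a per-request chat token; an Arkose token is generated later only if needed. A minimal sketch of that handshake, using plain aiohttp for illustration (the provider itself goes through its StreamSession wrapper; the endpoint and field names are taken from the diff above):

import aiohttp

async def fetch_chat_requirements(base_url: str, headers: dict) -> tuple[bool, str]:
    # Ask the backend whether an Arkose token is required and fetch the
    # per-request chat-requirements token.
    async with aiohttp.ClientSession() as session:
        async with session.post(
            f"{base_url}/backend-api/sentinel/chat-requirements",
            json={"conversation_mode_kind": "primary_assistant"},
            headers=headers
        ) as response:
            response.raise_for_status()
            data = await response.json()
            return data["arkose"]["required"], data["token"]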
@@ -395,12 +405,10 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
             model = cls.get_model(model).replace("gpt-3.5-turbo", "text-davinci-002-render-sha")
             fields = ResponseFields()
             while fields.finish_reason is None:
-                arkose_token = await cls.get_arkose_token(session)
                 conversation_id = conversation_id if fields.conversation_id is None else fields.conversation_id
                 parent_id = parent_id if fields.message_id is None else fields.message_id
                 data = {
                     "action": action,
-                    "arkose_token": arkose_token,
                     "conversation_mode": {"kind": "primary_assistant"},
                     "force_paragen": False,
                     "force_rate_limit": False,
@@ -418,7 +426,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
                     json=data,
                     headers={
                         "Accept": "text/event-stream",
-                        "OpenAI-Sentinel-Arkose-Token": arkose_token,
+                        **({"OpenAI-Sentinel-Arkose-Token": await cls.get_arkose_token(session)} if need_arkose else {}),
+                        "OpenAI-Sentinel-Chat-Requirements-Token": chat_token,
                         **cls._headers
                     }
                 ) as response:
@@ -471,6 +480,8 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
         if not line.startswith(b"data: "):
             return
         elif line.startswith(b"data: [DONE]"):
+            if fields.finish_reason is None:
+                fields.finish_reason = "error"
             return
         try:
             line = json.loads(line[6:])
@@ -600,16 +611,6 @@ class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
     def _update_cookie_header(cls):
         cls._headers["Cookie"] = cls._format_cookies(cls._cookies)
-class EndTurn:
-    """
-    Class to represent the end of a conversation turn.
-    """
-    def __init__(self):
-        self.is_end = False
-    def end(self):
-        self.is_end = True
 class ResponseFields:
     """
     Class to encapsulate response fields.
@@ -638,8 +639,8 @@ class Response():
         self._options = options
         self._fields = None
-    async def generator(self):
-        if self._generator:
+    async def generator(self) -> AsyncIterator:
+        if self._generator is not None:
             self._generator = None
             chunks = []
             async for chunk in self._generator:
@@ -649,27 +650,29 @@ class Response():
                 yield chunk
                 chunks.append(str(chunk))
             self._message = "".join(chunks)
-            if not self._fields:
+            if self._fields is None:
                 raise RuntimeError("Missing response fields")
-            self.is_end = self._fields.end_turn
+            self.is_end = self._fields.finish_reason == "stop"
     def __aiter__(self):
         return self.generator()
-    @async_cached_property
-    async def message(self) -> str:
+    async def get_message(self) -> str:
         await self.generator()
         return self._message
-    async def get_fields(self):
+    async def get_fields(self) -> dict:
         await self.generator()
-        return {"conversation_id": self._fields.conversation_id, "parent_id": self._fields.message_id}
+        return {
+            "conversation_id": self._fields.conversation_id,
+            "parent_id": self._fields.message_id
+        }
-    async def next(self, prompt: str, **kwargs) -> Response:
+    async def create_next(self, prompt: str, **kwargs) -> Response:
         return await OpenaiChat.create(
             **self._options,
             prompt=prompt,
-            messages=await self.messages,
+            messages=await self.get_messages(),
             action="next",
             **await self.get_fields(),
             **kwargs
@@ -681,13 +684,13 @@ class Response():
             raise RuntimeError("Can't continue message. Message already finished.")
         return await OpenaiChat.create(
             **self._options,
-            messages=await self.messages,
+            messages=await self.get_messages(),
             action="continue",
             **fields,
             **kwargs
         )
-    async def variant(self, **kwargs) -> Response:
+    async def create_variant(self, **kwargs) -> Response:
         if self.action != "next":
             raise RuntimeError("Can't create variant from continue or variant request.")
         return await OpenaiChat.create(
@@ -698,8 +701,7 @@ class Response():
             **kwargs
         )
-    @async_cached_property
-    async def messages(self):
+    async def get_messages(self) -> list:
         messages = self._messages
-        messages.append({"role": "assistant", "content": await self.message})
+        messages.append({"role": "assistant", "content": await self.message()})
         return messages

View File

@@ -65,6 +65,7 @@
 :root {
     --font-1: "Inter", sans-serif;
     --section-gap: 25px;
+    --inner-gap: 15px;
     --border-radius-1: 8px;
 }
@@ -222,7 +223,7 @@ body {
     overflow-wrap: break-word;
     display: flex;
     gap: var(--section-gap);
-    padding: var(--section-gap);
+    padding: var(--inner-gap) var(--section-gap);
     padding-bottom: 0;
 }
@@ -393,7 +394,7 @@ body {
 #input-count {
     width: fit-content;
     font-size: 12px;
-    padding: 6px 15px;
+    padding: 6px var(--inner-gap);
 }
 .stop_generating, .regenerate {
@@ -417,7 +418,7 @@
     background-color: var(--blur-bg);
     border-radius: var(--border-radius-1);
     border: 1px solid var(--blur-border);
-    padding: 5px 15px;
+    padding: 5px var(--inner-gap);
     color: var(--colour-3);
     display: flex;
     justify-content: center;
@@ -601,7 +602,7 @@ select {
 .input-box {
     display: flex;
     align-items: center;
-    padding-right: 15px;
+    padding-right: var(--inner-gap);
     cursor: pointer;
 }
@@ -785,7 +786,7 @@ a:-webkit-any-link {
     font-size: 15px;
     width: 100%;
     height: 100%;
-    padding: 12px 15px;
+    padding: 12px var(--inner-gap);
     background: none;
     border: none;
     outline: none;
@@ -997,3 +998,13 @@ a:-webkit-any-link {
 #send-button:hover {
     border: 1px solid #e4d4ffc9;
 }
+#systemPrompt {
+    font-size: 15px;
+    width: 100%;
+    color: var(--colour-3);
+    height: 50px;
+    outline: none;
+    padding: var(--inner-gap) var(--section-gap);
+    resize: vertical;
+}

View File

@@ -116,6 +116,7 @@
             </div>
         </div>
         <div class="conversation">
+            <textarea id="systemPrompt" class="box" placeholder="System prompt"></textarea>
             <div id="messages" class="box"></div>
             <div class="toolbar">
                 <div id="input-count" class="">

View File

@@ -13,6 +13,7 @@ const cameraInput = document.getElementById("camera");
 const fileInput = document.getElementById("file");
 const inputCount = document.getElementById("input-count")
 const modelSelect = document.getElementById("model");
+const systemPrompt = document.getElementById("systemPrompt")
 let prompt_lock = false;
@@ -135,7 +136,7 @@ const remove_cancel_button = async () => {
     }, 300);
 };
-const filter_messages = (messages, filter_last_message = true) => {
+const prepare_messages = (messages, filter_last_message = true) => {
     // Removes none user messages at end
     if (filter_last_message) {
         let last_message;
@@ -147,7 +148,7 @@ const filter_messages = (messages, filter_last_message = true) => {
         }
     }
-    // Remove history, if it is selected
+    // Remove history, if it's selected
     if (document.getElementById('history')?.checked) {
         if (filter_last_message) {
             messages = [messages.pop()];
@@ -160,7 +161,7 @@ const filter_messages = (messages, filter_last_message = true) => {
     for (i in messages) {
         new_message = messages[i];
         // Remove generated images from history
-        new_message["content"] = new_message["content"].replaceAll(
+        new_message.content = new_message.content.replaceAll(
             /<!-- generated images start -->[\s\S]+<!-- generated images end -->/gm,
             ""
         )
@@ -171,6 +172,15 @@ const filter_messages = (messages, filter_last_message = true) => {
         }
     }
+    // Add system message
+    system_content = systemPrompt?.value;
+    if (system_content) {
+        new_messages.unshift({
+            "role": "system",
+            "content": system_content
+        });
+    }
     return new_messages;
 }
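
prepare_messages (renamed from filter_messages) now also prepends the GUI's system prompt, when one is set, as a leading system message. The resulting shape, sketched in Python for clarity (a hypothetical helper mirroring the unshift above):

def add_system_message(messages: list, system_content: str) -> list:
    # Mirrors new_messages.unshift({...}) in prepare_messages.
    if system_content:
        return [{"role": "system", "content": system_content}, *messages]
    return messages

assert add_system_message([{"role": "user", "content": "Hi"}], "Be brief.") == [
    {"role": "system", "content": "Be brief."},
    {"role": "user", "content": "Hi"},
]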
@@ -179,7 +189,7 @@ const ask_gpt = async () => {
     messages = await get_messages(window.conversation_id);
     total_messages = messages.length;
-    messages = filter_messages(messages);
+    messages = prepare_messages(messages);
     window.scrollTo(0, 0);
     window.controller = new AbortController();
@@ -192,8 +202,6 @@ const ask_gpt = async () => {
     message_box.scrollTop = message_box.scrollHeight;
     window.scrollTo(0, 0);
-    await new Promise((r) => setTimeout(r, 500));
-    window.scrollTo(0, 0);
     el = message_box.querySelector('.count_total');
     el ? el.parentElement.removeChild(el) : null;
@@ -218,6 +226,8 @@ const ask_gpt = async () => {
     message_box.scrollTop = message_box.scrollHeight;
     window.scrollTo(0, 0);
+    error = provider_result = null;
     try {
         let body = JSON.stringify({
             id: window.token,
@@ -241,18 +251,14 @@ const ask_gpt = async () => {
     } else {
         headers['content-type'] = 'application/json';
     }
     const response = await fetch(`/backend-api/v2/conversation`, {
         method: 'POST',
         signal: window.controller.signal,
         headers: headers,
         body: body
     });
-    await new Promise((r) => setTimeout(r, 1000));
-    window.scrollTo(0, 0);
     const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
-    error = provider = null;
     while (true) {
         const { value, done } = await reader.read();
         if (done) break;
@@ -262,12 +268,12 @@ const ask_gpt = async () => {
             if (message.type == "content") {
                 text += message.content;
             } else if (message["type"] == "provider") {
-                provider = message.provider
+                provider_result = message.provider
                 content.querySelector('.provider').innerHTML = `
-                    <a href="${provider.url}" target="_blank">
-                        ${provider.name}
+                    <a href="${provider_result.url}" target="_blank">
+                        ${provider_result.name}
                     </a>
-                    ${provider.model ? ' with ' + provider.model : ''}
+                    ${provider_result.model ? ' with ' + provider_result.model : ''}
                 `
             } else if (message["type"] == "error") {
                 error = message["error"];
@@ -292,7 +298,7 @@ const ask_gpt = async () => {
             html = html.substring(0, lastIndex) + '<span id="cursor"></span>' + lastElement;
         }
         content_inner.innerHTML = html;
-        content_count.innerText = count_words_and_tokens(text, provider?.model);
+        content_count.innerText = count_words_and_tokens(text, provider_result?.model);
         highlight(content_inner);
     }
@@ -324,19 +330,19 @@ const ask_gpt = async () => {
         }
     }
     if (!error) {
-        await add_message(window.conversation_id, "assistant", text, provider);
+        await add_message(window.conversation_id, "assistant", text, provider_result);
         await load_conversation(window.conversation_id);
     } else {
         let cursorDiv = document.getElementById(`cursor`);
         if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);
     }
-    window.scrollTo(0, 0);
     message_box.scrollTop = message_box.scrollHeight;
     await remove_cancel_button();
     await register_remove_message();
     prompt_lock = false;
+    window.scrollTo(0, 0);
     await load_conversations();
-    regenerate.classList.remove(`regenerate-hidden`);
+    regenerate.classList.remove("regenerate-hidden");
 };
 const clear_conversations = async () => {
@@ -362,6 +368,10 @@ const clear_conversation = async () => {
     while (messages.length > 0) {
         message_box.removeChild(messages[0]);
     }
+    if (systemPrompt) {
+        systemPrompt.value = "";
+    }
 };
 const show_option = async (conversation_id) => {
@@ -418,17 +428,22 @@ const new_conversation = async () => {
 };
 const load_conversation = async (conversation_id) => {
-    let messages = await get_messages(conversation_id);
+    let conversation = await get_conversation(conversation_id);
+    let messages = conversation?.items || [];
+    if (systemPrompt) {
+        systemPrompt.value = conversation.system || "";
+    }
     let elements = "";
     let last_model = null;
     for (i in messages) {
         let item = messages[i];
-        last_model = item?.provider?.model;
+        last_model = item.provider?.model;
         let next_i = parseInt(i) + 1;
         let next_provider = item.provider ? item.provider : (messages.length > next_i ? messages[next_i].provider : null);
-        let provider_link = item.provider?.name ? `<a href="${item.provider?.url}" target="_blank">${item.provider.name}</a>` : "";
+        let provider_link = item.provider?.name ? `<a href="${item.provider.url}" target="_blank">${item.provider.name}</a>` : "";
         let provider = provider_link ? `
             <div class="provider">
                 ${provider_link}
@@ -454,7 +469,7 @@ const load_conversation = async (conversation_id) => {
         `;
     }
-    const filtered = filter_messages(messages, false);
+    const filtered = prepare_messages(messages, false);
     if (filtered.length > 0) {
         last_model = last_model?.startsWith("gpt-4") ? "gpt-4" : "gpt-3.5-turbo"
         let count_total = GPTTokenizer_cl100k_base?.encodeChat(filtered, last_model).length
@@ -493,19 +508,26 @@ function count_words_and_tokens(text, model) {
     return countWords ? `(${countWords(text)} words${tokens_append})` : "";
 }
-const get_conversation = async (conversation_id) => {
+async function get_conversation(conversation_id) {
     let conversation = await JSON.parse(
         localStorage.getItem(`conversation:${conversation_id}`)
     );
     return conversation;
-};
+}
-const get_messages = async (conversation_id) => {
+async function save_conversation(conversation_id, conversation) {
+    localStorage.setItem(
+        `conversation:${conversation_id}`,
+        JSON.stringify(conversation)
+    );
+}
+async function get_messages(conversation_id) {
     let conversation = await get_conversation(conversation_id);
     return conversation?.items || [];
-};
+}
-const add_conversation = async (conversation_id, content) => {
+async function add_conversation(conversation_id, content) {
     if (content.length > 17) {
         title = content.substring(0, 17) + '...'
     } else {
@@ -513,18 +535,23 @@ const add_conversation = async (conversation_id, content) => {
     }
     if (localStorage.getItem(`conversation:${conversation_id}`) == null) {
-        localStorage.setItem(
-            `conversation:${conversation_id}`,
-            JSON.stringify({
-                id: conversation_id,
-                title: title,
-                items: [],
-            })
-        );
+        await save_conversation(conversation_id, {
+            id: conversation_id,
+            title: title,
+            system: systemPrompt?.value,
+            items: [],
+        });
     }
     history.pushState({}, null, `/chat/${conversation_id}`);
-};
+}
+async function save_system_message() {
+    if (!window.conversation_id) return;
+    const conversation = await get_conversation(window.conversation_id);
+    conversation.system = systemPrompt?.value;
+    await save_conversation(window.conversation_id, conversation);
+}
 const hide_last_message = async (conversation_id) => {
     const conversation = await get_conversation(conversation_id)
@@ -533,11 +560,7 @@ const hide_last_message = async (conversation_id) => {
         last_message["regenerate"] = true;
     }
     conversation.items.push(last_message);
-    localStorage.setItem(
-        `conversation:${conversation_id}`,
-        JSON.stringify(conversation)
-    );
+    await save_conversation(conversation_id, conversation);
 };
 const remove_message = async (conversation_id, index) => {
@@ -552,10 +575,7 @@ const remove_message = async (conversation_id, index) => {
         }
     }
     conversation.items = new_items;
-    localStorage.setItem(
-        `conversation:${conversation_id}`,
-        JSON.stringify(conversation)
-    );
+    await save_conversation(conversation_id, conversation);
 };
 const add_message = async (conversation_id, role, content, provider) => {
@@ -566,12 +586,7 @@ const add_message = async (conversation_id, role, content, provider) => {
         content: content,
         provider: provider
     });
-    localStorage.setItem(
-        `conversation:${conversation_id}`,
-        JSON.stringify(conversation)
-    );
+    await save_conversation(conversation_id, conversation);
     return conversation.items.length - 1;
 };
@@ -754,9 +769,7 @@ window.onload = async () => {
         say_hello()
     }
-    setTimeout(() => {
-        load_conversations();
-    }, 1);
+    load_conversations();
     message_input.addEventListener("keydown", async (evt) => {
         if (prompt_lock) return;
@@ -875,4 +888,8 @@ fileInput.addEventListener('change', async (event) => {
     } else {
         delete fileInput.dataset.text;
     }
 });
+systemPrompt?.addEventListener("blur", async () => {
+    await save_system_message();
+});