from __future__ import annotations

from json import dumps

import requests

from ..base_provider import BaseProvider
from ...typing import CreateResult, Messages

# to recreate this easily, send a post request to https://chat.aivvm.com/api/models
# Model identifiers accepted by this provider, mapped to the exact
# {"id": ..., "name": ...} payload entry the Aivvm API expects.
models = {
    'gpt-3.5-turbo':          {'id': 'gpt-3.5-turbo',          'name': 'GPT-3.5'},
    'gpt-3.5-turbo-0613':     {'id': 'gpt-3.5-turbo-0613',     'name': 'GPT-3.5-0613'},
    'gpt-3.5-turbo-16k':      {'id': 'gpt-3.5-turbo-16k',      'name': 'GPT-3.5-16K'},
    'gpt-3.5-turbo-16k-0613': {'id': 'gpt-3.5-turbo-16k-0613', 'name': 'GPT-3.5-16K-0613'},
    'gpt-4':                  {'id': 'gpt-4',                  'name': 'GPT-4'},
    'gpt-4-0613':             {'id': 'gpt-4-0613',             'name': 'GPT-4-0613'},
    'gpt-4-32k':              {'id': 'gpt-4-32k',              'name': 'GPT-4-32K'},
    'gpt-4-32k-0613':         {'id': 'gpt-4-32k-0613',         'name': 'GPT-4-32K-0613'},
}
2023-09-23 02:44:09 +03:00
class Aivvm(BaseProvider):
    """Provider backed by the chat.aivvm.com streaming chat endpoint."""

    url                   = 'https://chat.aivvm.com'
    supports_stream       = True
    working               = False  # marked non-working upstream; kept for reference
    supports_gpt_35_turbo = True
    supports_gpt_4        = True

    @classmethod
    def create_completion(cls,
        model: str,
        messages: Messages,
        stream: bool,
        **kwargs
    ) -> CreateResult:
        """Yield response text chunks streamed from the Aivvm chat API.

        Args:
            model: One of the keys of ``models``; defaults to "gpt-3.5-turbo"
                when falsy.
            messages: Conversation history forwarded verbatim in the payload.
            stream: Accepted for interface compatibility; the request is
                always made with ``stream=True``.
            **kwargs: Optional ``system_message`` (str) and ``temperature``
                (float, default 0.7) overrides.

        Raises:
            ValueError: If ``model`` is given but not in ``models``.
            requests.HTTPError: If the API responds with an error status.

        Note:
            This is a generator, so the ValueError surfaces on first
            iteration, not at call time.
        """
        if not model:
            model = "gpt-3.5-turbo"
        elif model not in models:
            raise ValueError(f"Model is not supported: {model}")

        json_data = {
            "model"       : models[model],
            "messages"    : messages,
            "key"         : "",
            "prompt"      : kwargs.get("system_message", "You are ChatGPT, a large language model trained by OpenAI. Follow the user's instructions carefully. Respond using markdown."),
            "temperature" : kwargs.get("temperature", 0.7)
        }

        data = dumps(json_data)

        # Browser-like headers; content-length is set explicitly because the
        # body is passed pre-serialized as raw bytes via `data=`.
        headers = {
            "accept"             : "text/event-stream",
            "accept-language"    : "en-US,en;q=0.9",
            "content-type"       : "application/json",
            "content-length"     : str(len(data)),
            "sec-ch-ua"          : "\"Chrome\";v=\"117\", \"Not;A=Brand\";v=\"8\", \"Chromium\";v=\"117\"",
            "sec-ch-ua-mobile"   : "?0",
            "sec-ch-ua-platform" : "\"Windows\"",
            "sec-fetch-dest"     : "empty",
            "sec-fetch-mode"     : "cors",
            "sec-fetch-site"     : "same-origin",
            "sec-gpc"            : "1",
            "referrer"           : "https://chat.aivvm.com/",
            "user-agent"         : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36"
        }

        response = requests.post("https://chat.aivvm.com/api/chat", headers=headers, data=data, stream=True)
        response.raise_for_status()

        for chunk in response.iter_content(chunk_size=4096):
            try:
                yield chunk.decode("utf-8")
            except UnicodeDecodeError:
                # Fall back for chunks that split a multi-byte sequence or
                # contain escape sequences that are not valid UTF-8.
                yield chunk.decode("unicode-escape")

    @classmethod
    @property
    def params(cls):
        """Human-readable summary of the parameters this provider supports."""
        params = [
            ('model', 'str'),
            ('messages', 'list[dict[str, str]]'),
            ('stream', 'bool'),
            ('temperature', 'float'),
        ]
        param = ', '.join([': '.join(p) for p in params])
        return f'g4f.provider.{cls.__name__} supports: ({param})'