mirror of https://github.com/xtekky/gpt4free.git (synced 2024-12-23 11:02:40 +03:00)
commit a722abb8c2

.github/workflows/publish-workflow.yaml (vendored)
@@ -24,6 +24,7 @@ jobs:
         python -m etc.tool.openapi
     - uses: actions/upload-artifact@v4
       with:
+        name: openapi
         path: openapi.json
   publish:
     runs-on: ubuntu-latest
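The `python -m etc.tool.openapi` step produces the `openapi.json` that the artifact step uploads; naming the artifact `openapi` lets the downstream `publish` job fetch it by that name. A hypothetical sketch of what such a dump tool typically does, assuming a FastAPI app like g4f's API server (the app construction here is a stand-in, not the real factory):

import json

from fastapi import FastAPI

app = FastAPI(title="g4f API")  # stand-in for the real g4f app factory

# Serialize the generated OpenAPI schema to the file the workflow uploads.
with open("openapi.json", "w") as f:
    json.dump(app.openapi(), f, indent=2)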
@@ -91,15 +91,15 @@ def create_app_debug(g4f_api_key: str = None):
 class ChatCompletionsConfig(BaseModel):
     messages: Messages = Field(examples=[[{"role": "system", "content": ""}, {"role": "user", "content": ""}]])
     model: str = Field(default="")
-    provider: Optional[str] = Field(examples=[None])
+    provider: Optional[str] = None
     stream: bool = False
-    temperature: Optional[float] = Field(examples=[None])
+    temperature: Optional[float] = None
-    max_tokens: Optional[int] = Field(examples=[None])
+    max_tokens: Optional[int] = None
-    stop: Union[list[str], str, None] = Field(examples=[None])
+    stop: Union[list[str], str, None] = None
-    api_key: Optional[str] = Field(examples=[None])
+    api_key: Optional[str] = None
-    web_search: Optional[bool] = Field(examples=[None])
+    web_search: Optional[bool] = None
-    proxy: Optional[str] = Field(examples=[None])
+    proxy: Optional[str] = None
-    conversation_id: Optional[str] = Field(examples=[None])
+    conversation_id: Optional[str] = None

 class ImageGenerationConfig(BaseModel):
     prompt: str
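The `Field(examples=[None])` annotations set only schema metadata and give the fields no default, so Pydantic still treats them as required; replacing them with a plain `= None` default makes the request fields genuinely optional. A minimal sketch of the difference, assuming Pydantic v2:

from typing import Optional

from pydantic import BaseModel, Field, ValidationError

class Before(BaseModel):
    # examples=[None] is documentation only; no default is set
    provider: Optional[str] = Field(examples=[None])

class After(BaseModel):
    provider: Optional[str] = None

try:
    Before()  # ValidationError: provider is required despite Optional
except ValidationError as e:
    print(e)

print(After())  # provider=None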
@@ -152,8 +152,7 @@ async def async_iter_response(
             content = filter_json(content)
         yield ChatCompletion.model_construct(content, finish_reason, completion_id, int(time.time()))
     finally:
-        if hasattr(response, 'aclose'):
-            await safe_aclose(response)
+        await safe_aclose(response)

 async def async_iter_append_model_and_provider(
     response: AsyncChatCompletionResponseType
@@ -167,8 +166,7 @@ async def async_iter_append_model_and_provider(
         chunk.provider = last_provider.get("name")
         yield chunk
     finally:
-        if hasattr(response, 'aclose'):
-            await safe_aclose(response)
+        await safe_aclose(response)

 class Client(BaseClient):
     def __init__(
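Both iterators drop their local `hasattr(response, 'aclose')` guard because the check moves into `safe_aclose` itself (see the helper hunk at the end), so `finally` blocks can call it unconditionally. A condensed sketch of the resulting pattern, with `safe_aclose` inlined from that helper hunk:

import logging
from typing import AsyncIterator

async def safe_aclose(generator) -> None:
    # The guard lives here now, so callers need no hasattr check.
    try:
        if generator and hasattr(generator, 'aclose'):
            await generator.aclose()
    except Exception as e:
        logging.warning(f"Error while closing generator: {e}")

async def iter_response(response: AsyncIterator):
    try:
        async for chunk in response:
            yield chunk
    finally:
        await safe_aclose(response)  # safe even without an aclose() method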
@@ -292,7 +290,7 @@ class Images:
         proxy = self.client.proxy

         response = None
-        if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
+        if hasattr(provider_handler, "create_async_generator"):
             messages = [{"role": "user", "content": f"Generate a image: {prompt}"}]
             async for item in provider_handler.create_async_generator(model, messages, prompt=prompt, **kwargs):
                 if isinstance(item, ImageResponse):
@@ -354,7 +352,7 @@ class Images:
         if proxy is None:
             proxy = self.client.proxy

-        if isinstance(provider, type) and issubclass(provider, AsyncGeneratorProvider):
+        if hasattr(provider, "create_async_generator"):
             messages = [{"role": "user", "content": "create a variation of this image"}]
             generator = None
             try:
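Both image paths switch from `issubclass(provider, AsyncGeneratorProvider)` to a `hasattr` check, so any object exposing `create_async_generator` qualifies, whether it is a class, an instance, or a wrapped provider. A sketch of the duck-typing dispatch, using hypothetical stand-in providers:

class GeneratorProvider:  # hypothetical stand-in
    @staticmethod
    async def create_async_generator(model, messages, **kwargs):
        yield "chunk"

class VariationProvider:  # hypothetical stand-in
    @staticmethod
    async def create_variation(image, **kwargs):
        return "image-url"

def dispatch(provider) -> str:
    # Capability is detected by attribute, not by ancestry.
    if hasattr(provider, "create_async_generator"):
        return "async generator path"
    elif hasattr(provider, "create_variation"):
        return "create_variation path"
    raise ValueError(f"{provider!r} supports no known image API")

print(dispatch(GeneratorProvider))    # works on the class itself
print(dispatch(VariationProvider()))  # and on instances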
@@ -364,8 +362,7 @@ class Images:
                         response = chunk
                         break
             finally:
-                if generator and hasattr(generator, 'aclose'):
-                    await safe_aclose(generator)
+                await safe_aclose(generator)
         elif hasattr(provider, 'create_variation'):
             if asyncio.iscoroutinefunction(provider.create_variation):
                 response = await provider.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs)
@@ -454,7 +451,11 @@ class AsyncCompletions:
         )
         stop = [stop] if isinstance(stop, str) else stop

-        response = provider.create_completion(
+        if hasattr(provider, "create_async_generator"):
+            create_handler = provider.create_async_generator
+        else:
+            create_handler = provider.create_completion
+        response = create_handler(
             model,
             messages,
             stream=stream,
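Instead of always calling `provider.create_completion`, the completion path now prefers a provider's `create_async_generator` when one exists and falls back otherwise. A reduced sketch of the selection, with hypothetical provider stubs:

def select_handler(provider):
    # Prefer the async-generator API; fall back to the sync one.
    if hasattr(provider, "create_async_generator"):
        return provider.create_async_generator
    return provider.create_completion

class AsyncCapable:  # hypothetical
    @staticmethod
    async def create_async_generator(model, messages, stream=False, **kwargs):
        yield "delta"

class SyncOnly:  # hypothetical
    @staticmethod
    def create_completion(model, messages, stream=False, **kwargs):
        return iter(["delta"])

assert select_handler(AsyncCapable) is AsyncCapable.create_async_generator
assert select_handler(SyncOnly) is SyncOnly.create_completion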
@@ -46,7 +46,8 @@ def filter_none(**kwargs) -> dict:

 async def safe_aclose(generator: AsyncGenerator) -> None:
     try:
-        await generator.aclose()
+        if generator and hasattr(generator, 'aclose'):
+            await generator.aclose()
     except Exception as e:
         logging.warning(f"Error while closing generator: {e}")
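With the guard inside the helper, passing `None` (for instance, when the generator was never created) or an object without `aclose` becomes a silent no-op instead of a logged `AttributeError`. A usage sketch, assuming `safe_aclose` is importable from g4f's client helper module (the file this hunk edits; the import path is an assumption):

import asyncio

from g4f.client.helper import safe_aclose  # assumed import path

async def numbers():
    yield 1
    yield 2

async def main():
    gen = numbers()
    await anext(gen)         # start the generator
    await safe_aclose(gen)   # closes the live generator cleanly
    await safe_aclose(None)  # no-op under the new guard

asyncio.run(main())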