zed/crates/open_ai/Cargo.toml
Marshall Bowers 0d26beb91b
Add configurable low-speed timeout for OpenAI provider (#11668)
This PR adds a setting to allow configuring the low-speed timeout for
the Assistant when using the OpenAI provider.

The `low_speed_timeout_in_seconds` setting accepts the number of seconds the
HTTP client is allowed to stay below a minimum transfer speed (currently set
to 100 bytes/second) before the request times out.
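
For context, here is a minimal sketch of how such a limit could be wired up with `isahc` (the HTTP client listed in this crate's dependencies), assuming its `Configurable::low_speed_timeout` option; the endpoint URL and the 100 bytes/second and 60 second values are illustrative, not the exact implementation in this PR:

```rust
use std::time::Duration;

use isahc::{config::Configurable, HttpClient};

fn main() -> Result<(), isahc::Error> {
    // Abort a request if throughput stays below 100 bytes/second for 60
    // seconds, mirroring a `low_speed_timeout_in_seconds` value of 60.
    let client = HttpClient::builder()
        .low_speed_timeout(100, Duration::from_secs(60))
        .build()?;

    // Illustrative request only; the real provider hits its configured
    // completions endpoint instead.
    let response = client.get("https://api.openai.com/v1/models")?;
    println!("status: {}", response.status());
    Ok(())
}
```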

```json
{
  "assistant": {
    "version": "1",
    "provider": { "name": "openai", "low_speed_timeout_in_seconds": 60 }
  }
}
```

This should help in cases where the `openai` provider is used with a local
model that requires a longer timeout.

Issue: https://github.com/zed-industries/zed/issues/9913

Release Notes:

- Added a `low_speed_timeout_in_seconds` setting to the Assistant's
OpenAI provider
([#9913](https://github.com/zed-industries/zed/issues/9913)).
2024-05-10 13:19:21 -04:00


[package]
name = "open_ai"
version = "0.1.0"
edition = "2021"
publish = false
license = "GPL-3.0-or-later"

[lib]
path = "src/open_ai.rs"

[features]
default = []
schemars = ["dep:schemars"]

[dependencies]
anyhow.workspace = true
futures.workspace = true
isahc.workspace = true
schemars = { workspace = true, optional = true }
serde.workspace = true
serde_json.workspace = true
util.workspace = true