assistant: Add display_name for OpenAI and Gemini (#17508)

Peter Tripp 2024-09-10 13:41:06 -04:00 committed by GitHub
parent 85f4c96fef
commit fb9d01b0d5
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 34 additions and 16 deletions
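The user-facing effect: custom models for the OpenAI and Google providers can now carry an optional `display_name`, which the assistant panel's model dropdown shows instead of the raw model `name`. A minimal `settings.json` sketch, assuming the `language_models` layout used in the docs changes below; the model names, labels, and token limits are illustrative:

```json
{
  "language_models": {
    "openai": {
      "version": "1",
      "available_models": [
        {
          // Illustrative values; only "display_name" is new in this commit.
          "name": "gpt-4o-2024-08-06",
          "display_name": "GPT-4o (2024-08-06)",
          "max_tokens": 128000
        }
      ]
    },
    "google": {
      "available_models": [
        {
          "name": "gemini-1.5-flash-latest",
          "display_name": "Gemini 1.5 Flash (Latest)",
          "max_tokens": 1000000
        }
      ]
    }
  }
}
```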

View File

@@ -160,10 +160,12 @@ impl AssistantSettingsContent {
                 .filter_map(|model| match model {
                     OpenAiModel::Custom {
                         name,
+                        display_name,
                         max_tokens,
                         max_output_tokens,
                     } => Some(open_ai::AvailableModel {
                         name,
+                        display_name,
                         max_tokens,
                         max_output_tokens,
                     }),

View File

@@ -304,7 +304,12 @@ pub enum Model {
     #[serde(rename = "gemini-1.5-flash")]
     Gemini15Flash,
     #[serde(rename = "custom")]
-    Custom { name: String, max_tokens: usize },
+    Custom {
+        name: String,
+        /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
+        display_name: Option<String>,
+        max_tokens: usize,
+    },
 }
 
 impl Model {
@@ -320,7 +325,9 @@ impl Model {
         match self {
             Model::Gemini15Pro => "Gemini 1.5 Pro",
             Model::Gemini15Flash => "Gemini 1.5 Flash",
-            Model::Custom { name, .. } => name,
+            Self::Custom {
+                name, display_name, ..
+            } => display_name.as_ref().unwrap_or(name),
         }
     }
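A hedged sketch of what the fallback above means for users: with two Google entries in `settings.json`, the one that omits `display_name` keeps showing its raw `name` in the dropdown. The second model name and both token limits are illustrative, not taken from this commit:

```json
{
  "language_models": {
    "google": {
      "available_models": [
        {
          "name": "gemini-1.5-flash-latest",
          "display_name": "Gemini 1.5 Flash (Latest)",
          "max_tokens": 1000000
        },
        {
          // No display_name: the dropdown falls back to the raw model name.
          "name": "gemini-1.5-pro-exp-0827",
          "max_tokens": 2000000
        }
      ]
    }
  }
}
```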

View File

@@ -254,11 +254,13 @@ impl LanguageModelProvider for CloudLanguageModelProvider {
             }),
             AvailableProvider::OpenAi => CloudModel::OpenAi(open_ai::Model::Custom {
                 name: model.name.clone(),
+                display_name: model.display_name.clone(),
                 max_tokens: model.max_tokens,
                 max_output_tokens: model.max_output_tokens,
             }),
             AvailableProvider::Google => CloudModel::Google(google_ai::Model::Custom {
                 name: model.name.clone(),
+                display_name: model.display_name.clone(),
                 max_tokens: model.max_tokens,
             }),
         };

View File

@@ -37,6 +37,7 @@ pub struct GoogleSettings {
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
 pub struct AvailableModel {
     name: String,
+    display_name: Option<String>,
     max_tokens: usize,
 }
@@ -170,6 +171,7 @@ impl LanguageModelProvider for GoogleLanguageModelProvider {
                 model.name.clone(),
                 google_ai::Model::Custom {
                     name: model.name.clone(),
+                    display_name: model.display_name.clone(),
                     max_tokens: model.max_tokens,
                 },
             );

View File

@@ -40,6 +40,7 @@ pub struct OpenAiSettings {
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
 pub struct AvailableModel {
     pub name: String,
+    pub display_name: Option<String>,
     pub max_tokens: usize,
     pub max_output_tokens: Option<u32>,
 }
@@ -171,6 +172,7 @@ impl LanguageModelProvider for OpenAiLanguageModelProvider {
                 model.name.clone(),
                 open_ai::Model::Custom {
                     name: model.name.clone(),
+                    display_name: model.display_name.clone(),
                     max_tokens: model.max_tokens,
                     max_output_tokens: model.max_output_tokens,
                 },
@@ -368,11 +370,7 @@ pub fn count_open_ai_tokens(
                 })
                 .collect::<Vec<_>>();
 
-            if let open_ai::Model::Custom { .. } = model {
-                tiktoken_rs::num_tokens_from_messages("gpt-4", &messages)
-            } else {
-                tiktoken_rs::num_tokens_from_messages(model.id(), &messages)
-            }
+            tiktoken_rs::num_tokens_from_messages(model.id(), &messages)
         })
         .boxed()
 }

View File

@@ -175,12 +175,14 @@ impl OpenAiSettingsContent {
                 .filter_map(|model| match model {
                     open_ai::Model::Custom {
                         name,
+                        display_name,
                         max_tokens,
                         max_output_tokens,
                     } => Some(provider::open_ai::AvailableModel {
                         name,
                         max_tokens,
                         max_output_tokens,
+                        display_name,
                     }),
                     _ => None,
                 })

View File

@@ -68,6 +68,8 @@ pub enum Model {
     #[serde(rename = "custom")]
     Custom {
         name: String,
+        /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
+        display_name: Option<String>,
         max_tokens: usize,
         max_output_tokens: Option<u32>,
     },
@@ -103,7 +105,9 @@ impl Model {
             Self::FourTurbo => "gpt-4-turbo",
             Self::FourOmni => "gpt-4o",
             Self::FourOmniMini => "gpt-4o-mini",
-            Self::Custom { name, .. } => name,
+            Self::Custom {
+                name, display_name, ..
+            } => display_name.as_ref().unwrap_or(name),
         }
     }
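The OpenAI variant gains the same optional field, so an OpenAI entry can pair a dated model id with a friendlier label. A sketch whose keys mirror the `AvailableModel` fields added in this diff (the `"version": "1"` wrapper comes from the docs change below); the label and `max_output_tokens` value are illustrative:

```json
{
  "language_models": {
    "openai": {
      "version": "1",
      "available_models": [
        {
          "name": "gpt-4o-2024-08-06",
          // Optional: shown in the assistant panel model dropdown instead of the raw name.
          "display_name": "GPT-4o (August 2024)",
          "max_tokens": 128000,
          // Optional cap on generated tokens; the value here is illustrative.
          "max_output_tokens": 16384
        }
      ]
    }
  }
}
```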

View File

@@ -77,7 +77,7 @@ You can use Gemini 1.5 Pro/Flash with the Zed assistant by choosing it via the m
 
 1. Go to the Google AI Studio site and [create an API key](https://aistudio.google.com/app/apikey).
 2. Open the configuration view (`assistant: show configuration`) and navigate to the Google AI section.
-3. Enter your Google AI API key
+3. Enter your Google AI API key and press enter.
 
 The Google AI API key will be saved in your keychain.
@@ -85,7 +85,7 @@ Zed will also use the `GOOGLE_AI_API_KEY` environment variable if it's defined.
 
 #### Google AI custom models {#google-ai-custom-models}
 
-You can add custom models to the Google AI provider by adding the following to your Zed `settings.json`:
+By default, Zed will use `stable` versions of models, but you can use specific versions, including [experimental models](https://ai.google.dev/gemini-api/docs/models/experimental-models), with the Google AI provider by adding the following to your Zed `settings.json`:
 
 ```json
 {
@@ -93,8 +93,9 @@ You can add custom models to the Google AI provider by adding the following to y
     "google": {
       "available_models": [
         {
-          "name": "custom-model",
-          "max_tokens": 128000
+          "name": "gemini-1.5-flash-latest",
+          "display_name": "Gemini 1.5 Flash (Latest)",
+          "max_tokens": 1000000
         }
       ]
     }
@@ -164,16 +165,16 @@ Zed will also use the `OPENAI_API_KEY` environment variable if it's defined.
 #### OpenAI Custom Models {#openai-custom-models}
 
-You can add custom models to the OpenAI provider, by adding the following to your Zed `settings.json`:
+The Zed Assistant comes pre-configured to use the latest version of common models (GPT-3.5 Turbo, GPT-4, GPT-4 Turbo, GPT-4o, GPT-4o mini). If you wish to use an alternate model, perhaps a preview release or a dated model release, you can do so by adding the following to your Zed `settings.json`:
 
 ```json
 {
   "language_models": {
     "openai": {
       "version": "1",
       "available_models": [
         {
-          "name": "custom-model",
           "provider": "openai",
+          "name": "gpt-4o-2024-08-06",
           "max_tokens": 128000
         }
       ]
@@ -182,7 +183,7 @@ You can add custom models to the OpenAI provider, by adding the following to you
 }
 ```
 
-Custom models will be listed in the model dropdown in the assistant panel.
+You must provide the model's context window in the `max_tokens` parameter; this can be found in the [OpenAI Model Docs](https://platform.openai.com/docs/models). Custom models will be listed in the model dropdown in the assistant panel.
 
 ### Advanced configuration {#advanced-configuration}