Allow an initial prompt to be associated with inline assist (#14816)

Release Notes:

- Added the ability to create custom inline assist bindings that
pre-fill a prompt from your keymap, e.g.:
    ```json
    [
        {
            "context": "Editor && mode == full",
            "bindings": {
                "ctrl-shift-enter": [
                    "assistant::InlineAssist",
                    { "prompt": "Build a snake game" }
                ]
            }
        }
    ]
    ```

---------

Co-authored-by: Nathan <nathan@zed.dev>
This commit is contained in:
Antonio Scandurra 2024-07-19 17:13:48 +02:00 committed by GitHub
parent d61eaea4b9
commit 4c7f1032a4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 52 additions and 54 deletions

View File

@ -20,7 +20,7 @@ pub use completion_provider::*;
pub use context::*;
pub use context_store::*;
use fs::Fs;
use gpui::{actions, AppContext, Global, SharedString, UpdateGlobal};
use gpui::{actions, impl_actions, AppContext, Global, SharedString, UpdateGlobal};
use indexed_docs::IndexedDocsRegistry;
pub(crate) use inline_assistant::*;
pub(crate) use model_selector::*;
@ -48,7 +48,6 @@ actions!(
InsertIntoEditor,
ToggleFocus,
ResetKey,
InlineAssist,
InsertActivePrompt,
DeployHistory,
DeployPromptLibrary,
@ -58,6 +57,13 @@ actions!(
]
);
/// Action that triggers an inline assist. Registered via `impl_actions!` (rather
/// than the plain `actions!` list above) because it carries data: an optional
/// `prompt` that a keymap binding can supply to pre-fill the assist input,
/// e.g. `["assistant::InlineAssist", { "prompt": "Build a snake game" }]`.
/// `Default` keeps plain `"assistant::InlineAssist"` bindings working (no prompt).
#[derive(Clone, Default, Deserialize, PartialEq)]
pub struct InlineAssist {
    /// Optional initial prompt text used to pre-fill the inline-assist editor.
    prompt: Option<String>,
}
impl_actions!(assistant, [InlineAssist]);
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct MessageId(clock::Lamport);

View File

@ -454,7 +454,7 @@ impl AssistantPanel {
pub fn inline_assist(
workspace: &mut Workspace,
_: &InlineAssist,
action: &InlineAssist,
cx: &mut ViewContext<Workspace>,
) {
let settings = AssistantSettings::get_global(cx);
@ -472,6 +472,7 @@ impl AssistantPanel {
return;
};
let initial_prompt = action.prompt.clone();
if assistant_panel.update(cx, |assistant, cx| assistant.is_authenticated(cx)) {
match inline_assist_target {
InlineAssistTarget::Editor(active_editor, include_context) => {
@ -480,6 +481,7 @@ impl AssistantPanel {
&active_editor,
Some(cx.view().downgrade()),
include_context.then_some(&assistant_panel),
initial_prompt,
cx,
)
})
@ -490,6 +492,7 @@ impl AssistantPanel {
&active_terminal,
Some(cx.view().downgrade()),
Some(&assistant_panel),
initial_prompt,
cx,
)
})
@ -514,6 +517,7 @@ impl AssistantPanel {
&active_editor,
Some(workspace),
assistant_panel.as_ref(),
initial_prompt,
cx,
)
})
@ -524,6 +528,7 @@ impl AssistantPanel {
&active_terminal,
Some(workspace),
assistant_panel.upgrade().as_ref(),
initial_prompt,
cx,
)
})

View File

@ -87,6 +87,7 @@ impl InlineAssistant {
editor: &View<Editor>,
workspace: Option<WeakView<Workspace>>,
assistant_panel: Option<&View<AssistantPanel>>,
initial_prompt: Option<String>,
cx: &mut WindowContext,
) {
let snapshot = editor.read(cx).buffer().read(cx).snapshot(cx);
@ -138,7 +139,8 @@ impl InlineAssistant {
}
let assist_group_id = self.next_assist_group_id.post_inc();
let prompt_buffer = cx.new_model(|cx| Buffer::local("", cx));
let prompt_buffer =
cx.new_model(|cx| Buffer::local(initial_prompt.unwrap_or_default(), cx));
let prompt_buffer = cx.new_model(|cx| MultiBuffer::singleton(prompt_buffer, cx));
let mut assists = Vec::new();

View File

@ -628,7 +628,7 @@ impl PromptLibrary {
self.picker.update(cx, |picker, cx| picker.focus(cx));
}
pub fn inline_assist(&mut self, _: &InlineAssist, cx: &mut ViewContext<Self>) {
pub fn inline_assist(&mut self, action: &InlineAssist, cx: &mut ViewContext<Self>) {
let Some(active_prompt_id) = self.active_prompt_id else {
cx.propagate();
return;
@ -636,9 +636,10 @@ impl PromptLibrary {
let prompt_editor = &self.prompt_editors[&active_prompt_id].body_editor;
let provider = CompletionProvider::global(cx);
let initial_prompt = action.prompt.clone();
if provider.is_authenticated() {
InlineAssistant::update_global(cx, |assistant, cx| {
assistant.assist(&prompt_editor, None, None, cx)
assistant.assist(&prompt_editor, None, None, initial_prompt, cx)
})
} else {
for window in cx.windows() {

View File

@ -73,11 +73,13 @@ impl TerminalInlineAssistant {
terminal_view: &View<TerminalView>,
workspace: Option<WeakView<Workspace>>,
assistant_panel: Option<&View<AssistantPanel>>,
initial_prompt: Option<String>,
cx: &mut WindowContext,
) {
let terminal = terminal_view.read(cx).terminal().clone();
let assist_id = self.next_assist_id.post_inc();
let prompt_buffer = cx.new_model(|cx| Buffer::local("", cx));
let prompt_buffer =
cx.new_model(|cx| Buffer::local(initial_prompt.unwrap_or_default(), cx));
let prompt_buffer = cx.new_model(|cx| MultiBuffer::singleton(prompt_buffer, cx));
let codegen = cx.new_model(|_| Codegen::new(terminal, self.telemetry.clone()));

View File

@ -142,14 +142,14 @@ impl Render for QuickActionBar {
"toggle inline assistant",
IconName::MagicWand,
false,
Box::new(InlineAssist),
Box::new(InlineAssist::default()),
"Inline Assist",
{
let workspace = self.workspace.clone();
move |_, cx| {
if let Some(workspace) = workspace.upgrade() {
workspace.update(cx, |workspace, cx| {
AssistantPanel::inline_assist(workspace, &InlineAssist, cx);
AssistantPanel::inline_assist(workspace, &InlineAssist::default(), cx);
});
}
}

View File

@ -18,7 +18,7 @@
# Using Zed
- [Multibuffers](./multibuffers.md)
- [Assistant Panel](./assistant-panel.md)
- [Language model integration](./language-model-integration.md)
- [Channels](./channels.md)
- [Collaboration](./collaboration.md)
- [Git](./git.md)

View File

@ -1,10 +1,12 @@
# Assistant Panel
# Language model integration
## Assistant Panel
The assistant panel provides you with a way to interact with OpenAI's large language models. The assistant is good for various tasks, such as generating code, asking questions about existing code, and even writing plaintext, such as emails and documentation. To open the assistant panel, toggle the right dock by using the `workspace: toggle right dock` action in the command palette (`cmd-shift-p`).
> **Note**: A default binding can be set to toggle the right dock via the settings.
## Setup
### Setup
1. Create an [OpenAI API key](https://platform.openai.com/account/api-keys)
2. Make sure that your OpenAI account has credits
@ -20,7 +22,7 @@ The OpenAI API key will be saved in your keychain.
Zed will also use the `OPENAI_API_KEY` environment variable if it's defined. If you need to reset your OpenAI API key, focus on the assistant panel and run the command palette action `assistant: reset key`.
## Having a conversation
### Having a conversation
The assistant editor in Zed functions similarly to any other editor. You can use custom key bindings and work with multiple cursors, allowing for seamless transitions between coding and engaging in discussions with the language models. However, the assistant editor differs with the inclusion of message blocks. These blocks serve as containers for text that correspond to different roles within the conversation. These roles include:
@ -50,7 +52,7 @@ If you want to start a new conversation at any time, you can hit `cmd-n` or use
Simple back-and-forth conversations work well with the assistant. However, there may come a time when you want to modify the previous text in the conversation and steer it in a different direction.
## Editing a conversation
### Editing a conversation
The assistant gives you the flexibility to have control over the conversation. You can freely edit any previous text, including the responses from the assistant. If you want to remove a message block entirely, simply place your cursor at the beginning of the block and use the `delete` key. A typical workflow might involve making edits and adjustments throughout the conversation to refine your inquiry or provide additional context. Here's an example:
@ -69,17 +71,13 @@ Some additional points to keep in mind:
- You are free to change the model type at any point in the conversation.
- You can cycle the role of a message block by clicking on the role, which is useful when you receive a response in an `Assistant` block that you want to edit and send back up as a `You` block.
## Saving and loading conversations
### Saving and loading conversations
After you submit your first message, a name for your conversation is generated by the language model, and the conversation is automatically saved to your file system in `~/.config/zed/conversations`. You can access and load previous messages by clicking on the hamburger button in the top-left corner of the assistant panel.
![Viewing assistant history](https://zed.dev/img/assistant/assistant-history.png)
## Multiple cursor demo
The assistant is capable of sending multiple requests, and receiving multiple responses, in parallel. [Here's a demo](https://zed.dev/img/post/assistant/demo.webm).
## Using a custom API endpoint for OpenAI
### Using a custom API endpoint for OpenAI
You can use a custom API endpoint for OpenAI, as long as it's compatible with the OpenAI API structure.
@ -101,7 +99,7 @@ To do so, add the following to your Zed `settings.json`:
The custom URL here is `http://localhost:11434/v1`.
## Using Ollama on macOS
### Using Ollama on macOS
You can use Ollama with the Zed assistant by making Ollama appear as an OpenAI-compatible API endpoint.
@ -134,7 +132,7 @@ You can use Ollama with the Zed assistant by making Ollama appear as an OpenAPI
```
5. Restart Zed
## Using Claude 3.5 Sonnet
### Using Claude 3.5 Sonnet
You can use Claude with the Zed assistant by adding the following settings:
@ -153,38 +151,22 @@ You can obtain this key [here](https://console.anthropic.com/settings/keys)
Even if you pay for Claude Pro, you will still have to [pay for additional credits](https://console.anthropic.com/settings/plans) to use it via the API.
## Prompt Library
## Inline generation
**Warning: This feature is experimental and the format of prompts is _highly_ likely to change. Use at your own risk!**
You can generate and transform text in any editor by selecting text and pressing `ctrl-enter`.
You can also perform multiple generation requests in parallel by pressing `ctrl-enter` with multiple cursors, or by pressing `ctrl-enter` with a selection that spans multiple excerpts in a multibuffer.
To create a custom keybinding that prefills a prompt, you can add the following format in your keymap:
Zed has a prompt library that allows you to manage prompts.
These are useful for:
- Creating a "default prompt" - a super prompt that includes a collection of things you want the assistant to know in every conversation.
- Adding single prompts to your current context to help guide the assistant's responses.
- (In the future) dynamically adding certain prompts to the assistant based on the current context, such as the presence of Rust code or a specific async runtime you want to work with.
You can access the prompt manager by selecting `Prompt Library...` from the assistant panel's more menu.
By default when opening the assistant, the prompt manager will load any custom prompts present in your `~/.config/zed/prompts` directory.
Checked prompts are included in your "default prompt", which can be inserted into the assistant by running `assistant: insert default prompt` or clicking the `Insert Default Prompt` button in the assistant panel's more menu.
### Creating a custom prompt
Prompts have a simple format:
```md
---
title: Foo
version: 1.0
author: Jane Kim <jane@kim.com>
languages: ["*"]
dependencies: []
---
Foo and bar are terms used in programming to describe generic concepts.
```json
[
{
"context": "Editor && mode == full",
"bindings": {
"ctrl-shift-enter": [
"assistant::InlineAssist",
{ "prompt": "Build a snake game" }
]
}
}
]
```
In the future we'll allow creating and editing prompts directly in the prompt manager.