diff --git a/.github/workflows/release_actions.yml b/.github/workflows/release_actions.yml
index c1df24a8e5..550eda882b 100644
--- a/.github/workflows/release_actions.yml
+++ b/.github/workflows/release_actions.yml
@@ -20,9 +20,7 @@ jobs:
id: get-content
with:
stringToTruncate: |
- 📣 Zed ${{ github.event.release.tag_name }} was just released!
-
- Restart your Zed or head to ${{ steps.get-release-url.outputs.URL }} to grab it.
+ 📣 Zed [${{ github.event.release.tag_name }}](${{ steps.get-release-url.outputs.URL }}) was just released!
${{ github.event.release.body }}
maxLength: 2000
diff --git a/Cargo.lock b/Cargo.lock
index df9574be93..450d435ac2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -91,6 +91,7 @@ dependencies = [
"futures 0.3.28",
"gpui",
"isahc",
+ "language",
"lazy_static",
"log",
"matrixmultiply",
@@ -103,7 +104,7 @@ dependencies = [
"rusqlite",
"serde",
"serde_json",
- "tiktoken-rs 0.5.4",
+ "tiktoken-rs",
"util",
]
@@ -309,6 +310,7 @@ dependencies = [
"language",
"log",
"menu",
+ "multi_buffer",
"ordered-float 2.10.0",
"parking_lot 0.11.2",
"project",
@@ -316,12 +318,13 @@ dependencies = [
"regex",
"schemars",
"search",
+ "semantic_index",
"serde",
"serde_json",
"settings",
"smol",
"theme",
- "tiktoken-rs 0.4.5",
+ "tiktoken-rs",
"util",
"uuid 1.4.1",
"workspace",
@@ -1573,7 +1576,7 @@ dependencies = [
[[package]]
name = "collab"
-version = "0.24.0"
+version = "0.27.0"
dependencies = [
"anyhow",
"async-trait",
@@ -1609,6 +1612,7 @@ dependencies = [
"lsp",
"nanoid",
"node_runtime",
+ "notifications",
"parking_lot 0.11.2",
"pretty_assertions",
"project",
@@ -1664,20 +1668,26 @@ dependencies = [
"fuzzy",
"gpui",
"language",
+ "lazy_static",
"log",
"menu",
+ "notifications",
"picker",
"postage",
+ "pretty_assertions",
"project",
"recent_projects",
"rich_text",
+ "rpc",
"schemars",
"serde",
"serde_derive",
"settings",
+ "smallvec",
"theme",
"theme_selector",
"time",
+ "tree-sitter-markdown",
"util",
"vcs_menu",
"workspace",
@@ -1731,6 +1741,7 @@ dependencies = [
"theme",
"util",
"workspace",
+ "zed-actions",
]
[[package]]
@@ -1810,6 +1821,7 @@ dependencies = [
"log",
"lsp",
"node_runtime",
+ "parking_lot 0.11.2",
"rpc",
"serde",
"serde_derive",
@@ -2556,11 +2568,11 @@ dependencies = [
"lazy_static",
"log",
"lsp",
+ "multi_buffer",
"ordered-float 2.10.0",
"parking_lot 0.11.2",
"postage",
"project",
- "pulldown-cmark",
"rand 0.8.5",
"rich_text",
"rpc",
@@ -4244,6 +4256,7 @@ dependencies = [
"lsp",
"parking_lot 0.11.2",
"postage",
+ "pulldown-cmark",
"rand 0.8.5",
"regex",
"rpc",
@@ -4921,6 +4934,55 @@ version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7843ec2de400bcbc6a6328c958dc38e5359da6e93e72e37bc5246bf1ae776389"
+[[package]]
+name = "multi_buffer"
+version = "0.1.0"
+dependencies = [
+ "aho-corasick",
+ "anyhow",
+ "client",
+ "clock",
+ "collections",
+ "context_menu",
+ "convert_case 0.6.0",
+ "copilot",
+ "ctor",
+ "env_logger 0.9.3",
+ "futures 0.3.28",
+ "git",
+ "gpui",
+ "indoc",
+ "itertools 0.10.5",
+ "language",
+ "lazy_static",
+ "log",
+ "lsp",
+ "ordered-float 2.10.0",
+ "parking_lot 0.11.2",
+ "postage",
+ "project",
+ "pulldown-cmark",
+ "rand 0.8.5",
+ "rich_text",
+ "schemars",
+ "serde",
+ "serde_derive",
+ "settings",
+ "smallvec",
+ "smol",
+ "snippet",
+ "sum_tree",
+ "text",
+ "theme",
+ "tree-sitter",
+ "tree-sitter-html",
+ "tree-sitter-rust",
+ "tree-sitter-typescript",
+ "unindent",
+ "util",
+ "workspace",
+]
+
[[package]]
name = "multimap"
version = "0.8.3"
@@ -5070,6 +5132,26 @@ dependencies = [
"minimal-lexical",
]
+[[package]]
+name = "notifications"
+version = "0.1.0"
+dependencies = [
+ "anyhow",
+ "channel",
+ "client",
+ "clock",
+ "collections",
+ "db",
+ "feature_flags",
+ "gpui",
+ "rpc",
+ "settings",
+ "sum_tree",
+ "text",
+ "time",
+ "util",
+]
+
[[package]]
name = "ntapi"
version = "0.3.7"
@@ -5886,6 +5968,7 @@ dependencies = [
"log",
"lsp",
"node_runtime",
+ "parking_lot 0.11.2",
"serde",
"serde_derive",
"serde_json",
@@ -6831,8 +6914,10 @@ dependencies = [
"rsa 0.4.0",
"serde",
"serde_derive",
+ "serde_json",
"smol",
"smol-timeout",
+ "strum",
"tempdir",
"tracing",
"util",
@@ -7407,7 +7492,7 @@ dependencies = [
"smol",
"tempdir",
"theme",
- "tiktoken-rs 0.5.4",
+ "tiktoken-rs",
"tree-sitter",
"tree-sitter-cpp",
"tree-sitter-elixir",
@@ -7421,7 +7506,6 @@ dependencies = [
"unindent",
"util",
"workspace",
- "zed",
]
[[package]]
@@ -8713,21 +8797,6 @@ dependencies = [
"weezl",
]
-[[package]]
-name = "tiktoken-rs"
-version = "0.4.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "52aacc1cff93ba9d5f198c62c49c77fa0355025c729eed3326beaf7f33bc8614"
-dependencies = [
- "anyhow",
- "base64 0.21.4",
- "bstr",
- "fancy-regex",
- "lazy_static",
- "parking_lot 0.12.1",
- "rustc-hash",
-]
-
[[package]]
name = "tiktoken-rs"
version = "0.5.4"
@@ -9148,8 +9217,8 @@ dependencies = [
[[package]]
name = "tree-sitter-bash"
-version = "0.19.0"
-source = "git+https://github.com/tree-sitter/tree-sitter-bash?rev=1b0321ee85701d5036c334a6f04761cdc672e64c#1b0321ee85701d5036c334a6f04761cdc672e64c"
+version = "0.20.4"
+source = "git+https://github.com/tree-sitter/tree-sitter-bash?rev=7331995b19b8f8aba2d5e26deb51d2195c18bc94#7331995b19b8f8aba2d5e26deb51d2195c18bc94"
dependencies = [
"cc",
"tree-sitter",
@@ -9388,6 +9457,15 @@ dependencies = [
"tree-sitter",
]
+[[package]]
+name = "tree-sitter-vue"
+version = "0.0.1"
+source = "git+https://github.com/zed-industries/tree-sitter-vue?rev=95b2890#95b28908d90e928c308866f7631e73ef6e1d4b5f"
+dependencies = [
+ "cc",
+ "tree-sitter",
+]
+
[[package]]
name = "tree-sitter-yaml"
version = "0.0.1"
@@ -9712,6 +9790,7 @@ name = "vcs_menu"
version = "0.1.0"
dependencies = [
"anyhow",
+ "fs",
"fuzzy",
"gpui",
"picker",
@@ -10656,9 +10735,10 @@ dependencies = [
[[package]]
name = "zed"
-version = "0.109.0"
+version = "0.111.0"
dependencies = [
"activity_indicator",
+ "ai",
"anyhow",
"assistant",
"async-compression",
@@ -10710,6 +10790,7 @@ dependencies = [
"log",
"lsp",
"node_runtime",
+ "notifications",
"num_cpus",
"outline",
"parking_lot 0.11.2",
@@ -10771,6 +10852,7 @@ dependencies = [
"tree-sitter-svelte",
"tree-sitter-toml",
"tree-sitter-typescript",
+ "tree-sitter-vue",
"tree-sitter-yaml",
"unindent",
"url",
@@ -10788,6 +10870,7 @@ name = "zed-actions"
version = "0.1.0"
dependencies = [
"gpui",
+ "serde",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 82af9265dd..7db8e1073d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -58,7 +58,9 @@ members = [
"crates/lsp2",
"crates/media",
"crates/menu",
+ "crates/multi_buffer",
"crates/node_runtime",
+ "crates/notifications",
"crates/outline",
"crates/picker",
"crates/plugin",
@@ -133,6 +135,7 @@ serde_derive = { version = "1.0", features = ["deserialize_in_place"] }
serde_json = { version = "1.0", features = ["preserve_order", "raw_value"] }
smallvec = { version = "1.6", features = ["union"] }
smol = { version = "1.2" }
+strum = { version = "0.25.0", features = ["derive"] }
sysinfo = "0.29.10"
tempdir = { version = "0.3.7" }
thiserror = { version = "1.0.29" }
@@ -144,7 +147,7 @@ pretty_assertions = "1.3.0"
git2 = { version = "0.15", default-features = false}
uuid = { version = "1.1.2", features = ["v4"] }
-tree-sitter-bash = { git = "https://github.com/tree-sitter/tree-sitter-bash", rev = "1b0321ee85701d5036c334a6f04761cdc672e64c" }
+tree-sitter-bash = { git = "https://github.com/tree-sitter/tree-sitter-bash", rev = "7331995b19b8f8aba2d5e26deb51d2195c18bc94" }
tree-sitter-c = "0.20.1"
tree-sitter-cpp = { git = "https://github.com/tree-sitter/tree-sitter-cpp", rev="f44509141e7e483323d2ec178f2d2e6c0fc041c1" }
tree-sitter-css = { git = "https://github.com/tree-sitter/tree-sitter-css", rev = "769203d0f9abe1a9a691ac2b9fe4bb4397a73c51" }
@@ -170,7 +173,7 @@ tree-sitter-yaml = { git = "https://github.com/zed-industries/tree-sitter-yaml",
tree-sitter-lua = "0.0.14"
tree-sitter-nix = { git = "https://github.com/nix-community/tree-sitter-nix", rev = "66e3e9ce9180ae08fc57372061006ef83f0abde7" }
tree-sitter-nu = { git = "https://github.com/nushell/tree-sitter-nu", rev = "786689b0562b9799ce53e824cb45a1a2a04dc673"}
-
+tree-sitter-vue = {git = "https://github.com/zed-industries/tree-sitter-vue", rev = "95b2890"}
[patch.crates-io]
tree-sitter = { git = "https://github.com/tree-sitter/tree-sitter", rev = "35a6052fbcafc5e5fc0f9415b8652be7dcaf7222" }
async-task = { git = "https://github.com/zed-industries/async-task", rev = "341b57d6de98cdfd7b418567b8de2022ca993a6e" }
diff --git a/Procfile b/Procfile
index 2eb7de20fb..3f42c3a967 100644
--- a/Procfile
+++ b/Procfile
@@ -1,4 +1,4 @@
web: cd ../zed.dev && PORT=3000 npm run dev
-collab: cd crates/collab && RUST_LOG=${RUST_LOG:-collab=info} cargo run serve
+collab: cd crates/collab && RUST_LOG=${RUST_LOG:-warn,collab=info} cargo run serve
livekit: livekit-server --dev
postgrest: postgrest crates/collab/admin_api.conf
diff --git a/assets/icons/bell.svg b/assets/icons/bell.svg
new file mode 100644
index 0000000000..ea1c6dd42e
--- /dev/null
+++ b/assets/icons/bell.svg
@@ -0,0 +1,8 @@
+
diff --git a/assets/icons/link.svg b/assets/icons/link.svg
new file mode 100644
index 0000000000..4925bd8e00
--- /dev/null
+++ b/assets/icons/link.svg
@@ -0,0 +1,3 @@
+
diff --git a/assets/icons/public.svg b/assets/icons/public.svg
new file mode 100644
index 0000000000..38278cdaba
--- /dev/null
+++ b/assets/icons/public.svg
@@ -0,0 +1,3 @@
+
diff --git a/assets/icons/update.svg b/assets/icons/update.svg
new file mode 100644
index 0000000000..b529b2b08b
--- /dev/null
+++ b/assets/icons/update.svg
@@ -0,0 +1,8 @@
+
diff --git a/assets/keymaps/default.json b/assets/keymaps/default.json
index 8422d53abc..ef6a655bdc 100644
--- a/assets/keymaps/default.json
+++ b/assets/keymaps/default.json
@@ -370,42 +370,15 @@
{
"context": "Pane",
"bindings": {
- "ctrl-1": [
- "pane::ActivateItem",
- 0
- ],
- "ctrl-2": [
- "pane::ActivateItem",
- 1
- ],
- "ctrl-3": [
- "pane::ActivateItem",
- 2
- ],
- "ctrl-4": [
- "pane::ActivateItem",
- 3
- ],
- "ctrl-5": [
- "pane::ActivateItem",
- 4
- ],
- "ctrl-6": [
- "pane::ActivateItem",
- 5
- ],
- "ctrl-7": [
- "pane::ActivateItem",
- 6
- ],
- "ctrl-8": [
- "pane::ActivateItem",
- 7
- ],
- "ctrl-9": [
- "pane::ActivateItem",
- 8
- ],
+ "ctrl-1": ["pane::ActivateItem", 0],
+ "ctrl-2": ["pane::ActivateItem", 1],
+ "ctrl-3": ["pane::ActivateItem", 2],
+ "ctrl-4": ["pane::ActivateItem", 3],
+ "ctrl-5": ["pane::ActivateItem", 4],
+ "ctrl-6": ["pane::ActivateItem", 5],
+ "ctrl-7": ["pane::ActivateItem", 6],
+ "ctrl-8": ["pane::ActivateItem", 7],
+ "ctrl-9": ["pane::ActivateItem", 8],
"ctrl-0": "pane::ActivateLastItem",
"ctrl--": "pane::GoBack",
"ctrl-_": "pane::GoForward",
@@ -416,42 +389,15 @@
{
"context": "Workspace",
"bindings": {
- "cmd-1": [
- "workspace::ActivatePane",
- 0
- ],
- "cmd-2": [
- "workspace::ActivatePane",
- 1
- ],
- "cmd-3": [
- "workspace::ActivatePane",
- 2
- ],
- "cmd-4": [
- "workspace::ActivatePane",
- 3
- ],
- "cmd-5": [
- "workspace::ActivatePane",
- 4
- ],
- "cmd-6": [
- "workspace::ActivatePane",
- 5
- ],
- "cmd-7": [
- "workspace::ActivatePane",
- 6
- ],
- "cmd-8": [
- "workspace::ActivatePane",
- 7
- ],
- "cmd-9": [
- "workspace::ActivatePane",
- 8
- ],
+ "cmd-1": ["workspace::ActivatePane", 0],
+ "cmd-2": ["workspace::ActivatePane", 1],
+ "cmd-3": ["workspace::ActivatePane", 2],
+ "cmd-4": ["workspace::ActivatePane", 3],
+ "cmd-5": ["workspace::ActivatePane", 4],
+ "cmd-6": ["workspace::ActivatePane", 5],
+ "cmd-7": ["workspace::ActivatePane", 6],
+ "cmd-8": ["workspace::ActivatePane", 7],
+ "cmd-9": ["workspace::ActivatePane", 8],
"cmd-b": "workspace::ToggleLeftDock",
"cmd-r": "workspace::ToggleRightDock",
"cmd-j": "workspace::ToggleBottomDock",
@@ -494,38 +440,14 @@
},
{
"bindings": {
- "cmd-k cmd-left": [
- "workspace::ActivatePaneInDirection",
- "Left"
- ],
- "cmd-k cmd-right": [
- "workspace::ActivatePaneInDirection",
- "Right"
- ],
- "cmd-k cmd-up": [
- "workspace::ActivatePaneInDirection",
- "Up"
- ],
- "cmd-k cmd-down": [
- "workspace::ActivatePaneInDirection",
- "Down"
- ],
- "cmd-k shift-left": [
- "workspace::SwapPaneInDirection",
- "Left"
- ],
- "cmd-k shift-right": [
- "workspace::SwapPaneInDirection",
- "Right"
- ],
- "cmd-k shift-up": [
- "workspace::SwapPaneInDirection",
- "Up"
- ],
- "cmd-k shift-down": [
- "workspace::SwapPaneInDirection",
- "Down"
- ]
+ "cmd-k cmd-left": ["workspace::ActivatePaneInDirection", "Left"],
+ "cmd-k cmd-right": ["workspace::ActivatePaneInDirection", "Right"],
+ "cmd-k cmd-up": ["workspace::ActivatePaneInDirection", "Up"],
+ "cmd-k cmd-down": ["workspace::ActivatePaneInDirection", "Down"],
+ "cmd-k shift-left": ["workspace::SwapPaneInDirection", "Left"],
+ "cmd-k shift-right": ["workspace::SwapPaneInDirection", "Right"],
+ "cmd-k shift-up": ["workspace::SwapPaneInDirection", "Up"],
+ "cmd-k shift-down": ["workspace::SwapPaneInDirection", "Down"]
}
},
// Bindings from Atom
@@ -627,14 +549,6 @@
"space": "collab_panel::InsertSpace"
}
},
- {
- "context": "(CollabPanel && not_editing) > Editor",
- "bindings": {
- "cmd-c": "collab_panel::StartLinkChannel",
- "cmd-x": "collab_panel::StartMoveChannel",
- "cmd-v": "collab_panel::MoveOrLinkToSelected"
- }
- },
{
"context": "ChannelModal",
"bindings": {
@@ -655,57 +569,21 @@
"cmd-v": "terminal::Paste",
"cmd-k": "terminal::Clear",
// Some nice conveniences
- "cmd-backspace": [
- "terminal::SendText",
- "\u0015"
- ],
- "cmd-right": [
- "terminal::SendText",
- "\u0005"
- ],
- "cmd-left": [
- "terminal::SendText",
- "\u0001"
- ],
+ "cmd-backspace": ["terminal::SendText", "\u0015"],
+ "cmd-right": ["terminal::SendText", "\u0005"],
+ "cmd-left": ["terminal::SendText", "\u0001"],
// Terminal.app compatibility
- "alt-left": [
- "terminal::SendText",
- "\u001bb"
- ],
- "alt-right": [
- "terminal::SendText",
- "\u001bf"
- ],
+ "alt-left": ["terminal::SendText", "\u001bb"],
+ "alt-right": ["terminal::SendText", "\u001bf"],
// There are conflicting bindings for these keys in the global context.
// these bindings override them, remove at your own risk:
- "up": [
- "terminal::SendKeystroke",
- "up"
- ],
- "pageup": [
- "terminal::SendKeystroke",
- "pageup"
- ],
- "down": [
- "terminal::SendKeystroke",
- "down"
- ],
- "pagedown": [
- "terminal::SendKeystroke",
- "pagedown"
- ],
- "escape": [
- "terminal::SendKeystroke",
- "escape"
- ],
- "enter": [
- "terminal::SendKeystroke",
- "enter"
- ],
- "ctrl-c": [
- "terminal::SendKeystroke",
- "ctrl-c"
- ]
+ "up": ["terminal::SendKeystroke", "up"],
+ "pageup": ["terminal::SendKeystroke", "pageup"],
+ "down": ["terminal::SendKeystroke", "down"],
+ "pagedown": ["terminal::SendKeystroke", "pagedown"],
+ "escape": ["terminal::SendKeystroke", "escape"],
+ "enter": ["terminal::SendKeystroke", "enter"],
+ "ctrl-c": ["terminal::SendKeystroke", "ctrl-c"]
}
}
]
diff --git a/assets/keymaps/vim.json b/assets/keymaps/vim.json
index ea025747d8..81235bb72a 100644
--- a/assets/keymaps/vim.json
+++ b/assets/keymaps/vim.json
@@ -39,6 +39,7 @@
"w": "vim::NextWordStart",
"{": "vim::StartOfParagraph",
"}": "vim::EndOfParagraph",
+ "|": "vim::GoToColumn",
"shift-w": [
"vim::NextWordStart",
{
@@ -97,14 +98,8 @@
"ctrl-o": "pane::GoBack",
"ctrl-i": "pane::GoForward",
"ctrl-]": "editor::GoToDefinition",
- "escape": [
- "vim::SwitchMode",
- "Normal"
- ],
- "ctrl+[": [
- "vim::SwitchMode",
- "Normal"
- ],
+ "escape": ["vim::SwitchMode", "Normal"],
+ "ctrl+[": ["vim::SwitchMode", "Normal"],
"v": "vim::ToggleVisual",
"shift-v": "vim::ToggleVisualLine",
"ctrl-v": "vim::ToggleVisualBlock",
@@ -233,123 +228,36 @@
}
],
// Count support
- "1": [
- "vim::Number",
- 1
- ],
- "2": [
- "vim::Number",
- 2
- ],
- "3": [
- "vim::Number",
- 3
- ],
- "4": [
- "vim::Number",
- 4
- ],
- "5": [
- "vim::Number",
- 5
- ],
- "6": [
- "vim::Number",
- 6
- ],
- "7": [
- "vim::Number",
- 7
- ],
- "8": [
- "vim::Number",
- 8
- ],
- "9": [
- "vim::Number",
- 9
- ],
+ "1": ["vim::Number", 1],
+ "2": ["vim::Number", 2],
+ "3": ["vim::Number", 3],
+ "4": ["vim::Number", 4],
+ "5": ["vim::Number", 5],
+ "6": ["vim::Number", 6],
+ "7": ["vim::Number", 7],
+ "8": ["vim::Number", 8],
+ "9": ["vim::Number", 9],
// window related commands (ctrl-w X)
- "ctrl-w left": [
- "workspace::ActivatePaneInDirection",
- "Left"
- ],
- "ctrl-w right": [
- "workspace::ActivatePaneInDirection",
- "Right"
- ],
- "ctrl-w up": [
- "workspace::ActivatePaneInDirection",
- "Up"
- ],
- "ctrl-w down": [
- "workspace::ActivatePaneInDirection",
- "Down"
- ],
- "ctrl-w h": [
- "workspace::ActivatePaneInDirection",
- "Left"
- ],
- "ctrl-w l": [
- "workspace::ActivatePaneInDirection",
- "Right"
- ],
- "ctrl-w k": [
- "workspace::ActivatePaneInDirection",
- "Up"
- ],
- "ctrl-w j": [
- "workspace::ActivatePaneInDirection",
- "Down"
- ],
- "ctrl-w ctrl-h": [
- "workspace::ActivatePaneInDirection",
- "Left"
- ],
- "ctrl-w ctrl-l": [
- "workspace::ActivatePaneInDirection",
- "Right"
- ],
- "ctrl-w ctrl-k": [
- "workspace::ActivatePaneInDirection",
- "Up"
- ],
- "ctrl-w ctrl-j": [
- "workspace::ActivatePaneInDirection",
- "Down"
- ],
- "ctrl-w shift-left": [
- "workspace::SwapPaneInDirection",
- "Left"
- ],
- "ctrl-w shift-right": [
- "workspace::SwapPaneInDirection",
- "Right"
- ],
- "ctrl-w shift-up": [
- "workspace::SwapPaneInDirection",
- "Up"
- ],
- "ctrl-w shift-down": [
- "workspace::SwapPaneInDirection",
- "Down"
- ],
- "ctrl-w shift-h": [
- "workspace::SwapPaneInDirection",
- "Left"
- ],
- "ctrl-w shift-l": [
- "workspace::SwapPaneInDirection",
- "Right"
- ],
- "ctrl-w shift-k": [
- "workspace::SwapPaneInDirection",
- "Up"
- ],
- "ctrl-w shift-j": [
- "workspace::SwapPaneInDirection",
- "Down"
- ],
+ "ctrl-w left": ["workspace::ActivatePaneInDirection", "Left"],
+ "ctrl-w right": ["workspace::ActivatePaneInDirection", "Right"],
+ "ctrl-w up": ["workspace::ActivatePaneInDirection", "Up"],
+ "ctrl-w down": ["workspace::ActivatePaneInDirection", "Down"],
+ "ctrl-w h": ["workspace::ActivatePaneInDirection", "Left"],
+ "ctrl-w l": ["workspace::ActivatePaneInDirection", "Right"],
+ "ctrl-w k": ["workspace::ActivatePaneInDirection", "Up"],
+ "ctrl-w j": ["workspace::ActivatePaneInDirection", "Down"],
+ "ctrl-w ctrl-h": ["workspace::ActivatePaneInDirection", "Left"],
+ "ctrl-w ctrl-l": ["workspace::ActivatePaneInDirection", "Right"],
+ "ctrl-w ctrl-k": ["workspace::ActivatePaneInDirection", "Up"],
+ "ctrl-w ctrl-j": ["workspace::ActivatePaneInDirection", "Down"],
+ "ctrl-w shift-left": ["workspace::SwapPaneInDirection", "Left"],
+ "ctrl-w shift-right": ["workspace::SwapPaneInDirection", "Right"],
+ "ctrl-w shift-up": ["workspace::SwapPaneInDirection", "Up"],
+ "ctrl-w shift-down": ["workspace::SwapPaneInDirection", "Down"],
+ "ctrl-w shift-h": ["workspace::SwapPaneInDirection", "Left"],
+ "ctrl-w shift-l": ["workspace::SwapPaneInDirection", "Right"],
+ "ctrl-w shift-k": ["workspace::SwapPaneInDirection", "Up"],
+ "ctrl-w shift-j": ["workspace::SwapPaneInDirection", "Down"],
"ctrl-w g t": "pane::ActivateNextItem",
"ctrl-w ctrl-g t": "pane::ActivateNextItem",
"ctrl-w g shift-t": "pane::ActivatePrevItem",
@@ -371,14 +279,8 @@
"ctrl-w ctrl-q": "pane::CloseAllItems",
"ctrl-w o": "workspace::CloseInactiveTabsAndPanes",
"ctrl-w ctrl-o": "workspace::CloseInactiveTabsAndPanes",
- "ctrl-w n": [
- "workspace::NewFileInDirection",
- "Up"
- ],
- "ctrl-w ctrl-n": [
- "workspace::NewFileInDirection",
- "Up"
- ]
+ "ctrl-w n": ["workspace::NewFileInDirection", "Up"],
+ "ctrl-w ctrl-n": ["workspace::NewFileInDirection", "Up"]
}
},
{
@@ -393,21 +295,12 @@
"context": "Editor && vim_mode == normal && vim_operator == none && !VimWaiting",
"bindings": {
".": "vim::Repeat",
- "c": [
- "vim::PushOperator",
- "Change"
- ],
+ "c": ["vim::PushOperator", "Change"],
"shift-c": "vim::ChangeToEndOfLine",
- "d": [
- "vim::PushOperator",
- "Delete"
- ],
+ "d": ["vim::PushOperator", "Delete"],
"shift-d": "vim::DeleteToEndOfLine",
"shift-j": "vim::JoinLines",
- "y": [
- "vim::PushOperator",
- "Yank"
- ],
+ "y": ["vim::PushOperator", "Yank"],
"shift-y": "vim::YankLine",
"i": "vim::InsertBefore",
"shift-i": "vim::InsertFirstNonWhitespace",
@@ -443,10 +336,7 @@
"backwards": true
}
],
- "r": [
- "vim::PushOperator",
- "Replace"
- ],
+ "r": ["vim::PushOperator", "Replace"],
"s": "vim::Substitute",
"shift-s": "vim::SubstituteLine",
"> >": "editor::Indent",
@@ -458,10 +348,7 @@
{
"context": "Editor && VimCount",
"bindings": {
- "0": [
- "vim::Number",
- 0
- ]
+ "0": ["vim::Number", 0]
}
},
{
@@ -497,12 +384,15 @@
"'": "vim::Quotes",
"`": "vim::BackQuotes",
"\"": "vim::DoubleQuotes",
+ "|": "vim::VerticalBars",
"(": "vim::Parentheses",
")": "vim::Parentheses",
+ "b": "vim::Parentheses",
"[": "vim::SquareBrackets",
"]": "vim::SquareBrackets",
"{": "vim::CurlyBrackets",
"}": "vim::CurlyBrackets",
+ "shift-b": "vim::CurlyBrackets",
"<": "vim::AngleBrackets",
">": "vim::AngleBrackets"
}
@@ -548,22 +438,10 @@
"shift-i": "vim::InsertBefore",
"shift-a": "vim::InsertAfter",
"shift-j": "vim::JoinLines",
- "r": [
- "vim::PushOperator",
- "Replace"
- ],
- "ctrl-c": [
- "vim::SwitchMode",
- "Normal"
- ],
- "escape": [
- "vim::SwitchMode",
- "Normal"
- ],
- "ctrl+[": [
- "vim::SwitchMode",
- "Normal"
- ],
+ "r": ["vim::PushOperator", "Replace"],
+ "ctrl-c": ["vim::SwitchMode", "Normal"],
+ "escape": ["vim::SwitchMode", "Normal"],
+ "ctrl+[": ["vim::SwitchMode", "Normal"],
">": "editor::Indent",
"<": "editor::Outdent",
"i": [
@@ -602,14 +480,8 @@
"bindings": {
"tab": "vim::Tab",
"enter": "vim::Enter",
- "escape": [
- "vim::SwitchMode",
- "Normal"
- ],
- "ctrl+[": [
- "vim::SwitchMode",
- "Normal"
- ]
+ "escape": ["vim::SwitchMode", "Normal"],
+ "ctrl+[": ["vim::SwitchMode", "Normal"]
}
},
{
diff --git a/assets/settings/default.json b/assets/settings/default.json
index 1611d80e2f..19c73ca021 100644
--- a/assets/settings/default.json
+++ b/assets/settings/default.json
@@ -50,6 +50,9 @@
// Whether to pop the completions menu while typing in an editor without
// explicitly requesting it.
"show_completions_on_input": true,
+ // Whether to display inline and alongside documentation for items in the
+ // completions menu
+ "show_completion_documentation": true,
// Whether to show wrap guides in the editor. Setting this to true will
// show a guide at the 'preferred_line_length' value if softwrap is set to
// 'preferred_line_length', and will show any additional guides as specified
@@ -139,6 +142,14 @@
// Default width of the channels panel.
"default_width": 240
},
+ "notification_panel": {
+ // Whether to show the notification panel button in the status bar.
+ "button": true,
+ // Where to dock the notification panel. Can be 'left' or 'right'.
+ "dock": "right",
+ // Default width of the notification panel.
+ "default_width": 380
+ },
"assistant": {
// Whether to show the assistant panel button in the status bar.
"button": true,
diff --git a/crates/ai/Cargo.toml b/crates/ai/Cargo.toml
index 542d7f422f..b24c4e5ece 100644
--- a/crates/ai/Cargo.toml
+++ b/crates/ai/Cargo.toml
@@ -11,6 +11,7 @@ doctest = false
[dependencies]
gpui = { path = "../gpui" }
util = { path = "../util" }
+language = { path = "../language" }
async-trait.workspace = true
anyhow.workspace = true
futures.workspace = true
diff --git a/crates/ai/src/ai.rs b/crates/ai/src/ai.rs
index 5256a6a643..f168c15793 100644
--- a/crates/ai/src/ai.rs
+++ b/crates/ai/src/ai.rs
@@ -1,2 +1,4 @@
pub mod completion;
pub mod embedding;
+pub mod models;
+pub mod templates;
diff --git a/crates/ai/src/completion.rs b/crates/ai/src/completion.rs
index 170b2268f9..de6ce9da71 100644
--- a/crates/ai/src/completion.rs
+++ b/crates/ai/src/completion.rs
@@ -53,6 +53,8 @@ pub struct OpenAIRequest {
pub model: String,
pub messages: Vec<RequestMessage>,
pub stream: bool,
+ pub stop: Vec<String>,
+ pub temperature: f32,
}
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
diff --git a/crates/ai/src/embedding.rs b/crates/ai/src/embedding.rs
index 332470aa54..b791414ba2 100644
--- a/crates/ai/src/embedding.rs
+++ b/crates/ai/src/embedding.rs
@@ -2,7 +2,7 @@ use anyhow::{anyhow, Result};
use async_trait::async_trait;
use futures::AsyncReadExt;
use gpui::executor::Background;
-use gpui::serde_json;
+use gpui::{serde_json, AppContext};
use isahc::http::StatusCode;
use isahc::prelude::Configurable;
use isahc::{AsyncBody, Response};
@@ -20,9 +20,11 @@ use std::sync::Arc;
use std::time::{Duration, Instant};
use tiktoken_rs::{cl100k_base, CoreBPE};
use util::http::{HttpClient, Request};
+use util::ResultExt;
+
+use crate::completion::OPENAI_API_URL;
lazy_static! {
- static ref OPENAI_API_KEY: Option<String> = env::var("OPENAI_API_KEY").ok();
static ref OPENAI_BPE_TOKENIZER: CoreBPE = cl100k_base().unwrap();
}
@@ -85,25 +87,6 @@ impl Embedding {
}
}
-// impl FromSql for Embedding {
-// fn column_result(value: ValueRef) -> FromSqlResult<Self> {
-// let bytes = value.as_blob()?;
-// let embedding: Result<Vec<f32>, Box<bincode::ErrorKind>> = bincode::deserialize(bytes);
-// if embedding.is_err() {
-// return Err(rusqlite::types::FromSqlError::Other(embedding.unwrap_err()));
-// }
-// Ok(Embedding(embedding.unwrap()))
-// }
-// }
-
-// impl ToSql for Embedding {
-// fn to_sql(&self) -> rusqlite::Result<ToSqlOutput> {
-// let bytes = bincode::serialize(&self.0)
-// .map_err(|err| rusqlite::Error::ToSqlConversionFailure(Box::new(err)))?;
-// Ok(ToSqlOutput::Owned(rusqlite::types::Value::Blob(bytes)))
-// }
-// }
-
#[derive(Clone)]
pub struct OpenAIEmbeddings {
pub client: Arc<dyn HttpClient>,
@@ -139,8 +122,12 @@ struct OpenAIEmbeddingUsage {
#[async_trait]
pub trait EmbeddingProvider: Sync + Send {
- fn is_authenticated(&self) -> bool;
- async fn embed_batch(&self, spans: Vec<String>) -> Result<Vec<Embedding>>;
+ fn retrieve_credentials(&self, cx: &AppContext) -> Option<String>;
+ async fn embed_batch(
+ &self,
+ spans: Vec<String>,
+ api_key: Option<String>,
+ ) -> Result<Vec<Embedding>>;
fn max_tokens_per_batch(&self) -> usize;
fn truncate(&self, span: &str) -> (String, usize);
fn rate_limit_expiration(&self) -> Option<Instant>;
@@ -150,13 +137,17 @@ pub struct DummyEmbeddings {}
#[async_trait]
impl EmbeddingProvider for DummyEmbeddings {
- fn is_authenticated(&self) -> bool {
- true
+ fn retrieve_credentials(&self, _cx: &AppContext) -> Option<String> {
+ Some("Dummy API KEY".to_string())
}
fn rate_limit_expiration(&self) -> Option<Instant> {
None
}
- async fn embed_batch(&self, spans: Vec<String>) -> Result<Vec<Embedding>> {
+ async fn embed_batch(
+ &self,
+ spans: Vec<String>,
+ _api_key: Option<String>,
+ ) -> Result<Vec<Embedding>> {
// 1536 is the OpenAI Embeddings size for ada models.
// the model we will likely be starting with.
let dummy_vec = Embedding::from(vec![0.32 as f32; 1536]);
@@ -255,9 +246,21 @@ impl OpenAIEmbeddings {
#[async_trait]
impl EmbeddingProvider for OpenAIEmbeddings {
- fn is_authenticated(&self) -> bool {
- OPENAI_API_KEY.as_ref().is_some()
+ fn retrieve_credentials(&self, cx: &AppContext) -> Option<String> {
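+ // Prefer the OPENAI_API_KEY environment variable; otherwise fall back to a key stored in the platform credential store under the OpenAI API URL.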
+ if let Ok(api_key) = env::var("OPENAI_API_KEY") {
+ Some(api_key)
+ } else if let Some((_, api_key)) = cx
+ .platform()
+ .read_credentials(OPENAI_API_URL)
+ .log_err()
+ .flatten()
+ {
+ String::from_utf8(api_key).log_err()
+ } else {
+ None
+ }
}
+
fn max_tokens_per_batch(&self) -> usize {
50000
}
@@ -280,13 +283,17 @@ impl EmbeddingProvider for OpenAIEmbeddings {
(output, tokens.len())
}
- async fn embed_batch(&self, spans: Vec<String>) -> Result<Vec<Embedding>> {
+ async fn embed_batch(
+ &self,
+ spans: Vec<String>,
+ api_key: Option<String>,
+ ) -> Result<Vec<Embedding>> {
const BACKOFF_SECONDS: [usize; 4] = [3, 5, 15, 45];
const MAX_RETRIES: usize = 4;
- let api_key = OPENAI_API_KEY
- .as_ref()
- .ok_or_else(|| anyhow!("no api key"))?;
+ let Some(api_key) = api_key else {
+ return Err(anyhow!("no open ai key provided"));
+ };
let mut request_number = 0;
let mut rate_limiting = false;
@@ -295,11 +302,12 @@ impl EmbeddingProvider for OpenAIEmbeddings {
while request_number < MAX_RETRIES {
response = self
.send_request(
- api_key,
+ &api_key,
spans.iter().map(|x| &**x).collect(),
request_timeout,
)
.await?;
+
request_number += 1;
match response.status() {
diff --git a/crates/ai/src/models.rs b/crates/ai/src/models.rs
new file mode 100644
index 0000000000..d0206cc41c
--- /dev/null
+++ b/crates/ai/src/models.rs
@@ -0,0 +1,66 @@
+use anyhow::anyhow;
+use tiktoken_rs::CoreBPE;
+use util::ResultExt;
+
+pub trait LanguageModel {
+ fn name(&self) -> String;
+ fn count_tokens(&self, content: &str) -> anyhow::Result<usize>;
+ fn truncate(&self, content: &str, length: usize) -> anyhow::Result<String>;
+ fn truncate_start(&self, content: &str, length: usize) -> anyhow::Result<String>;
+ fn capacity(&self) -> anyhow::Result<usize>;
+}
+
+pub struct OpenAILanguageModel {
+ name: String,
+ bpe: Option<CoreBPE>,
+}
+
+impl OpenAILanguageModel {
+ pub fn load(model_name: &str) -> Self {
+ let bpe = tiktoken_rs::get_bpe_from_model(model_name).log_err();
+ OpenAILanguageModel {
+ name: model_name.to_string(),
+ bpe,
+ }
+ }
+}
+
+impl LanguageModel for OpenAILanguageModel {
+ fn name(&self) -> String {
+ self.name.clone()
+ }
+ fn count_tokens(&self, content: &str) -> anyhow::Result<usize> {
+ if let Some(bpe) = &self.bpe {
+ anyhow::Ok(bpe.encode_with_special_tokens(content).len())
+ } else {
+ Err(anyhow!("bpe for open ai model was not retrieved"))
+ }
+ }
+ fn truncate(&self, content: &str, length: usize) -> anyhow::Result<String> {
+ if let Some(bpe) = &self.bpe {
+ let tokens = bpe.encode_with_special_tokens(content);
+ if tokens.len() > length {
+ bpe.decode(tokens[..length].to_vec())
+ } else {
+ bpe.decode(tokens)
+ }
+ } else {
+ Err(anyhow!("bpe for open ai model was not retrieved"))
+ }
+ }
+ fn truncate_start(&self, content: &str, length: usize) -> anyhow::Result<String> {
+ if let Some(bpe) = &self.bpe {
+ let tokens = bpe.encode_with_special_tokens(content);
+ if tokens.len() > length {
+ bpe.decode(tokens[length..].to_vec())
+ } else {
+ bpe.decode(tokens)
+ }
+ } else {
+ Err(anyhow!("bpe for open ai model was not retrieved"))
+ }
+ }
+ fn capacity(&self) -> anyhow::Result<usize> {
+ anyhow::Ok(tiktoken_rs::model::get_context_size(&self.name))
+ }
+}
diff --git a/crates/ai/src/templates/base.rs b/crates/ai/src/templates/base.rs
new file mode 100644
index 0000000000..bda1d6c30e
--- /dev/null
+++ b/crates/ai/src/templates/base.rs
@@ -0,0 +1,350 @@
+use std::cmp::Reverse;
+use std::ops::Range;
+use std::sync::Arc;
+
+use language::BufferSnapshot;
+use util::ResultExt;
+
+use crate::models::LanguageModel;
+use crate::templates::repository_context::PromptCodeSnippet;
+
+pub(crate) enum PromptFileType {
+ Text,
+ Code,
+}
+
+// TODO: Set this up to manage for defaults well
+pub struct PromptArguments {
+ pub model: Arc<dyn LanguageModel>,
+ pub user_prompt: Option<String>,
+ pub language_name: Option<String>,
+ pub project_name: Option<String>,
+ pub snippets: Vec<PromptCodeSnippet>,
+ pub reserved_tokens: usize,
+ pub buffer: Option<BufferSnapshot>,
+ pub selected_range: Option<Range<usize>>,
+}
+
+impl PromptArguments {
+ pub(crate) fn get_file_type(&self) -> PromptFileType {
+ if self
+ .language_name
+ .as_ref()
+ .and_then(|name| Some(!["Markdown", "Plain Text"].contains(&name.as_str())))
+ .unwrap_or(true)
+ {
+ PromptFileType::Code
+ } else {
+ PromptFileType::Text
+ }
+ }
+}
+
+pub trait PromptTemplate {
+ fn generate(
+ &self,
+ args: &PromptArguments,
+ max_token_length: Option<usize>,
+ ) -> anyhow::Result<(String, usize)>;
+}
+
+#[repr(i8)]
+#[derive(PartialEq, Eq, Ord)]
+pub enum PromptPriority {
+ Mandatory, // Ignores truncation
+ Ordered { order: usize }, // Truncates based on priority
+}
+
+impl PartialOrd for PromptPriority {
+ fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
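+ // Mandatory outranks any Ordered priority; among Ordered, a lower `order` ranks higher (hence the reversed comparison below).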
+ match (self, other) {
+ (Self::Mandatory, Self::Mandatory) => Some(std::cmp::Ordering::Equal),
+ (Self::Mandatory, Self::Ordered { .. }) => Some(std::cmp::Ordering::Greater),
+ (Self::Ordered { .. }, Self::Mandatory) => Some(std::cmp::Ordering::Less),
+ (Self::Ordered { order: a }, Self::Ordered { order: b }) => b.partial_cmp(a),
+ }
+ }
+}
+
+pub struct PromptChain {
+ args: PromptArguments,
+ templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)>,
+}
+
+impl PromptChain {
+ pub fn new(
+ args: PromptArguments,
+ templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)>,
+ ) -> Self {
+ PromptChain { args, templates }
+ }
+
+ pub fn generate(&self, truncate: bool) -> anyhow::Result<(String, usize)> {
+ // Argsort based on Prompt Priority
+ let separator = "\n";
+ let separator_tokens = self.args.model.count_tokens(separator)?;
+ let mut sorted_indices = (0..self.templates.len()).collect::<Vec<usize>>();
+ sorted_indices.sort_by_key(|&i| Reverse(&self.templates[i].0));
+
+ // If truncating, budget the model's capacity minus any reserved tokens.
+ let mut tokens_outstanding = if truncate {
+ Some(self.args.model.capacity()? - self.args.reserved_tokens)
+ } else {
+ None
+ };
+
+ let mut prompts = vec!["".to_string(); sorted_indices.len()];
+ for idx in sorted_indices {
+ let (_, template) = &self.templates[idx];
+
+ if let Some((template_prompt, prompt_token_count)) =
+ template.generate(&self.args, tokens_outstanding).log_err()
+ {
+ if template_prompt != "" {
+ prompts[idx] = template_prompt;
+
+ if let Some(remaining_tokens) = tokens_outstanding {
+ let new_tokens = prompt_token_count + separator_tokens;
+ tokens_outstanding = if remaining_tokens > new_tokens {
+ Some(remaining_tokens - new_tokens)
+ } else {
+ Some(0)
+ };
+ }
+ }
+ }
+ }
+
+ prompts.retain(|x| x != "");
+
+ let full_prompt = prompts.join(separator);
+ let total_token_count = self.args.model.count_tokens(&full_prompt)?;
+ anyhow::Ok((full_prompt, total_token_count))
+ }
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+ use super::*;
+
+ #[test]
+ pub fn test_prompt_chain() {
+ struct TestPromptTemplate {}
+ impl PromptTemplate for TestPromptTemplate {
+ fn generate(
+ &self,
+ args: &PromptArguments,
+ max_token_length: Option<usize>,
+ ) -> anyhow::Result<(String, usize)> {
+ let mut content = "This is a test prompt template".to_string();
+
+ let mut token_count = args.model.count_tokens(&content)?;
+ if let Some(max_token_length) = max_token_length {
+ if token_count > max_token_length {
+ content = args.model.truncate(&content, max_token_length)?;
+ token_count = max_token_length;
+ }
+ }
+
+ anyhow::Ok((content, token_count))
+ }
+ }
+
+ struct TestLowPriorityTemplate {}
+ impl PromptTemplate for TestLowPriorityTemplate {
+ fn generate(
+ &self,
+ args: &PromptArguments,
+ max_token_length: Option<usize>,
+ ) -> anyhow::Result<(String, usize)> {
+ let mut content = "This is a low priority test prompt template".to_string();
+
+ let mut token_count = args.model.count_tokens(&content)?;
+ if let Some(max_token_length) = max_token_length {
+ if token_count > max_token_length {
+ content = args.model.truncate(&content, max_token_length)?;
+ token_count = max_token_length;
+ }
+ }
+
+ anyhow::Ok((content, token_count))
+ }
+ }
+
+ #[derive(Clone)]
+ struct DummyLanguageModel {
+ capacity: usize,
+ }
+
+ impl LanguageModel for DummyLanguageModel {
+ fn name(&self) -> String {
+ "dummy".to_string()
+ }
+ fn count_tokens(&self, content: &str) -> anyhow::Result<usize> {
+ anyhow::Ok(content.chars().collect::<Vec<char>>().len())
+ }
+ fn truncate(&self, content: &str, length: usize) -> anyhow::Result<String> {
+ anyhow::Ok(
+ content.chars().collect::<Vec<char>>()[..length]
+ .into_iter()
+ .collect::<String>(),
+ )
+ }
+ fn truncate_start(&self, content: &str, length: usize) -> anyhow::Result<String> {
+ anyhow::Ok(
+ content.chars().collect::<Vec<char>>()[length..]
+ .into_iter()
+ .collect::<String>(),
+ )
+ }
+ fn capacity(&self) -> anyhow::Result<usize> {
+ anyhow::Ok(self.capacity)
+ }
+ }
+
+ let model: Arc<dyn LanguageModel> = Arc::new(DummyLanguageModel { capacity: 100 });
+ let args = PromptArguments {
+ model: model.clone(),
+ language_name: None,
+ project_name: None,
+ snippets: Vec::new(),
+ reserved_tokens: 0,
+ buffer: None,
+ selected_range: None,
+ user_prompt: None,
+ };
+
+ let templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)> = vec![
+ (
+ PromptPriority::Ordered { order: 0 },
+ Box::new(TestPromptTemplate {}),
+ ),
+ (
+ PromptPriority::Ordered { order: 1 },
+ Box::new(TestLowPriorityTemplate {}),
+ ),
+ ];
+ let chain = PromptChain::new(args, templates);
+
+ let (prompt, token_count) = chain.generate(false).unwrap();
+
+ assert_eq!(
+ prompt,
+ "This is a test prompt template\nThis is a low priority test prompt template"
+ .to_string()
+ );
+
+ assert_eq!(model.count_tokens(&prompt).unwrap(), token_count);
+
+ // Testing with Truncation Off
+ // Should ignore capacity and return all prompts
+ let model: Arc<dyn LanguageModel> = Arc::new(DummyLanguageModel { capacity: 20 });
+ let args = PromptArguments {
+ model: model.clone(),
+ language_name: None,
+ project_name: None,
+ snippets: Vec::new(),
+ reserved_tokens: 0,
+ buffer: None,
+ selected_range: None,
+ user_prompt: None,
+ };
+
+ let templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)> = vec![
+ (
+ PromptPriority::Ordered { order: 0 },
+ Box::new(TestPromptTemplate {}),
+ ),
+ (
+ PromptPriority::Ordered { order: 1 },
+ Box::new(TestLowPriorityTemplate {}),
+ ),
+ ];
+ let chain = PromptChain::new(args, templates);
+
+ let (prompt, token_count) = chain.generate(false).unwrap();
+
+ assert_eq!(
+ prompt,
+ "This is a test prompt template\nThis is a low priority test prompt template"
+ .to_string()
+ );
+
+ assert_eq!(model.count_tokens(&prompt).unwrap(), token_count);
+
+ // Testing with truncation on
+ // Should truncate the combined prompt down to the model's capacity
+ let capacity = 20;
+ let model: Arc<dyn LanguageModel> = Arc::new(DummyLanguageModel { capacity });
+ let args = PromptArguments {
+ model: model.clone(),
+ language_name: None,
+ project_name: None,
+ snippets: Vec::new(),
+ reserved_tokens: 0,
+ buffer: None,
+ selected_range: None,
+ user_prompt: None,
+ };
+
+ let templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)> = vec![
+ (
+ PromptPriority::Ordered { order: 0 },
+ Box::new(TestPromptTemplate {}),
+ ),
+ (
+ PromptPriority::Ordered { order: 1 },
+ Box::new(TestLowPriorityTemplate {}),
+ ),
+ (
+ PromptPriority::Ordered { order: 2 },
+ Box::new(TestLowPriorityTemplate {}),
+ ),
+ ];
+ let chain = PromptChain::new(args, templates);
+
+ let (prompt, token_count) = chain.generate(true).unwrap();
+
+ assert_eq!(prompt, "This is a test promp".to_string());
+ assert_eq!(token_count, capacity);
+
+ // Change Ordering of Prompts Based on Priority
+ let capacity = 120;
+ let reserved_tokens = 10;
+ let model: Arc<dyn LanguageModel> = Arc::new(DummyLanguageModel { capacity });
+ let args = PromptArguments {
+ model: model.clone(),
+ language_name: None,
+ project_name: None,
+ snippets: Vec::new(),
+ reserved_tokens,
+ buffer: None,
+ selected_range: None,
+ user_prompt: None,
+ };
+ let templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)> = vec![
+ (
+ PromptPriority::Mandatory,
+ Box::new(TestLowPriorityTemplate {}),
+ ),
+ (
+ PromptPriority::Ordered { order: 0 },
+ Box::new(TestPromptTemplate {}),
+ ),
+ (
+ PromptPriority::Ordered { order: 1 },
+ Box::new(TestLowPriorityTemplate {}),
+ ),
+ ];
+ let chain = PromptChain::new(args, templates);
+
+ let (prompt, token_count) = chain.generate(true).unwrap();
+
+ assert_eq!(
+ prompt,
+ "This is a low priority test prompt template\nThis is a test prompt template\nThis is a low priority test prompt "
+ .to_string()
+ );
+ assert_eq!(token_count, capacity - reserved_tokens);
+ }
+}
diff --git a/crates/ai/src/templates/file_context.rs b/crates/ai/src/templates/file_context.rs
new file mode 100644
index 0000000000..1afd61192e
--- /dev/null
+++ b/crates/ai/src/templates/file_context.rs
@@ -0,0 +1,160 @@
+use anyhow::anyhow;
+use language::BufferSnapshot;
+use language::ToOffset;
+
+use crate::models::LanguageModel;
+use crate::templates::base::PromptArguments;
+use crate::templates::base::PromptTemplate;
+use std::fmt::Write;
+use std::ops::Range;
+use std::sync::Arc;
+
+fn retrieve_context(
+ buffer: &BufferSnapshot,
+ selected_range: &Option<Range<usize>>,
+ model: Arc<dyn LanguageModel>,
+ max_token_count: Option<usize>,
+) -> anyhow::Result<(String, usize, bool)> {
+ let mut prompt = String::new();
+ let mut truncated = false;
+ if let Some(selected_range) = selected_range {
+ let start = selected_range.start.to_offset(buffer);
+ let end = selected_range.end.to_offset(buffer);
+
+ let start_window = buffer.text_for_range(0..start).collect::<String>();
+
+ let mut selected_window = String::new();
+ if start == end {
+ write!(selected_window, "<|START|>").unwrap();
+ } else {
+ write!(selected_window, "<|START|").unwrap();
+ }
+
+ write!(
+ selected_window,
+ "{}",
+ buffer.text_for_range(start..end).collect::<String>()
+ )
+ .unwrap();
+
+ if start != end {
+ write!(selected_window, "|END|>").unwrap();
+ }
+
+ let end_window = buffer.text_for_range(end..buffer.len()).collect::<String>();
+
+ if let Some(max_token_count) = max_token_count {
+ let selected_tokens = model.count_tokens(&selected_window)?;
+ if selected_tokens > max_token_count {
+ return Err(anyhow!(
+ "selected range is greater than model context window, truncation not possible"
+ ));
+ };
+
+ let mut remaining_tokens = max_token_count - selected_tokens;
+ let start_window_tokens = model.count_tokens(&start_window)?;
+ let end_window_tokens = model.count_tokens(&end_window)?;
+ let outside_tokens = start_window_tokens + end_window_tokens;
+ if outside_tokens > remaining_tokens {
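+ // Split the remaining budget between the text before and after the selection: the smaller window takes at most half, and any unused share goes to the larger one.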
+ let (start_goal_tokens, end_goal_tokens) =
+ if start_window_tokens < end_window_tokens {
+ let start_goal_tokens = (remaining_tokens / 2).min(start_window_tokens);
+ remaining_tokens -= start_goal_tokens;
+ let end_goal_tokens = remaining_tokens.min(end_window_tokens);
+ (start_goal_tokens, end_goal_tokens)
+ } else {
+ let end_goal_tokens = (remaining_tokens / 2).min(end_window_tokens);
+ remaining_tokens -= end_goal_tokens;
+ let start_goal_tokens = remaining_tokens.min(start_window_tokens);
+ (start_goal_tokens, end_goal_tokens)
+ };
+
+ let truncated_start_window =
+ model.truncate_start(&start_window, start_goal_tokens)?;
+ let truncated_end_window = model.truncate(&end_window, end_goal_tokens)?;
+ writeln!(
+ prompt,
+ "{truncated_start_window}{selected_window}{truncated_end_window}"
+ )
+ .unwrap();
+ truncated = true;
+ } else {
+ writeln!(prompt, "{start_window}{selected_window}{end_window}").unwrap();
+ }
+ } else {
+ // If we don't have a selected range, include the entire file.
+ writeln!(prompt, "{}", &buffer.text()).unwrap();
+
+ // Dumb truncation strategy
+ if let Some(max_token_count) = max_token_count {
+ if model.count_tokens(&prompt)? > max_token_count {
+ truncated = true;
+ prompt = model.truncate(&prompt, max_token_count)?;
+ }
+ }
+ }
+ }
+
+ let token_count = model.count_tokens(&prompt)?;
+ anyhow::Ok((prompt, token_count, truncated))
+}
+
+pub struct FileContext {}
+
+impl PromptTemplate for FileContext {
+ fn generate(
+ &self,
+ args: &PromptArguments,
+ max_token_length: Option<usize>,
+ ) -> anyhow::Result<(String, usize)> {
+ if let Some(buffer) = &args.buffer {
+ let mut prompt = String::new();
+ // Add Initial Preamble
+ // TODO: Do we want to add the path in here?
+ writeln!(
+ prompt,
+ "The file you are currently working on has the following content:"
+ )
+ .unwrap();
+
+ let language_name = args
+ .language_name
+ .clone()
+ .unwrap_or("".to_string())
+ .to_lowercase();
+
+ let (context, _, truncated) = retrieve_context(
+ buffer,
+ &args.selected_range,
+ args.model.clone(),
+ max_token_length,
+ )?;
+ writeln!(prompt, "```{language_name}\n{context}\n```").unwrap();
+
+ if truncated {
+ writeln!(prompt, "Note the content has been truncated and only represents a portion of the file.").unwrap();
+ }
+
+ if let Some(selected_range) = &args.selected_range {
+ let start = selected_range.start.to_offset(buffer);
+ let end = selected_range.end.to_offset(buffer);
+
+ if start == end {
+ writeln!(prompt, "In particular, the user's cursor is currently on the '<|START|>' span in the above content, with no text selected.").unwrap();
+ } else {
+ writeln!(prompt, "In particular, the user has selected a section of the text between the '<|START|' and '|END|>' spans.").unwrap();
+ }
+ }
+
+ // Really dumb truncation strategy
+ if let Some(max_tokens) = max_token_length {
+ prompt = args.model.truncate(&prompt, max_tokens)?;
+ }
+
+ let token_count = args.model.count_tokens(&prompt)?;
+ anyhow::Ok((prompt, token_count))
+ } else {
+ Err(anyhow!("no buffer provided to retrieve file context from"))
+ }
+ }
+}
diff --git a/crates/ai/src/templates/generate.rs b/crates/ai/src/templates/generate.rs
new file mode 100644
index 0000000000..1eeb197f93
--- /dev/null
+++ b/crates/ai/src/templates/generate.rs
@@ -0,0 +1,95 @@
+use crate::templates::base::{PromptArguments, PromptFileType, PromptTemplate};
+use anyhow::anyhow;
+use std::fmt::Write;
+
+pub fn capitalize(s: &str) -> String {
+ let mut c = s.chars();
+ match c.next() {
+ None => String::new(),
+ Some(f) => f.to_uppercase().collect::<String>() + c.as_str(),
+ }
+}
+
+pub struct GenerateInlineContent {}
+
+impl PromptTemplate for GenerateInlineContent {
+ fn generate(
+ &self,
+ args: &PromptArguments,
+ max_token_length: Option<usize>,
+ ) -> anyhow::Result<(String, usize)> {
+ let Some(user_prompt) = &args.user_prompt else {
+ return Err(anyhow!("user prompt not provided"));
+ };
+
+ let file_type = args.get_file_type();
+ let content_type = match &file_type {
+ PromptFileType::Code => "code",
+ PromptFileType::Text => "text",
+ };
+
+ let mut prompt = String::new();
+
+ if let Some(selected_range) = &args.selected_range {
+ if selected_range.start == selected_range.end {
+ writeln!(
+ prompt,
+ "Assume the cursor is located where the `<|START|>` span is."
+ )
+ .unwrap();
+ writeln!(
+ prompt,
+ "{} can't be replaced, so assume your answer will be inserted at the cursor.",
+ capitalize(content_type)
+ )
+ .unwrap();
+ writeln!(
+ prompt,
+ "Generate {content_type} based on the users prompt: {user_prompt}",
+ )
+ .unwrap();
+ } else {
+ writeln!(prompt, "Modify the user's selected {content_type} based upon the users prompt: '{user_prompt}'").unwrap();
+ writeln!(prompt, "You must reply with only the adjusted {content_type} (within the '<|START|' and '|END|>' spans) not the entire file.").unwrap();
+ writeln!(prompt, "Double check that you only return code and not the '<|START|' and '|END|'> spans").unwrap();
+ }
+ } else {
+ writeln!(
+ prompt,
+ "Generate {content_type} based on the users prompt: {user_prompt}"
+ )
+ .unwrap();
+ }
+
+ if let Some(language_name) = &args.language_name {
+ writeln!(
+ prompt,
+ "Your answer MUST always and only be valid {}.",
+ language_name
+ )
+ .unwrap();
+ }
+ writeln!(prompt, "Never make remarks about the output.").unwrap();
+ writeln!(
+ prompt,
+ "Do not return anything else, except the generated {content_type}."
+ )
+ .unwrap();
+
+ match file_type {
+ PromptFileType::Code => {
+ // writeln!(prompt, "Always wrap your code in a Markdown block.").unwrap();
+ }
+ _ => {}
+ }
+
+ // Really dumb truncation strategy
+ if let Some(max_tokens) = max_token_length {
+ prompt = args.model.truncate(&prompt, max_tokens)?;
+ }
+
+ let token_count = args.model.count_tokens(&prompt)?;
+
+ anyhow::Ok((prompt, token_count))
+ }
+}
diff --git a/crates/ai/src/templates/mod.rs b/crates/ai/src/templates/mod.rs
new file mode 100644
index 0000000000..0025269a44
--- /dev/null
+++ b/crates/ai/src/templates/mod.rs
@@ -0,0 +1,5 @@
+pub mod base;
+pub mod file_context;
+pub mod generate;
+pub mod preamble;
+pub mod repository_context;
diff --git a/crates/ai/src/templates/preamble.rs b/crates/ai/src/templates/preamble.rs
new file mode 100644
index 0000000000..9eabaaeb97
--- /dev/null
+++ b/crates/ai/src/templates/preamble.rs
@@ -0,0 +1,52 @@
+use crate::templates::base::{PromptArguments, PromptFileType, PromptTemplate};
+use std::fmt::Write;
+
+pub struct EngineerPreamble {}
+
+impl PromptTemplate for EngineerPreamble {
+ fn generate(
+ &self,
+ args: &PromptArguments,
+ max_token_length: Option<usize>,
+ ) -> anyhow::Result<(String, usize)> {
+ let mut prompts = Vec::new();
+
+ match args.get_file_type() {
+ PromptFileType::Code => {
+ prompts.push(format!(
+ "You are an expert {}engineer.",
+ args.language_name.clone().unwrap_or("".to_string()) + " "
+ ));
+ }
+ PromptFileType::Text => {
+ prompts.push("You are an expert engineer.".to_string());
+ }
+ }
+
+ if let Some(project_name) = args.project_name.clone() {
+ prompts.push(format!(
+ "You are currently working inside the '{project_name}' project in code editor Zed."
+ ));
+ }
+
+ if let Some(mut remaining_tokens) = max_token_length {
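+ // Append each preamble piece only if it still fits in the remaining token budget.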
+ let mut prompt = String::new();
+ let mut total_count = 0;
+ for prompt_piece in prompts {
+ let prompt_token_count =
+ args.model.count_tokens(&prompt_piece)? + args.model.count_tokens("\n")?;
+ if remaining_tokens > prompt_token_count {
+ writeln!(prompt, "{prompt_piece}").unwrap();
+ remaining_tokens -= prompt_token_count;
+ total_count += prompt_token_count;
+ }
+ }
+
+ anyhow::Ok((prompt, total_count))
+ } else {
+ let prompt = prompts.join("\n");
+ let token_count = args.model.count_tokens(&prompt)?;
+ anyhow::Ok((prompt, token_count))
+ }
+ }
+}
diff --git a/crates/ai/src/templates/repository_context.rs b/crates/ai/src/templates/repository_context.rs
new file mode 100644
index 0000000000..a8e7f4b5af
--- /dev/null
+++ b/crates/ai/src/templates/repository_context.rs
@@ -0,0 +1,94 @@
+use crate::templates::base::{PromptArguments, PromptTemplate};
+use std::fmt::Write;
+use std::{ops::Range, path::PathBuf};
+
+use gpui::{AsyncAppContext, ModelHandle};
+use language::{Anchor, Buffer};
+
+#[derive(Clone)]
+pub struct PromptCodeSnippet {
+ path: Option<PathBuf>,
+ language_name: Option<String>,
+ content: String,
+}
+
+impl PromptCodeSnippet {
+ pub fn new(buffer: ModelHandle<Buffer>, range: Range<Anchor>, cx: &AsyncAppContext) -> Self {
+ let (content, language_name, file_path) = buffer.read_with(cx, |buffer, _| {
+ let snapshot = buffer.snapshot();
+ let content = snapshot.text_for_range(range.clone()).collect::<String>();
+
+ let language_name = buffer
+ .language()
+ .and_then(|language| Some(language.name().to_string().to_lowercase()));
+
+ let file_path = buffer
+ .file()
+ .and_then(|file| Some(file.path().to_path_buf()));
+
+ (content, language_name, file_path)
+ });
+
+ PromptCodeSnippet {
+ path: file_path,
+ language_name,
+ content,
+ }
+ }
+}
+
+impl ToString for PromptCodeSnippet {
+ fn to_string(&self) -> String {
+ let path = self
+ .path
+ .as_ref()
+ .and_then(|path| Some(path.to_string_lossy().to_string()))
+ .unwrap_or("".to_string());
+ let language_name = self.language_name.clone().unwrap_or("".to_string());
+ let content = self.content.clone();
+
+ format!("The below code snippet may be relevant from file: {path}\n```{language_name}\n{content}\n```")
+ }
+}
+
+pub struct RepositoryContext {}
+
+impl PromptTemplate for RepositoryContext {
+ fn generate(
+ &self,
+ args: &PromptArguments,
+ max_token_length: Option<usize>,
+ ) -> anyhow::Result<(String, usize)> {
+ const MAXIMUM_SNIPPET_TOKEN_COUNT: usize = 500;
+ let template = "You are working inside a large repository, here are a few code snippets that may be useful.";
+ let mut prompt = String::new();
+
+ let mut remaining_tokens = max_token_length.clone();
+ let separator_token_length = args.model.count_tokens("\n")?;
+ for snippet in &args.snippets {
+ let mut snippet_prompt = template.to_string();
+ let content = snippet.to_string();
+ writeln!(snippet_prompt, "{content}").unwrap();
+
+ let token_count = args.model.count_tokens(&snippet_prompt)?;
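+ // Cap each snippet at MAXIMUM_SNIPPET_TOKEN_COUNT; when a budget is set, only add snippets that still fit.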
+ if token_count <= MAXIMUM_SNIPPET_TOKEN_COUNT {
+ if let Some(tokens_left) = remaining_tokens {
+ if tokens_left >= token_count {
+ writeln!(prompt, "{snippet_prompt}").unwrap();
+ remaining_tokens = if tokens_left >= (token_count + separator_token_length)
+ {
+ Some(tokens_left - token_count - separator_token_length)
+ } else {
+ Some(0)
+ };
+ }
+ } else {
+ writeln!(prompt, "{snippet_prompt}").unwrap();
+ }
+ }
+ }
+
+ let total_token_count = args.model.count_tokens(&prompt)?;
+ anyhow::Ok((prompt, total_token_count))
+ }
+}
diff --git a/crates/assistant/Cargo.toml b/crates/assistant/Cargo.toml
index f1daf47bab..256f4d8416 100644
--- a/crates/assistant/Cargo.toml
+++ b/crates/assistant/Cargo.toml
@@ -17,13 +17,17 @@ fs = { path = "../fs" }
gpui = { path = "../gpui" }
language = { path = "../language" }
menu = { path = "../menu" }
+multi_buffer = { path = "../multi_buffer" }
search = { path = "../search" }
settings = { path = "../settings" }
theme = { path = "../theme" }
util = { path = "../util" }
workspace = { path = "../workspace" }
-uuid.workspace = true
+semantic_index = { path = "../semantic_index" }
+project = { path = "../project" }
+uuid.workspace = true
+log.workspace = true
anyhow.workspace = true
chrono = { version = "0.4", features = ["serde"] }
futures.workspace = true
@@ -36,7 +40,7 @@ schemars.workspace = true
serde.workspace = true
serde_json.workspace = true
smol.workspace = true
-tiktoken-rs = "0.4"
+tiktoken-rs = "0.5"
[dev-dependencies]
editor = { path = "../editor", features = ["test-support"] }
diff --git a/crates/assistant/src/assistant_panel.rs b/crates/assistant/src/assistant_panel.rs
index b1c6038602..0dee8be510 100644
--- a/crates/assistant/src/assistant_panel.rs
+++ b/crates/assistant/src/assistant_panel.rs
@@ -5,8 +5,11 @@ use crate::{
MessageId, MessageMetadata, MessageStatus, Role, SavedConversation, SavedConversationMetadata,
SavedMessage,
};
-use ai::completion::{
- stream_completion, OpenAICompletionProvider, OpenAIRequest, RequestMessage, OPENAI_API_URL,
+use ai::{
+ completion::{
+ stream_completion, OpenAICompletionProvider, OpenAIRequest, RequestMessage, OPENAI_API_URL,
+ },
+ templates::repository_context::PromptCodeSnippet,
};
use anyhow::{anyhow, Result};
use chrono::{DateTime, Local};
@@ -29,13 +32,15 @@ use gpui::{
},
fonts::HighlightStyle,
geometry::vector::{vec2f, Vector2F},
- platform::{CursorStyle, MouseButton},
+ platform::{CursorStyle, MouseButton, PromptLevel},
Action, AnyElement, AppContext, AsyncAppContext, ClipboardItem, Element, Entity, ModelContext,
- ModelHandle, SizeConstraint, Subscription, Task, View, ViewContext, ViewHandle, WeakViewHandle,
- WindowContext,
+ ModelHandle, SizeConstraint, Subscription, Task, View, ViewContext, ViewHandle,
+ WeakModelHandle, WeakViewHandle, WindowContext,
};
use language::{language_settings::SoftWrap, Buffer, LanguageRegistry, ToOffset as _};
+use project::Project;
use search::BufferSearchBar;
+use semantic_index::{SemanticIndex, SemanticIndexStatus};
use settings::SettingsStore;
use std::{
cell::{Cell, RefCell},
@@ -46,7 +51,7 @@ use std::{
path::{Path, PathBuf},
rc::Rc,
sync::Arc,
- time::Duration,
+ time::{Duration, Instant},
};
use theme::{
components::{action_button::Button, ComponentExt},
@@ -72,6 +77,7 @@ actions!(
ResetKey,
InlineAssist,
ToggleIncludeConversation,
+ ToggleRetrieveContext,
]
);
@@ -108,6 +114,7 @@ pub fn init(cx: &mut AppContext) {
cx.add_action(InlineAssistant::confirm);
cx.add_action(InlineAssistant::cancel);
cx.add_action(InlineAssistant::toggle_include_conversation);
+ cx.add_action(InlineAssistant::toggle_retrieve_context);
cx.add_action(InlineAssistant::move_up);
cx.add_action(InlineAssistant::move_down);
}
@@ -145,6 +152,8 @@ pub struct AssistantPanel {
include_conversation_in_next_inline_assist: bool,
inline_prompt_history: VecDeque<String>,
_watch_saved_conversations: Task<Option<()>>,
+ semantic_index: Option<ModelHandle<SemanticIndex>>,
+ retrieve_context_in_next_inline_assist: bool,
}
impl AssistantPanel {
@@ -191,6 +200,9 @@ impl AssistantPanel {
toolbar.add_item(cx.add_view(|cx| BufferSearchBar::new(cx)), cx);
toolbar
});
+
+ let semantic_index = SemanticIndex::global(cx);
+
let mut this = Self {
workspace: workspace_handle,
active_editor_index: Default::default(),
@@ -215,6 +227,8 @@ impl AssistantPanel {
include_conversation_in_next_inline_assist: false,
inline_prompt_history: Default::default(),
_watch_saved_conversations,
+ semantic_index,
+ retrieve_context_in_next_inline_assist: false,
};
let mut old_dock_position = this.position(cx);
@@ -262,12 +276,19 @@ impl AssistantPanel {
return;
};
+ let project = workspace.project();
+
this.update(cx, |assistant, cx| {
- assistant.new_inline_assist(&active_editor, cx)
+ assistant.new_inline_assist(&active_editor, cx, project)
});
}
- fn new_inline_assist(&mut self, editor: &ViewHandle<Editor>, cx: &mut ViewContext<Self>) {
+ fn new_inline_assist(
+ &mut self,
+ editor: &ViewHandle<Editor>,
+ cx: &mut ViewContext<Self>,
+ project: &ModelHandle<Project>,
+ ) {
let api_key = if let Some(api_key) = self.api_key.borrow().clone() {
api_key
} else {
@@ -275,7 +296,7 @@ impl AssistantPanel {
};
let selection = editor.read(cx).selections.newest_anchor().clone();
- if selection.start.excerpt_id() != selection.end.excerpt_id() {
+ if selection.start.excerpt_id != selection.end.excerpt_id {
return;
}
let snapshot = editor.read(cx).buffer().read(cx).snapshot(cx);
@@ -312,6 +333,27 @@ impl AssistantPanel {
Codegen::new(editor.read(cx).buffer().clone(), codegen_kind, provider, cx)
});
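+ // If this project has been indexed before, kick off a refresh so retrieved context stays current.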
+ if let Some(semantic_index) = self.semantic_index.clone() {
+ let project = project.clone();
+ cx.spawn(|_, mut cx| async move {
+ let previously_indexed = semantic_index
+ .update(&mut cx, |index, cx| {
+ index.project_previously_indexed(&project, cx)
+ })
+ .await
+ .unwrap_or(false);
+ if previously_indexed {
+ let _ = semantic_index
+ .update(&mut cx, |index, cx| {
+ index.index_project(project.clone(), cx)
+ })
+ .await;
+ }
+ anyhow::Ok(())
+ })
+ .detach_and_log_err(cx);
+ }
+
let measurements = Rc::new(Cell::new(BlockMeasurements::default()));
let inline_assistant = cx.add_view(|cx| {
let assistant = InlineAssistant::new(
@@ -322,6 +364,9 @@ impl AssistantPanel {
codegen.clone(),
self.workspace.clone(),
cx,
+ self.retrieve_context_in_next_inline_assist,
+ self.semantic_index.clone(),
+ project.clone(),
);
cx.focus_self();
assistant
@@ -362,6 +407,7 @@ impl AssistantPanel {
editor: editor.downgrade(),
inline_assistant: Some((block_id, inline_assistant.clone())),
codegen: codegen.clone(),
+ project: project.downgrade(),
_subscriptions: vec![
cx.subscribe(&inline_assistant, Self::handle_inline_assistant_event),
cx.subscribe(editor, {
@@ -440,8 +486,15 @@ impl AssistantPanel {
InlineAssistantEvent::Confirmed {
prompt,
include_conversation,
+ retrieve_context,
} => {
- self.confirm_inline_assist(assist_id, prompt, *include_conversation, cx);
+ self.confirm_inline_assist(
+ assist_id,
+ prompt,
+ *include_conversation,
+ cx,
+ *retrieve_context,
+ );
}
InlineAssistantEvent::Canceled => {
self.finish_inline_assist(assist_id, true, cx);
@@ -454,6 +507,9 @@ impl AssistantPanel {
} => {
self.include_conversation_in_next_inline_assist = *include_conversation;
}
+ InlineAssistantEvent::RetrieveContextToggled { retrieve_context } => {
+ self.retrieve_context_in_next_inline_assist = *retrieve_context
+ }
}
}
@@ -532,6 +588,7 @@ impl AssistantPanel {
user_prompt: &str,
include_conversation: bool,
 cx: &mut ViewContext<Self>,
+ retrieve_context: bool,
) {
let conversation = if include_conversation {
self.active_editor()
@@ -553,6 +610,20 @@ impl AssistantPanel {
return;
};
+ let project = pending_assist.project.clone();
+
+ let project_name = project.upgrade(cx).map(|project| {
+ project
+ .read(cx)
+ .worktree_root_names(cx)
+ .collect::<Vec<&str>>()
+ .join("/")
+ });
+
self.inline_prompt_history
.retain(|prompt| prompt != user_prompt);
self.inline_prompt_history.push_back(user_prompt.into());
@@ -590,13 +661,70 @@ impl AssistantPanel {
None
};
- let codegen_kind = codegen.read(cx).kind().clone();
+ // A higher temperature increases the randomness of model outputs.
+ // If the language is Markdown or unknown, raise the temperature for more
+ // creative output; for code, lower it for more deterministic output.
+ let temperature = if let Some(language) = language_name.clone() {
+ if language.to_string() != "Markdown" {
+ 0.5
+ } else {
+ 1.0
+ }
+ } else {
+ 1.0
+ };
+
let user_prompt = user_prompt.to_string();
- let mut messages = Vec::new();
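+ // With context retrieval enabled, semantically search the project for the
+ // ten snippets most relevant to the user's prompt and fold them into the
+ // generated prompt below.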
+ let snippets = if retrieve_context {
+ let Some(project) = project.upgrade(cx) else {
+ return;
+ };
+
+ let search_results = if let Some(semantic_index) = self.semantic_index.clone() {
+ let search_results = semantic_index.update(cx, |this, cx| {
+ this.search_project(project, user_prompt.to_string(), 10, vec![], vec![], cx)
+ });
+
+ cx.background()
+ .spawn(async move { search_results.await.unwrap_or_default() })
+ } else {
+ Task::ready(Vec::new())
+ };
+
+ cx.spawn(|_, cx| async move {
+ let mut snippets = Vec::new();
+ for result in search_results.await {
+ snippets.push(PromptCodeSnippet::new(result.buffer, result.range, &cx));
+ }
+ snippets
+ })
+ } else {
+ Task::ready(Vec::new())
+ };
+
+ let mut model = settings::get::<AssistantSettings>(cx)
.default_open_ai_model
.clone();
+ let model_name = model.full_name();
+
+ let prompt = cx.background().spawn(async move {
+ let snippets = snippets.await;
+
+ let language_name = language_name.as_deref();
+ generate_content_prompt(
+ user_prompt,
+ language_name,
+ buffer,
+ range,
+ snippets,
+ model_name,
+ project_name,
+ )
+ });
+
+ let mut messages = Vec::new();
if let Some(conversation) = conversation {
let conversation = conversation.read(cx);
let buffer = conversation.buffer.read(cx);
@@ -608,24 +736,24 @@ impl AssistantPanel {
model = conversation.model.clone();
}
- let prompt = cx.background().spawn(async move {
- let language_name = language_name.as_deref();
- generate_content_prompt(user_prompt, language_name, &buffer, range, codegen_kind)
- });
-
cx.spawn(|_, mut cx| async move {
- let prompt = prompt.await;
+ // TODO: decide whether prompt-generation errors should propagate via `?`
+ // or be surfaced to the user instead.
+ let prompt = prompt.await?;
messages.push(RequestMessage {
role: Role::User,
content: prompt,
});
+
let request = OpenAIRequest {
model: model.full_name().into(),
messages,
stream: true,
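+ // Stop generation at the end-of-selection marker used in the prompt.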
+ stop: vec!["|END|>".to_string()],
+ temperature,
};
codegen.update(&mut cx, |codegen, cx| codegen.start(request, cx));
+ anyhow::Ok(())
})
.detach();
}
@@ -1514,12 +1642,14 @@ impl Conversation {
Role::Assistant => "assistant".into(),
Role::System => "system".into(),
},
- content: self
- .buffer
- .read(cx)
- .text_for_range(message.offset_range)
- .collect(),
+ content: Some(
+ self.buffer
+ .read(cx)
+ .text_for_range(message.offset_range)
+ .collect(),
+ ),
name: None,
+ function_call: None,
})
})
 .collect::<Vec<_>>();
@@ -1613,6 +1743,8 @@ impl Conversation {
.map(|message| message.to_open_ai_message(self.buffer.read(cx)))
.collect(),
stream: true,
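+ // Whole-conversation requests use no stop sequences and the default temperature.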
+ stop: vec![],
+ temperature: 1.0,
};
let stream = stream_completion(api_key, cx.background().clone(), request);
@@ -1897,6 +2029,8 @@ impl Conversation {
model: self.model.full_name().to_string(),
messages: messages.collect(),
stream: true,
+ stop: vec![],
+ temperature: 1.0,
};
let stream = stream_completion(api_key, cx.background().clone(), request);
@@ -2638,12 +2772,16 @@ enum InlineAssistantEvent {
Confirmed {
prompt: String,
include_conversation: bool,
+ retrieve_context: bool,
},
Canceled,
Dismissed,
IncludeConversationToggled {
include_conversation: bool,
},
+ RetrieveContextToggled {
+ retrieve_context: bool,
+ },
}
struct InlineAssistant {
@@ -2659,6 +2797,11 @@ struct InlineAssistant {
pending_prompt: String,
 codegen: ModelHandle<Codegen>,
 _subscriptions: Vec<Subscription>,
+ retrieve_context: bool,
+ semantic_index: Option<ModelHandle<SemanticIndex>>,
+ semantic_permissioned: Option<bool>,
+ project: WeakModelHandle<Project>,
+ maintain_rate_limit: Option<Task<()>>,
}
impl Entity for InlineAssistant {
@@ -2675,51 +2818,65 @@ impl View for InlineAssistant {
let theme = theme::current(cx);
Flex::row()
- .with_child(
- Flex::row()
- .with_child(
- Button::action(ToggleIncludeConversation)
- .with_tooltip("Include Conversation", theme.tooltip.clone())
+ .with_children([Flex::row()
+ .with_child(
+ Button::action(ToggleIncludeConversation)
+ .with_tooltip("Include Conversation", theme.tooltip.clone())
+ .with_id(self.id)
+ .with_contents(theme::components::svg::Svg::new("icons/ai.svg"))
+ .toggleable(self.include_conversation)
+ .with_style(theme.assistant.inline.include_conversation.clone())
+ .element()
+ .aligned(),
+ )
+ .with_children(if SemanticIndex::enabled(cx) {
+ Some(
+ Button::action(ToggleRetrieveContext)
+ .with_tooltip("Retrieve Context", theme.tooltip.clone())
.with_id(self.id)
- .with_contents(theme::components::svg::Svg::new("icons/ai.svg"))
- .toggleable(self.include_conversation)
- .with_style(theme.assistant.inline.include_conversation.clone())
+ .with_contents(theme::components::svg::Svg::new(
+ "icons/magnifying_glass.svg",
+ ))
+ .toggleable(self.retrieve_context)
+ .with_style(theme.assistant.inline.retrieve_context.clone())
.element()
.aligned(),
)
- .with_children(if let Some(error) = self.codegen.read(cx).error() {
- Some(
- Svg::new("icons/error.svg")
- .with_color(theme.assistant.error_icon.color)
- .constrained()
- .with_width(theme.assistant.error_icon.width)
- .contained()
- .with_style(theme.assistant.error_icon.container)
- .with_tooltip::(
- self.id,
- error.to_string(),
- None,
- theme.tooltip.clone(),
- cx,
- )
- .aligned(),
- )
- } else {
- None
- })
- .aligned()
- .constrained()
- .dynamically({
- let measurements = self.measurements.clone();
- move |constraint, _, _| {
- let measurements = measurements.get();
- SizeConstraint {
- min: vec2f(measurements.gutter_width, constraint.min.y()),
- max: vec2f(measurements.gutter_width, constraint.max.y()),
- }
+ } else {
+ None
+ })
+ .with_children(if let Some(error) = self.codegen.read(cx).error() {
+ Some(
+ Svg::new("icons/error.svg")
+ .with_color(theme.assistant.error_icon.color)
+ .constrained()
+ .with_width(theme.assistant.error_icon.width)
+ .contained()
+ .with_style(theme.assistant.error_icon.container)
+ .with_tooltip::(
+ self.id,
+ error.to_string(),
+ None,
+ theme.tooltip.clone(),
+ cx,
+ )
+ .aligned(),
+ )
+ } else {
+ None
+ })
+ .aligned()
+ .constrained()
+ .dynamically({
+ let measurements = self.measurements.clone();
+ move |constraint, _, _| {
+ let measurements = measurements.get();
+ SizeConstraint {
+ min: vec2f(measurements.gutter_width, constraint.min.y()),
+ max: vec2f(measurements.gutter_width, constraint.max.y()),
}
- }),
- )
+ }
+ })])
.with_child(Empty::new().constrained().dynamically({
let measurements = self.measurements.clone();
move |constraint, _, _| {
@@ -2742,6 +2899,16 @@ impl View for InlineAssistant {
.left()
.flex(1., true),
)
+ .with_children(if self.retrieve_context {
+ Some(
+ Flex::row()
+ .with_children(self.retrieve_context_status(cx))
+ .flex(1., true)
+ .aligned(),
+ )
+ } else {
+ None
+ })
.contained()
.with_style(theme.assistant.inline.container)
.into_any()
@@ -2767,6 +2934,9 @@ impl InlineAssistant {
 codegen: ModelHandle<Codegen>,
 workspace: WeakViewHandle<Workspace>,
 cx: &mut ViewContext<Self>,
+ retrieve_context: bool,
+ semantic_index: Option<ModelHandle<SemanticIndex>>,
+ project: ModelHandle<Project>,
) -> Self {
let prompt_editor = cx.add_view(|cx| {
let mut editor = Editor::single_line(
@@ -2780,11 +2950,16 @@ impl InlineAssistant {
editor.set_placeholder_text(placeholder, cx);
editor
});
- let subscriptions = vec![
+ let mut subscriptions = vec![
cx.observe(&codegen, Self::handle_codegen_changed),
cx.subscribe(&prompt_editor, Self::handle_prompt_editor_events),
];
- Self {
+
+ if let Some(semantic_index) = semantic_index.clone() {
+ subscriptions.push(cx.observe(&semantic_index, Self::semantic_index_changed));
+ }
+
+ let assistant = Self {
id,
prompt_editor,
workspace,
@@ -2797,7 +2972,33 @@ impl InlineAssistant {
pending_prompt: String::new(),
codegen,
_subscriptions: subscriptions,
+ retrieve_context,
+ semantic_permissioned: None,
+ semantic_index,
+ project: project.downgrade(),
+ maintain_rate_limit: None,
+ };
+
+ assistant.index_project(cx).log_err();
+
+ assistant
+ }
+
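+ /// Whether the user has permitted semantic indexing of this project:
+ /// returns the cached answer if present, otherwise falls back to whether
+ /// the project was previously indexed.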
+ fn semantic_permissioned(&self, cx: &mut ViewContext<Self>) -> Task<Result<bool>> {
+ if let Some(value) = self.semantic_permissioned {
+ return Task::ready(Ok(value));
}
+
+ let Some(project) = self.project.upgrade(cx) else {
+ return Task::ready(Err(anyhow!("project was dropped")));
+ };
+
+ self.semantic_index
+ .as_ref()
+ .map(|semantic| {
+ semantic.update(cx, |this, cx| this.project_previously_indexed(&project, cx))
+ })
+ .unwrap_or(Task::ready(Ok(false)))
}
fn handle_prompt_editor_events(
@@ -2812,6 +3013,37 @@ impl InlineAssistant {
}
}
+ fn semantic_index_changed(
+ &mut self,
+ semantic_index: ModelHandle<SemanticIndex>,
+ cx: &mut ViewContext<Self>,
+ ) {
+ let Some(project) = self.project.upgrade(cx) else {
+ return;
+ };
+
+ let status = semantic_index.read(cx).status(&project);
+ match status {
+ SemanticIndexStatus::Indexing {
+ rate_limit_expiry: Some(_),
+ ..
+ } => {
+ if self.maintain_rate_limit.is_none() {
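+ // Re-render once a second so the rate-limit countdown in the
+ // status icon stays current.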
+ self.maintain_rate_limit = Some(cx.spawn(|this, mut cx| async move {
+ loop {
+ cx.background().timer(Duration::from_secs(1)).await;
+ this.update(&mut cx, |_, cx| cx.notify()).log_err();
+ }
+ }));
+ }
+ return;
+ }
+ _ => {
+ self.maintain_rate_limit = None;
+ }
+ }
+ }
+
 fn handle_codegen_changed(&mut self, _: ModelHandle<Codegen>, cx: &mut ViewContext<Self>) {
let is_read_only = !self.codegen.read(cx).idle();
self.prompt_editor.update(cx, |editor, cx| {
@@ -2861,12 +3093,241 @@ impl InlineAssistant {
cx.emit(InlineAssistantEvent::Confirmed {
prompt,
include_conversation: self.include_conversation,
+ retrieve_context: self.retrieve_context,
});
self.confirmed = true;
cx.notify();
}
}
+ fn toggle_retrieve_context(&mut self, _: &ToggleRetrieveContext, cx: &mut ViewContext<Self>) {
+ let semantic_permissioned = self.semantic_permissioned(cx);
+
+ let Some(project) = self.project.upgrade(cx) else {
+ return;
+ };
+
+ let project_name = project
+ .read(cx)
+ .worktree_root_names(cx)
+ .collect::<Vec<&str>>()
+ .join("/");
+ let is_plural = project_name.contains('/');
+ let prompt_text = format!(
+ "Would you like to index the '{}' project{} for context retrieval? This requires sending code to the OpenAI API.",
+ project_name,
+ if is_plural { "s" } else { "" }
+ );
+
+ cx.spawn(|this, mut cx| async move {
+ // If necessary, prompt the user for indexing permission first.
+ if !semantic_permissioned.await.unwrap_or(false) {
+ let mut answer = this.update(&mut cx, |_, cx| {
+ cx.prompt(
+ PromptLevel::Info,
+ prompt_text.as_str(),
+ &["Continue", "Cancel"],
+ )
+ })?;
+
+ if answer.next().await == Some(0) {
+ this.update(&mut cx, |this, _| {
+ this.semantic_permissioned = Some(true);
+ })?;
+ } else {
+ return anyhow::Ok(());
+ }
+ }
+
+ // Once permission is granted, toggle context retrieval and index if enabled.
+ this.update(&mut cx, |this, cx| {
+ this.retrieve_context = !this.retrieve_context;
+
+ cx.emit(InlineAssistantEvent::RetrieveContextToggled {
+ retrieve_context: this.retrieve_context,
+ });
+
+ if this.retrieve_context {
+ this.index_project(cx).log_err();
+ }
+
+ cx.notify();
+ })?;
+
+ anyhow::Ok(())
+ })
+ .detach_and_log_err(cx);
+ }
+
+ fn index_project(&self, cx: &mut ViewContext<Self>) -> anyhow::Result<()> {
+ let Some(project) = self.project.upgrade(cx) else {
+ return Err(anyhow!("project was dropped!"));
+ };
+
+ let semantic_permissioned = self.semantic_permissioned(cx);
+ if let Some(semantic_index) = SemanticIndex::global(cx) {
+ cx.spawn(|_, mut cx| async move {
+ // This has to be updated to accommodate semantic permissions.
+ if semantic_permissioned.await.unwrap_or(false) {
+ semantic_index
+ .update(&mut cx, |index, cx| index.index_project(project, cx))
+ .await
+ } else {
+ Err(anyhow!("project is not permissioned for semantic indexing"))
+ }
+ })
+ .detach_and_log_err(cx);
+ }
+
+ anyhow::Ok(())
+ }
+
+ fn retrieve_context_status(
+ &self,
+ cx: &mut ViewContext<Self>,
+ ) -> Option<AnyElement<Self>> {
+ enum ContextStatusIcon {}
+
+ let Some(project) = self.project.upgrade(cx) else {
+ return None;
+ };
+
+ if let Some(semantic_index) = SemanticIndex::global(cx) {
+ let status = semantic_index.update(cx, |index, _| index.status(&project));
+ let theme = theme::current(cx);
+ match status {
+ SemanticIndexStatus::NotAuthenticated {} => Some(
+ Svg::new("icons/error.svg")
+ .with_color(theme.assistant.error_icon.color)
+ .constrained()
+ .with_width(theme.assistant.error_icon.width)
+ .contained()
+ .with_style(theme.assistant.error_icon.container)
+ .with_tooltip::<ContextStatusIcon>(
+ self.id,
+ "Not Authenticated. Please ensure you have a valid 'OPENAI_API_KEY' in your environment variables.",
+ None,
+ theme.tooltip.clone(),
+ cx,
+ )
+ .aligned()
+ .into_any(),
+ ),
+ SemanticIndexStatus::NotIndexed {} => Some(
+ Svg::new("icons/error.svg")
+ .with_color(theme.assistant.inline.context_status.error_icon.color)
+ .constrained()
+ .with_width(theme.assistant.inline.context_status.error_icon.width)
+ .contained()
+ .with_style(theme.assistant.inline.context_status.error_icon.container)
+ .with_tooltip::<ContextStatusIcon>(
+ self.id,
+ "Not Indexed",
+ None,
+ theme.tooltip.clone(),
+ cx,
+ )
+ .aligned()
+ .into_any(),
+ ),
+ SemanticIndexStatus::Indexing {
+ remaining_files,
+ rate_limit_expiry,
+ } => {
+ let mut status_text = if remaining_files == 0 {
+ "Indexing...".to_string()
+ } else {
+ format!("Remaining files to index: {remaining_files}")
+ };
+
+ if let Some(rate_limit_expiry) = rate_limit_expiry {
+ let remaining_seconds = rate_limit_expiry.duration_since(Instant::now());
+ if remaining_seconds > Duration::from_secs(0) && remaining_files > 0 {
+ write!(
+ status_text,
+ " (rate limit expires in {}s)",
+ remaining_seconds.as_secs()
+ )
+ .unwrap();
+ }
+ }
+ Some(
+ Svg::new("icons/update.svg")
+ .with_color(theme.assistant.inline.context_status.in_progress_icon.color)
+ .constrained()
+ .with_width(theme.assistant.inline.context_status.in_progress_icon.width)
+ .contained()
+ .with_style(theme.assistant.inline.context_status.in_progress_icon.container)
+ .with_tooltip::<ContextStatusIcon>(
+ self.id,
+ status_text,
+ None,
+ theme.tooltip.clone(),
+ cx,
+ )
+ .aligned()
+ .into_any(),
+ )
+ }
+ SemanticIndexStatus::Indexed {} => Some(
+ Svg::new("icons/check.svg")
+ .with_color(theme.assistant.inline.context_status.complete_icon.color)
+ .constrained()
+ .with_width(theme.assistant.inline.context_status.complete_icon.width)
+ .contained()
+ .with_style(theme.assistant.inline.context_status.complete_icon.container)
+ .with_tooltip::<ContextStatusIcon>(
+ self.id,
+ "Index up to date",
+ None,
+ theme.tooltip.clone(),
+ cx,
+ )
+ .aligned()
+ .into_any(),
+ ),
+ }
+ } else {
+ None
+ }
+ }
+
fn toggle_include_conversation(
&mut self,
_: &ToggleIncludeConversation,
@@ -2929,6 +3390,7 @@ struct PendingInlineAssist {
inline_assistant: Option<(BlockId, ViewHandle)>,
codegen: ModelHandle,
_subscriptions: Vec,
+ project: WeakModelHandle,
}
fn merge_ranges(ranges: &mut Vec>, buffer: &MultiBufferSnapshot) {
diff --git a/crates/assistant/src/codegen.rs b/crates/assistant/src/codegen.rs
index b6ef6b5cfa..6b79daba42 100644
--- a/crates/assistant/src/codegen.rs
+++ b/crates/assistant/src/codegen.rs
@@ -1,10 +1,11 @@
use crate::streaming_diff::{Hunk, StreamingDiff};
use ai::completion::{CompletionProvider, OpenAIRequest};
use anyhow::Result;
-use editor::{multi_buffer, Anchor, MultiBuffer, MultiBufferSnapshot, ToOffset, ToPoint};
+use editor::{Anchor, MultiBuffer, MultiBufferSnapshot, ToOffset, ToPoint};
use futures::{channel::mpsc, SinkExt, Stream, StreamExt};
use gpui::{Entity, ModelContext, ModelHandle, Task};
use language::{Rope, TransactionId};
+use multi_buffer;
use std::{cmp, future, ops::Range, sync::Arc};
pub enum Event {
diff --git a/crates/assistant/src/prompts.rs b/crates/assistant/src/prompts.rs
index d326a7f445..dffcbc2923 100644
--- a/crates/assistant/src/prompts.rs
+++ b/crates/assistant/src/prompts.rs
@@ -1,8 +1,13 @@
-use crate::codegen::CodegenKind;
+use ai::models::{LanguageModel, OpenAILanguageModel};
+use ai::templates::base::{PromptArguments, PromptChain, PromptPriority, PromptTemplate};
+use ai::templates::file_context::FileContext;
+use ai::templates::generate::GenerateInlineContent;
+use ai::templates::preamble::EngineerPreamble;
+use ai::templates::repository_context::{PromptCodeSnippet, RepositoryContext};
use language::{BufferSnapshot, OffsetRangeExt, ToOffset};
use std::cmp::{self, Reverse};
-use std::fmt::Write;
use std::ops::Range;
+use std::sync::Arc;
#[allow(dead_code)]
 fn summarize(buffer: &BufferSnapshot, selected_range: Range<impl ToOffset>) -> String {
@@ -118,86 +123,50 @@ fn summarize(buffer: &BufferSnapshot, selected_range: Range) -> S
pub fn generate_content_prompt(
user_prompt: String,
language_name: Option<&str>,
- buffer: &BufferSnapshot,
- range: Range<impl ToOffset>,
- kind: CodegenKind,
-) -> String {
- let range = range.to_offset(buffer);
- let mut prompt = String::new();
-
- // General Preamble
- if let Some(language_name) = language_name {
- writeln!(prompt, "You're an expert {language_name} engineer.\n").unwrap();
+ buffer: BufferSnapshot,
+ range: Range<usize>,
+ search_results: Vec<PromptCodeSnippet>,
+ model: &str,
+ project_name: Option<String>,
+) -> anyhow::Result<String> {
+ // Build the prompt using the new prompt-template chain.
+ let openai_model: Arc<dyn LanguageModel> = Arc::new(OpenAILanguageModel::load(model));
+ let lang_name = if let Some(language_name) = language_name {
+ Some(language_name.to_string())
} else {
- writeln!(prompt, "You're an expert engineer.\n").unwrap();
- }
+ None
+ };
- let mut content = String::new();
- content.extend(buffer.text_for_range(0..range.start));
- if range.start == range.end {
- content.push_str("<|START|>");
- } else {
- content.push_str("<|START|");
- }
- content.extend(buffer.text_for_range(range.clone()));
- if range.start != range.end {
- content.push_str("|END|>");
- }
- content.extend(buffer.text_for_range(range.end..buffer.len()));
+ let args = PromptArguments {
+ model: openai_model,
+ language_name: lang_name.clone(),
+ project_name,
+ snippets: search_results.clone(),
+ reserved_tokens: 1000,
+ buffer: Some(buffer),
+ selected_range: Some(range),
+ user_prompt: Some(user_prompt.clone()),
+ };
- writeln!(
- prompt,
- "The file you are currently working on has the following content:"
- )
- .unwrap();
- if let Some(language_name) = language_name {
- let language_name = language_name.to_lowercase();
- writeln!(prompt, "```{language_name}\n{content}\n```").unwrap();
- } else {
- writeln!(prompt, "```\n{content}\n```").unwrap();
- }
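+ // Mandatory templates are always included; `Ordered` templates carry a
+ // relative priority the chain can use when content must be trimmed to fit
+ // the model's context window.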
+ let templates: Vec<(PromptPriority, Box<dyn PromptTemplate>)> = vec![
+ (PromptPriority::Mandatory, Box::new(EngineerPreamble {})),
+ (
+ PromptPriority::Ordered { order: 1 },
+ Box::new(RepositoryContext {}),
+ ),
+ (
+ PromptPriority::Ordered { order: 0 },
+ Box::new(FileContext {}),
+ ),
+ (
+ PromptPriority::Mandatory,
+ Box::new(GenerateInlineContent {}),
+ ),
+ ];
+ let chain = PromptChain::new(args, templates);
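+ // The `true` argument allows the chain to truncate lower-priority content
+ // to fit within the model's context window.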
+ let (prompt, _) = chain.generate(true)?;
- match kind {
- CodegenKind::Generate { position: _ } => {
- writeln!(prompt, "In particular, the user's cursor is current on the '<|START|>' span in the above outline, with no text selected.").unwrap();
- writeln!(
- prompt,
- "Assume the cursor is located where the `<|START|` marker is."
- )
- .unwrap();
- writeln!(
- prompt,
- "Text can't be replaced, so assume your answer will be inserted at the cursor."
- )
- .unwrap();
- writeln!(
- prompt,
- "Generate text based on the users prompt: {user_prompt}"
- )
- .unwrap();
- }
- CodegenKind::Transform { range: _ } => {
- writeln!(prompt, "In particular, the user has selected a section of the text between the '<|START|' and '|END|>' spans.").unwrap();
- writeln!(
- prompt,
- "Modify the users code selected text based upon the users prompt: {user_prompt}"
- )
- .unwrap();
- writeln!(
- prompt,
- "You MUST reply with only the adjusted code (within the '<|START|' and '|END|>' spans), not the entire file."
- )
- .unwrap();
- }
- }
-
- if let Some(language_name) = language_name {
- writeln!(prompt, "Your answer MUST always be valid {language_name}").unwrap();
- }
- writeln!(prompt, "Always wrap your response in a Markdown codeblock").unwrap();
- writeln!(prompt, "Never make remarks about the output.").unwrap();
-
- prompt
+ anyhow::Ok(prompt)
}
#[cfg(test)]
diff --git a/crates/call/src/call.rs b/crates/call/src/call.rs
index 0846341325..ca1a60bd63 100644
--- a/crates/call/src/call.rs
+++ b/crates/call/src/call.rs
@@ -10,7 +10,7 @@ use client::{
ZED_ALWAYS_ACTIVE,
};
use collections::HashSet;
-use futures::{future::Shared, FutureExt};
+use futures::{channel::oneshot, future::Shared, Future, FutureExt};
use gpui::{
AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle, Subscription, Task,
WeakModelHandle,
@@ -37,10 +37,42 @@ pub struct IncomingCall {
pub initial_project: Option,
}
+pub struct OneAtATime {
+ cancel: Option<oneshot::Sender<()>>,
+}
+
+impl OneAtATime {
+ /// Spawns a task in the given context.
+ /// If another task is spawned before the first resolves, or if the
+ /// `OneAtATime` itself is dropped, the first task is cancelled and
+ /// resolves to `Ok(None)`; otherwise it yields the result of the task.
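+ ///
+ /// A minimal usage sketch (illustrative only, assuming a `cx: &mut AppContext`):
+ /// ```ignore
+ /// let mut call = OneAtATime { cancel: None };
+ /// let first = call.spawn(cx, |_cx| async { anyhow::Ok(1) });
+ /// let second = call.spawn(cx, |_cx| async { anyhow::Ok(2) });
+ /// // `first` now resolves to Ok(None); `second` resolves to Ok(Some(2)).
+ /// ```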
+ fn spawn<F, Fut, R>(&mut self, cx: &mut AppContext, f: F) -> Task<Result<Option<R>>>
+ where
+ F: 'static + FnOnce(AsyncAppContext) -> Fut,
+ Fut: Future<Output = Result<R>>,