Limit project search to avoid unresponsive app (#9404)

This fixes [#9135](https://github.com/zed-industries/zed/issues/9135)
by introducing file and result limits to project search.

It does this by changing how project search works in multiple ways.

User-facing changes:

- The number of files that are searched is now limited to 5000
- The number of search results across all files is now limited to 10000
- If a limit is reached, the search is stopped and a message is displayed
  to the user (see the sketch after this list)
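
Below is a minimal, self-contained sketch of the capping strategy, written against the
`futures` crate directly rather than Zed's project internals (the channel setup and the
fake paths are illustrative only; the constant name mirrors the one in the diff below):

```rust
use futures::{channel::mpsc, executor::block_on, SinkExt, StreamExt};

const MAX_SEARCH_RESULT_FILES: usize = 5_000;

fn main() {
    block_on(async {
        let (mut tx, rx) = mpsc::unbounded::<String>();

        // Pretend a background scan reported more candidate files than the limit allows.
        for i in 0..6_000 {
            tx.send(format!("src/file_{i}.rs")).await.unwrap();
        }
        drop(tx);

        // Collect at most limit + 1 candidates; the extra element only tells us whether
        // the limit was exceeded and is discarded immediately.
        let mut matching_paths = rx
            .take(MAX_SEARCH_RESULT_FILES + 1)
            .collect::<Vec<_>>()
            .await;
        let limit_reached = if matching_paths.len() > MAX_SEARCH_RESULT_FILES {
            matching_paths.pop();
            true
        } else {
            false
        };

        assert_eq!(matching_paths.len(), MAX_SEARCH_RESULT_FILES);
        assert!(limit_reached);
        println!(
            "kept {} candidates, limit reached: {limit_reached}",
            matching_paths.len()
        );
    });
}
```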

Under the hood, we also reworked `Project::search_local`:

- The code has been refactored so that the concurrency logic is easier to
  distinguish from the search logic.
- We now limit the number of concurrent `open_buffer` operations, since
  those run on the main thread and can lead to beachballs when a search
  finds a lot of results (see the sketch after this list).
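
To keep buffer opening bounded, matching files are processed in fixed-size batches
rather than all at once. The following is a rough sketch of that batching pattern using
plain `futures` and a stand-in `open_and_search` function (not Zed's actual
`open_buffer` API); the chunk size of 64 matches the one used in the diff below:

```rust
use futures::{executor::block_on, future::join_all};

// Mirrors the chunk size used in the diff below; everything else here is a stand-in.
const CHUNK_SIZE: usize = 64;

// Stand-in for "open the buffer, then search it for matches".
async fn open_and_search(path: String) -> usize {
    // Pretend the number of matches is derived from the path.
    path.len() % 3
}

fn main() {
    let candidates: Vec<String> = (0..300).map(|i| format!("src/file_{i}.rs")).collect();

    let total_matches: usize = block_on(async {
        let mut total = 0;
        for chunk in candidates.chunks(CHUNK_SIZE) {
            // At most CHUNK_SIZE futures are in flight; the next batch only starts
            // once the previous one has fully resolved.
            let results = join_all(chunk.iter().cloned().map(open_and_search)).await;
            total += results.into_iter().sum::<usize>();
        }
        total
    });

    println!(
        "processed {} candidates, {total_matches} total matches",
        candidates.len()
    );
}
```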

Note for reviewer:

@SomeoneToIgnore since you know this code, can you take a look at this?
The changes might look bigger than they are in certain places because I
only extracted code into functions, but the middle part (the sorting of
file paths) has changed in order to avoid too many tasks opening
buffers at the same time and making the app unresponsive.

What's also curious is that I think there was a bug where we searched
ignored entries _twice_: once in `search_snapshots` and then later in
the dedicated `search_ignored_entry` function. I changed the `entries()`
call in `search_snapshots` so that it's always `false`, but that caused
tests to fail (see `test_search_in_gitignored_dirs`). @bennetbo and I
think that there's some state in the Project that made the tests pass
before, because the last of the 3 assertions in that test only passes
when the other two queries run. So we changed the test to be more
stateless and included the possible fix in `search_snapshots`.
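
For illustration, here is a tiny, self-contained sketch of the intended split (simplified
stand-in types, not Zed's worktree API): the regular pass skips ignored entries entirely,
and ignored entries are only ever visited by the dedicated pass:

```rust
// Simplified stand-in for a worktree entry.
struct Entry {
    path: &'static str,
    is_ignored: bool,
}

// Counterpart of the non-ignored pass: like `snapshot.files(false, ..)`,
// ignored entries are never visited here.
fn search_non_ignored(entries: &[Entry]) -> Vec<&'static str> {
    entries.iter().filter(|e| !e.is_ignored).map(|e| e.path).collect()
}

// Counterpart of the dedicated ignored pass: only runs when the query
// includes ignored files, and only ever visits ignored entries.
fn search_ignored(entries: &[Entry]) -> Vec<&'static str> {
    entries.iter().filter(|e| e.is_ignored).map(|e| e.path).collect()
}

fn main() {
    let entries = [
        Entry { path: "src/main.rs", is_ignored: false },
        Entry { path: "node_modules/prettier/index.js", is_ignored: true },
    ];
    let regular = search_non_ignored(&entries);
    let ignored = search_ignored(&entries);
    // Every entry is visited by exactly one of the two passes.
    assert!(regular.iter().all(|path| !ignored.contains(path)));
    println!("regular: {regular:?}, ignored: {ignored:?}");
}
```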

Release Notes:

- Fixed project-wide search leading to an unresponsive application when
searching in ignored files, by limiting the number of files that are
searched (to 5000) and the number of overall search results (to 10000).
Additional performance improvements have also been made to offload more
work onto a background thread.
([#9135](https://github.com/zed-industries/zed/issues/9135)).

---------

Co-authored-by: Antonio Scandurra <antonio@zed.dev>
Co-authored-by: Bennet <bennetbo@gmx.de>
Thorsten Ball 2024-03-18 10:49:27 +01:00 committed by GitHub
parent 52a9bc3e1b
commit a69eddc081
6 changed files with 373 additions and 357 deletions

View File

@@ -20,6 +20,7 @@ use live_kit_client::MacOSDisplay;
use lsp::LanguageServerId;
use project::{
search::SearchQuery, DiagnosticSummary, FormatTrigger, HoverBlockKind, Project, ProjectPath,
SearchResult,
};
use rand::prelude::*;
use serde_json::json;
@@ -4772,8 +4773,15 @@ async fn test_project_search(
cx,
)
});
while let Some((buffer, ranges)) = search_rx.next().await {
results.entry(buffer).or_insert(ranges);
while let Some(result) = search_rx.next().await {
match result {
SearchResult::Buffer { buffer, ranges } => {
results.entry(buffer).or_insert(ranges);
}
SearchResult::LimitReached => {
panic!("Unexpectedly reached search limit in tests. If you do want to assert limit-reached, change this panic call.")
}
};
}
let mut ranges_by_path = results

View File

@@ -13,7 +13,7 @@ use language::{
};
use lsp::FakeLanguageServer;
use pretty_assertions::assert_eq;
use project::{search::SearchQuery, Project, ProjectPath};
use project::{search::SearchQuery, Project, ProjectPath, SearchResult};
use rand::{
distributions::{Alphanumeric, DistString},
prelude::*,
@@ -879,8 +879,10 @@ impl RandomizedTest for ProjectCollaborationTest {
drop(project);
let search = cx.executor().spawn(async move {
let mut results = HashMap::default();
while let Some((buffer, ranges)) = search.next().await {
results.entry(buffer).or_insert(ranges);
while let Some(result) = search.next().await {
if let SearchResult::Buffer { buffer, ranges } = result {
results.entry(buffer).or_insert(ranges);
}
}
results
});

View File

@@ -479,6 +479,7 @@ impl FormatTrigger {
}
}
}
#[derive(Clone, Debug, PartialEq)]
enum SearchMatchCandidate {
OpenBuffer {
@@ -493,7 +494,6 @@ enum SearchMatchCandidate {
},
}
type SearchMatchCandidateIndex = usize;
impl SearchMatchCandidate {
fn path(&self) -> Option<Arc<Path>> {
match self {
@@ -501,6 +501,24 @@ impl SearchMatchCandidate {
SearchMatchCandidate::Path { path, .. } => Some(path.clone()),
}
}
fn is_ignored(&self) -> bool {
matches!(
self,
SearchMatchCandidate::Path {
is_ignored: true,
..
}
)
}
}
pub enum SearchResult {
Buffer {
buffer: Model<Buffer>,
ranges: Vec<Range<Anchor>>,
},
LimitReached,
}
impl Project {
@@ -6098,7 +6116,7 @@ impl Project {
&self,
query: SearchQuery,
cx: &mut ModelContext<Self>,
) -> Receiver<(Model<Buffer>, Vec<Range<Anchor>>)> {
) -> Receiver<SearchResult> {
if self.is_local() {
self.search_local(query, cx)
} else if let Some(project_id) = self.remote_id() {
@@ -6128,8 +6146,13 @@
.push(start..end)
}
for (buffer, ranges) in result {
let _ = tx.send((buffer, ranges)).await;
let _ = tx.send(SearchResult::Buffer { buffer, ranges }).await;
}
if response.limit_reached {
let _ = tx.send(SearchResult::LimitReached).await;
}
Result::<(), anyhow::Error>::Ok(())
})
.detach_and_log_err(cx);
@@ -6143,7 +6166,7 @@
&self,
query: SearchQuery,
cx: &mut ModelContext<Self>,
) -> Receiver<(Model<Buffer>, Vec<Range<Anchor>>)> {
) -> Receiver<SearchResult> {
// Local search is split into several phases.
// TL;DR is that we do 2 passes; initial pass to pick files which contain at least one match
// and the second phase that finds positions of all the matches found in the candidate files.
@@ -6246,104 +6269,90 @@
))
.detach();
let (buffers, buffers_rx) = Self::sort_candidates_and_open_buffers(matching_paths_rx, cx);
let background = cx.background_executor().clone();
let (result_tx, result_rx) = smol::channel::bounded(1024);
cx.background_executor()
.spawn(async move {
let Ok(buffers) = buffers.await else {
return;
};
let buffers_len = buffers.len();
if buffers_len == 0 {
return;
cx.spawn(|this, mut cx| async move {
const MAX_SEARCH_RESULT_FILES: usize = 5_000;
const MAX_SEARCH_RESULT_RANGES: usize = 10_000;
let mut matching_paths = matching_paths_rx
.take(MAX_SEARCH_RESULT_FILES + 1)
.collect::<Vec<_>>()
.await;
let mut limit_reached = if matching_paths.len() > MAX_SEARCH_RESULT_FILES {
matching_paths.pop();
true
} else {
false
};
matching_paths.sort_by_key(|candidate| (candidate.is_ignored(), candidate.path()));
let mut range_count = 0;
let query = Arc::new(query);
// Now that we know what paths match the query, we will load at most
// 64 buffers at a time to avoid overwhelming the main thread. For each
// opened buffer, we will spawn a background task that retrieves all the
// ranges in the buffer matched by the query.
'outer: for matching_paths_chunk in matching_paths.chunks(64) {
let mut chunk_results = Vec::new();
for matching_path in matching_paths_chunk {
let query = query.clone();
let buffer = match matching_path {
SearchMatchCandidate::OpenBuffer { buffer, .. } => {
Task::ready(Ok(buffer.clone()))
}
SearchMatchCandidate::Path {
worktree_id, path, ..
} => this.update(&mut cx, |this, cx| {
this.open_buffer((*worktree_id, path.clone()), cx)
})?,
};
chunk_results.push(cx.spawn(|cx| async move {
let buffer = buffer.await?;
let snapshot = buffer.read_with(&cx, |buffer, _| buffer.snapshot())?;
let ranges = cx
.background_executor()
.spawn(async move {
query
.search(&snapshot, None)
.await
.iter()
.map(|range| {
snapshot.anchor_before(range.start)
..snapshot.anchor_after(range.end)
})
.collect::<Vec<_>>()
})
.await;
anyhow::Ok((buffer, ranges))
}));
}
let query = &query;
let (finished_tx, mut finished_rx) = smol::channel::unbounded();
background
.scoped(|scope| {
#[derive(Clone)]
struct FinishedStatus {
entry: Option<(Model<Buffer>, Vec<Range<Anchor>>)>,
buffer_index: SearchMatchCandidateIndex,
}
for _ in 0..workers {
let finished_tx = finished_tx.clone();
let mut buffers_rx = buffers_rx.clone();
scope.spawn(async move {
while let Some((entry, buffer_index)) = buffers_rx.next().await {
let buffer_matches = if let Some((_, snapshot)) = entry.as_ref()
{
query
.search(snapshot, None)
.await
.iter()
.map(|range| {
snapshot.anchor_before(range.start)
..snapshot.anchor_after(range.end)
})
.collect()
} else {
Vec::new()
};
let status = if !buffer_matches.is_empty() {
let entry = if let Some((buffer, _)) = entry.as_ref() {
Some((buffer.clone(), buffer_matches))
} else {
None
};
FinishedStatus {
entry,
buffer_index,
}
} else {
FinishedStatus {
entry: None,
buffer_index,
}
};
if finished_tx.send(status).await.is_err() {
break;
}
}
});
let chunk_results = futures::future::join_all(chunk_results).await;
for result in chunk_results {
if let Some((buffer, ranges)) = result.log_err() {
range_count += ranges.len();
result_tx
.send(SearchResult::Buffer { buffer, ranges })
.await?;
if range_count > MAX_SEARCH_RESULT_RANGES {
limit_reached = true;
break 'outer;
}
// Report sorted matches
scope.spawn(async move {
let mut current_index = 0;
let mut scratch = vec![None; buffers_len];
while let Some(status) = finished_rx.next().await {
debug_assert!(
scratch[status.buffer_index].is_none(),
"Got match status of position {} twice",
status.buffer_index
);
let index = status.buffer_index;
scratch[index] = Some(status);
while current_index < buffers_len {
let Some(current_entry) = scratch[current_index].take() else {
// We intentionally **do not** increment `current_index` here. When next element arrives
// from `finished_rx`, we will inspect the same position again, hoping for it to be Some(_)
// this time.
break;
};
if let Some(entry) = current_entry.entry {
result_tx.send(entry).await.log_err();
}
current_index += 1;
}
if current_index == buffers_len {
break;
}
}
});
})
.await;
})
.detach();
}
}
}
if limit_reached {
result_tx.send(SearchResult::LimitReached).await?;
}
anyhow::Ok(())
})
.detach();
result_rx
}
@@ -6365,7 +6374,6 @@ impl Project {
let query = &query;
let matching_paths_tx = &matching_paths_tx;
let snapshots = &snapshots;
let paths_per_worker = (path_count + workers - 1) / workers;
for buffer in unnamed_buffers {
matching_paths_tx
.send(SearchMatchCandidate::OpenBuffer {
@@ -6384,6 +6392,9 @@
.await
.log_err();
}
let paths_per_worker = (path_count + workers - 1) / workers;
executor
.scoped(|scope| {
let max_concurrent_workers = Arc::new(Semaphore::new(workers));
@@ -6391,166 +6402,40 @@
for worker_ix in 0..workers {
let worker_start_ix = worker_ix * paths_per_worker;
let worker_end_ix = worker_start_ix + paths_per_worker;
let unnamed_buffers = opened_buffers.clone();
let opened_buffers = opened_buffers.clone();
let limiter = Arc::clone(&max_concurrent_workers);
scope.spawn(async move {
let _guard = limiter.acquire().await;
let mut snapshot_start_ix = 0;
let mut abs_path = PathBuf::new();
for snapshot in snapshots {
let snapshot_end_ix = snapshot_start_ix
+ if query.include_ignored() {
snapshot.file_count()
} else {
snapshot.visible_file_count()
};
if worker_end_ix <= snapshot_start_ix {
break;
} else if worker_start_ix > snapshot_end_ix {
snapshot_start_ix = snapshot_end_ix;
continue;
} else {
let start_in_snapshot =
worker_start_ix.saturating_sub(snapshot_start_ix);
let end_in_snapshot =
cmp::min(worker_end_ix, snapshot_end_ix) - snapshot_start_ix;
for entry in snapshot
.files(query.include_ignored(), start_in_snapshot)
.take(end_in_snapshot - start_in_snapshot)
{
if matching_paths_tx.is_closed() {
break;
}
if unnamed_buffers.contains_key(&entry.path) {
continue;
}
let matched_path = if include_root {
let mut full_path = PathBuf::from(snapshot.root_name());
full_path.push(&entry.path);
query.file_matches(Some(&full_path))
} else {
query.file_matches(Some(&entry.path))
};
let matches = if matched_path {
abs_path.clear();
abs_path.push(&snapshot.abs_path());
abs_path.push(&entry.path);
if let Some(file) = fs.open_sync(&abs_path).await.log_err()
{
query.detect(file).unwrap_or(false)
} else {
false
}
} else {
false
};
if matches {
let project_path = SearchMatchCandidate::Path {
worktree_id: snapshot.id(),
path: entry.path.clone(),
is_ignored: entry.is_ignored,
};
if matching_paths_tx.send(project_path).await.is_err() {
break;
}
}
}
snapshot_start_ix = snapshot_end_ix;
}
scope.spawn({
async move {
let _guard = limiter.acquire().await;
search_snapshots(
snapshots,
worker_start_ix,
worker_end_ix,
query,
matching_paths_tx,
&opened_buffers,
include_root,
fs,
)
.await;
}
});
}
if query.include_ignored() {
for snapshot in snapshots {
for ignored_entry in snapshot
.entries(query.include_ignored())
.filter(|e| e.is_ignored)
{
for ignored_entry in snapshot.entries(true).filter(|e| e.is_ignored) {
let limiter = Arc::clone(&max_concurrent_workers);
scope.spawn(async move {
let _guard = limiter.acquire().await;
let mut ignored_paths_to_process =
VecDeque::from([snapshot.abs_path().join(&ignored_entry.path)]);
while let Some(ignored_abs_path) =
ignored_paths_to_process.pop_front()
{
if let Some(fs_metadata) = fs
.metadata(&ignored_abs_path)
.await
.with_context(|| {
format!("fetching fs metadata for {ignored_abs_path:?}")
})
.log_err()
.flatten()
{
if fs_metadata.is_dir {
if let Some(mut subfiles) = fs
.read_dir(&ignored_abs_path)
.await
.with_context(|| {
format!(
"listing ignored path {ignored_abs_path:?}"
)
})
.log_err()
{
while let Some(subfile) = subfiles.next().await {
if let Some(subfile) = subfile.log_err() {
ignored_paths_to_process.push_back(subfile);
}
}
}
} else if !fs_metadata.is_symlink {
if !query.file_matches(Some(&ignored_abs_path))
|| snapshot.is_path_excluded(
ignored_entry.path.to_path_buf(),
)
{
continue;
}
let matches = if let Some(file) = fs
.open_sync(&ignored_abs_path)
.await
.with_context(|| {
format!(
"Opening ignored path {ignored_abs_path:?}"
)
})
.log_err()
{
query.detect(file).unwrap_or(false)
} else {
false
};
if matches {
let project_path = SearchMatchCandidate::Path {
worktree_id: snapshot.id(),
path: Arc::from(
ignored_abs_path
.strip_prefix(snapshot.abs_path())
.expect(
"scanning worktree-related files",
),
),
is_ignored: true,
};
if matching_paths_tx
.send(project_path)
.await
.is_err()
{
return;
}
}
}
}
}
search_ignored_entry(
snapshot,
ignored_entry,
fs,
query,
matching_paths_tx,
)
.await;
});
}
}
@@ -6648,76 +6533,6 @@ impl Project {
})
}
fn sort_candidates_and_open_buffers(
mut matching_paths_rx: Receiver<SearchMatchCandidate>,
cx: &mut ModelContext<Self>,
) -> (
futures::channel::oneshot::Receiver<Vec<SearchMatchCandidate>>,
Receiver<(
Option<(Model<Buffer>, BufferSnapshot)>,
SearchMatchCandidateIndex,
)>,
) {
let (buffers_tx, buffers_rx) = smol::channel::bounded(1024);
let (sorted_buffers_tx, sorted_buffers_rx) = futures::channel::oneshot::channel();
cx.spawn(move |this, cx| async move {
let mut buffers = Vec::new();
let mut ignored_buffers = Vec::new();
while let Some(entry) = matching_paths_rx.next().await {
if matches!(
entry,
SearchMatchCandidate::Path {
is_ignored: true,
..
}
) {
ignored_buffers.push(entry);
} else {
buffers.push(entry);
}
}
buffers.sort_by_key(|candidate| candidate.path());
ignored_buffers.sort_by_key(|candidate| candidate.path());
buffers.extend(ignored_buffers);
let matching_paths = buffers.clone();
let _ = sorted_buffers_tx.send(buffers);
for (index, candidate) in matching_paths.into_iter().enumerate() {
if buffers_tx.is_closed() {
break;
}
let this = this.clone();
let buffers_tx = buffers_tx.clone();
cx.spawn(move |mut cx| async move {
let buffer = match candidate {
SearchMatchCandidate::OpenBuffer { buffer, .. } => Some(buffer),
SearchMatchCandidate::Path {
worktree_id, path, ..
} => this
.update(&mut cx, |this, cx| {
this.open_buffer((worktree_id, path), cx)
})?
.await
.log_err(),
};
if let Some(buffer) = buffer {
let snapshot = buffer.update(&mut cx, |buffer, _| buffer.snapshot())?;
buffers_tx
.send((Some((buffer, snapshot)), index))
.await
.log_err();
} else {
buffers_tx.send((None, index)).await.log_err();
}
Ok::<_, anyhow::Error>(())
})
.detach();
}
})
.detach();
(sorted_buffers_rx, buffers_rx)
}
pub fn find_or_create_local_worktree(
&mut self,
abs_path: impl AsRef<Path>,
@@ -8549,21 +8364,30 @@ impl Project {
cx.spawn(move |mut cx| async move {
let mut locations = Vec::new();
while let Some((buffer, ranges)) = result.next().await {
for range in ranges {
let start = serialize_anchor(&range.start);
let end = serialize_anchor(&range.end);
let buffer_id = this.update(&mut cx, |this, cx| {
this.create_buffer_for_peer(&buffer, peer_id, cx).into()
})?;
locations.push(proto::Location {
buffer_id,
start: Some(start),
end: Some(end),
});
let mut limit_reached = false;
while let Some(result) = result.next().await {
match result {
SearchResult::Buffer { buffer, ranges } => {
for range in ranges {
let start = serialize_anchor(&range.start);
let end = serialize_anchor(&range.end);
let buffer_id = this.update(&mut cx, |this, cx| {
this.create_buffer_for_peer(&buffer, peer_id, cx).into()
})?;
locations.push(proto::Location {
buffer_id,
start: Some(start),
end: Some(end),
});
}
}
SearchResult::LimitReached => limit_reached = true,
}
}
Ok(proto::SearchProjectResponse { locations })
Ok(proto::SearchProjectResponse {
locations,
limit_reached,
})
})
.await
}
@@ -9320,6 +9144,154 @@ impl Project {
}
}
#[allow(clippy::too_many_arguments)]
async fn search_snapshots(
snapshots: &Vec<LocalSnapshot>,
worker_start_ix: usize,
worker_end_ix: usize,
query: &SearchQuery,
results_tx: &Sender<SearchMatchCandidate>,
opened_buffers: &HashMap<Arc<Path>, (Model<Buffer>, BufferSnapshot)>,
include_root: bool,
fs: &Arc<dyn Fs>,
) {
let mut snapshot_start_ix = 0;
let mut abs_path = PathBuf::new();
for snapshot in snapshots {
let snapshot_end_ix = snapshot_start_ix
+ if query.include_ignored() {
snapshot.file_count()
} else {
snapshot.visible_file_count()
};
if worker_end_ix <= snapshot_start_ix {
break;
} else if worker_start_ix > snapshot_end_ix {
snapshot_start_ix = snapshot_end_ix;
continue;
} else {
let start_in_snapshot = worker_start_ix.saturating_sub(snapshot_start_ix);
let end_in_snapshot = cmp::min(worker_end_ix, snapshot_end_ix) - snapshot_start_ix;
for entry in snapshot
.files(false, start_in_snapshot)
.take(end_in_snapshot - start_in_snapshot)
{
if results_tx.is_closed() {
break;
}
if opened_buffers.contains_key(&entry.path) {
continue;
}
let matched_path = if include_root {
let mut full_path = PathBuf::from(snapshot.root_name());
full_path.push(&entry.path);
query.file_matches(Some(&full_path))
} else {
query.file_matches(Some(&entry.path))
};
let matches = if matched_path {
abs_path.clear();
abs_path.push(&snapshot.abs_path());
abs_path.push(&entry.path);
if let Some(file) = fs.open_sync(&abs_path).await.log_err() {
query.detect(file).unwrap_or(false)
} else {
false
}
} else {
false
};
if matches {
let project_path = SearchMatchCandidate::Path {
worktree_id: snapshot.id(),
path: entry.path.clone(),
is_ignored: entry.is_ignored,
};
if results_tx.send(project_path).await.is_err() {
return;
}
}
}
snapshot_start_ix = snapshot_end_ix;
}
}
}
async fn search_ignored_entry(
snapshot: &LocalSnapshot,
ignored_entry: &Entry,
fs: &Arc<dyn Fs>,
query: &SearchQuery,
counter_tx: &Sender<SearchMatchCandidate>,
) {
let mut ignored_paths_to_process =
VecDeque::from([snapshot.abs_path().join(&ignored_entry.path)]);
while let Some(ignored_abs_path) = ignored_paths_to_process.pop_front() {
let metadata = fs
.metadata(&ignored_abs_path)
.await
.with_context(|| format!("fetching fs metadata for {ignored_abs_path:?}"))
.log_err()
.flatten();
if let Some(fs_metadata) = metadata {
if fs_metadata.is_dir {
let files = fs
.read_dir(&ignored_abs_path)
.await
.with_context(|| format!("listing ignored path {ignored_abs_path:?}"))
.log_err();
if let Some(mut subfiles) = files {
while let Some(subfile) = subfiles.next().await {
if let Some(subfile) = subfile.log_err() {
ignored_paths_to_process.push_back(subfile);
}
}
}
} else if !fs_metadata.is_symlink {
if !query.file_matches(Some(&ignored_abs_path))
|| snapshot.is_path_excluded(ignored_entry.path.to_path_buf())
{
continue;
}
let matches = if let Some(file) = fs
.open_sync(&ignored_abs_path)
.await
.with_context(|| format!("Opening ignored path {ignored_abs_path:?}"))
.log_err()
{
query.detect(file).unwrap_or(false)
} else {
false
};
if matches {
let project_path = SearchMatchCandidate::Path {
worktree_id: snapshot.id(),
path: Arc::from(
ignored_abs_path
.strip_prefix(snapshot.abs_path())
.expect("scanning worktree-related files"),
),
is_ignored: true,
};
if counter_tx.send(project_path).await.is_err() {
return;
}
}
}
}
}
}
fn subscribe_for_copilot_events(
copilot: &Model<Copilot>,
cx: &mut ModelContext<'_, Project>,

View File

@@ -3897,10 +3897,10 @@ async fn test_search_with_inclusions(cx: &mut gpui::TestAppContext) {
.await
.unwrap(),
HashMap::from_iter([
("dir/two.ts".to_string(), vec![14..18]),
("dir/one.rs".to_string(), vec![8..12]),
("dir/one.ts".to_string(), vec![14..18]),
("dir/two.rs".to_string(), vec![8..12]),
("dir/two.ts".to_string(), vec![14..18]),
]),
"Rust and typescript search should give both Rust and TypeScript files, even if other inclusions don't match anything"
);
@@ -4269,6 +4269,7 @@ async fn test_search_in_gitignored_dirs(cx: &mut gpui::TestAppContext) {
"Only one non-ignored file should have the query"
);
let project = Project::test(fs.clone(), ["/dir".as_ref()], cx).await;
assert_eq!(
search(
&project,
@@ -4297,6 +4298,9 @@ async fn test_search_in_gitignored_dirs(cx: &mut gpui::TestAppContext) {
"Unrestricted search with ignored directories should find every file with the query"
);
let files_to_include = vec![PathMatcher::new("/dir/node_modules/prettier/**").unwrap()];
let files_to_exclude = vec![PathMatcher::new("*.ts").unwrap()];
let project = Project::test(fs.clone(), ["/dir".as_ref()], cx).await;
assert_eq!(
search(
&project,
@@ -4305,8 +4309,8 @@ async fn test_search_in_gitignored_dirs(cx: &mut gpui::TestAppContext) {
false,
false,
true,
vec![PathMatcher::new("node_modules/prettier/**").unwrap()],
vec![PathMatcher::new("*.ts").unwrap()],
files_to_include,
files_to_exclude,
)
.unwrap(),
cx
@@ -4404,11 +4408,16 @@ async fn search(
cx: &mut gpui::TestAppContext,
) -> Result<HashMap<String, Vec<Range<usize>>>> {
let mut search_rx = project.update(cx, |project, cx| project.search(query, cx));
let mut result = HashMap::default();
while let Some((buffer, range)) = search_rx.next().await {
result.entry(buffer).or_insert(range);
let mut results = HashMap::default();
while let Some(search_result) = search_rx.next().await {
match search_result {
SearchResult::Buffer { buffer, ranges } => {
results.entry(buffer).or_insert(ranges);
}
SearchResult::LimitReached => {}
}
}
Ok(result
Ok(results
.into_iter()
.map(|(buffer, ranges)| {
buffer.update(cx, |buffer, cx| {

View File

@@ -947,6 +947,7 @@ message SearchProject {
message SearchProjectResponse {
repeated Location locations = 1;
bool limit_reached = 2;
}
message CodeAction {

View File

@@ -143,6 +143,7 @@ struct ProjectSearch {
search_id: usize,
search_history: SearchHistory,
no_results: Option<bool>,
limit_reached: bool,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
@@ -205,6 +206,7 @@ impl ProjectSearch {
search_id: 0,
search_history: SearchHistory::default(),
no_results: None,
limit_reached: false,
}
}
@@ -220,6 +222,7 @@ impl ProjectSearch {
search_id: self.search_id,
search_history: self.search_history.clone(),
no_results: self.no_results,
limit_reached: self.limit_reached,
})
}
@@ -238,27 +241,38 @@ impl ProjectSearch {
this.match_ranges.clear();
this.excerpts.update(cx, |this, cx| this.clear(cx));
this.no_results = Some(true);
this.limit_reached = false;
})
.ok()?;
while let Some((buffer, anchors)) = matches.next().await {
let mut ranges = this
.update(&mut cx, |this, cx| {
this.no_results = Some(false);
this.excerpts.update(cx, |excerpts, cx| {
excerpts.stream_excerpts_with_context_lines(buffer, anchors, 1, cx)
})
})
.ok()?;
let mut limit_reached = false;
while let Some(result) = matches.next().await {
match result {
project::SearchResult::Buffer { buffer, ranges } => {
let mut match_ranges = this
.update(&mut cx, |this, cx| {
this.no_results = Some(false);
this.excerpts.update(cx, |excerpts, cx| {
excerpts
.stream_excerpts_with_context_lines(buffer, ranges, 1, cx)
})
})
.ok()?;
while let Some(range) = ranges.next().await {
this.update(&mut cx, |this, _| this.match_ranges.push(range))
.ok()?;
while let Some(range) = match_ranges.next().await {
this.update(&mut cx, |this, _| this.match_ranges.push(range))
.ok()?;
}
this.update(&mut cx, |_, cx| cx.notify()).ok()?;
}
project::SearchResult::LimitReached => {
limit_reached = true;
}
}
this.update(&mut cx, |_, cx| cx.notify()).ok()?;
}
this.update(&mut cx, |this, cx| {
this.limit_reached = limit_reached;
this.pending_search.take();
cx.notify();
})
@@ -718,6 +732,7 @@ impl ProjectSearchView {
self.model.update(cx, |model, cx| {
model.pending_search = None;
model.no_results = None;
model.limit_reached = false;
model.match_ranges.clear();
model.excerpts.update(cx, |excerpts, cx| {
@@ -1811,6 +1826,8 @@ impl Render for ProjectSearchBar {
})
.unwrap_or_else(|| "No matches".to_string());
let limit_reached = search.model.read(cx).limit_reached;
let matches_column = h_flex()
.child(div().min_w(rems(6.)).child(Label::new(match_text)))
.child(
@@ -1838,7 +1855,14 @@
}
}))
.tooltip(|cx| Tooltip::for_action("Go to next match", &SelectNextMatch, cx)),
);
)
.when(limit_reached, |this| {
this.child(
div()
.child(Label::new("Search limit reached").color(Color::Warning))
.ml_2(),
)
});
let search_line = h_flex()
.gap_2()