Distributed database pattern built.

Co-Authored-By: kay@zed.dev
This commit is contained in:
Mikayla Maki 2022-11-14 13:18:44 -08:00
parent 2a5565ca93
commit 4798161118
28 changed files with 893 additions and 937 deletions

4
Cargo.lock generated
View File

@ -1040,7 +1040,6 @@ dependencies = [
"client",
"collections",
"ctor",
"db",
"editor",
"env_logger",
"envy",
@ -2428,6 +2427,7 @@ dependencies = [
"simplelog",
"smallvec",
"smol",
"sqlez",
"sum_tree",
"time 0.3.17",
"tiny-skia",
@ -5307,6 +5307,7 @@ dependencies = [
"serde",
"serde_json",
"serde_path_to_error",
"sqlez",
"theme",
"toml",
"tree-sitter",
@ -7633,6 +7634,7 @@ dependencies = [
"gpui",
"indoc",
"language",
"lazy_static",
"log",
"menu",
"parking_lot 0.11.2",

View File

@ -2,17 +2,17 @@ mod update_notification;
use anyhow::{anyhow, Context, Result};
use client::{http::HttpClient, ZED_SECRET_CLIENT_TOKEN};
use db::{kvp::KeyValue, Db};
use db::kvp::KEY_VALUE_STORE;
use gpui::{
actions, platform::AppVersion, AppContext, AsyncAppContext, Entity, ModelContext, ModelHandle,
MutableAppContext, Task, WeakViewHandle,
};
use lazy_static::lazy_static;
use serde::Deserialize;
use settings::ReleaseChannel;
use smol::{fs::File, io::AsyncReadExt, process::Command};
use std::{env, ffi::OsString, path::PathBuf, sync::Arc, time::Duration};
use update_notification::UpdateNotification;
use util::channel::ReleaseChannel;
use workspace::Workspace;
const SHOULD_SHOW_UPDATE_NOTIFICATION_KEY: &str = "auto-updater-should-show-updated-notification";
@ -42,7 +42,6 @@ pub struct AutoUpdater {
current_version: AppVersion,
http_client: Arc<dyn HttpClient>,
pending_poll: Option<Task<()>>,
db: project::Db<KeyValue>,
server_url: String,
}
@ -56,16 +55,11 @@ impl Entity for AutoUpdater {
type Event = ();
}
pub fn init(
db: Db<KeyValue>,
http_client: Arc<dyn HttpClient>,
server_url: String,
cx: &mut MutableAppContext,
) {
pub fn init(http_client: Arc<dyn HttpClient>, server_url: String, cx: &mut MutableAppContext) {
if let Some(version) = (*ZED_APP_VERSION).or_else(|| cx.platform().app_version().ok()) {
let server_url = server_url;
let auto_updater = cx.add_model(|cx| {
let updater = AutoUpdater::new(version, db, http_client, server_url.clone());
let updater = AutoUpdater::new(version, http_client, server_url.clone());
updater.start_polling(cx).detach();
updater
});
@ -126,14 +120,12 @@ impl AutoUpdater {
fn new(
current_version: AppVersion,
db: project::Db<KeyValue>,
http_client: Arc<dyn HttpClient>,
server_url: String,
) -> Self {
Self {
status: AutoUpdateStatus::Idle,
current_version,
db,
http_client,
server_url,
pending_poll: None,
@ -303,20 +295,21 @@ impl AutoUpdater {
should_show: bool,
cx: &AppContext,
) -> Task<Result<()>> {
let db = self.db.clone();
cx.background().spawn(async move {
if should_show {
db.write_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY, "")?;
KEY_VALUE_STORE.write_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY, "")?;
} else {
db.delete_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?;
KEY_VALUE_STORE.delete_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?;
}
Ok(())
})
}
fn should_show_update_notification(&self, cx: &AppContext) -> Task<Result<bool>> {
let db = self.db.clone();
cx.background()
.spawn(async move { Ok(db.read_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?.is_some()) })
cx.background().spawn(async move {
Ok(KEY_VALUE_STORE
.read_kvp(SHOULD_SHOW_UPDATE_NOTIFICATION_KEY)?
.is_some())
})
}
}

View File

@ -5,7 +5,8 @@ use gpui::{
Element, Entity, MouseButton, View, ViewContext,
};
use menu::Cancel;
use settings::{ReleaseChannel, Settings};
use settings::Settings;
use util::channel::ReleaseChannel;
use workspace::Notification;
pub struct UpdateNotification {

View File

@ -11,7 +11,6 @@ use async_tungstenite::tungstenite::{
error::Error as WebsocketError,
http::{Request, StatusCode},
};
use db::{kvp::KeyValue, Db};
use futures::{future::LocalBoxFuture, AsyncReadExt, FutureExt, SinkExt, StreamExt, TryStreamExt};
use gpui::{
actions,
@ -27,7 +26,6 @@ use postage::watch;
use rand::prelude::*;
use rpc::proto::{AnyTypedEnvelope, EntityMessage, EnvelopedMessage, RequestMessage};
use serde::Deserialize;
use settings::ReleaseChannel;
use std::{
any::TypeId,
collections::HashMap,
@ -41,6 +39,7 @@ use std::{
use telemetry::Telemetry;
use thiserror::Error;
use url::Url;
use util::channel::ReleaseChannel;
use util::{ResultExt, TryFutureExt};
pub use rpc::*;
@ -1218,8 +1217,8 @@ impl Client {
self.peer.respond_with_error(receipt, error)
}
pub fn start_telemetry(&self, db: Db<KeyValue>) {
self.telemetry.start(db.clone());
pub fn start_telemetry(&self) {
self.telemetry.start();
}
pub fn report_event(&self, kind: &str, properties: Value) {

View File

@ -1,5 +1,5 @@
use crate::http::HttpClient;
use db::{kvp::KeyValue, Db};
use db::kvp::KEY_VALUE_STORE;
use gpui::{
executor::Background,
serde_json::{self, value::Map, Value},
@ -10,7 +10,6 @@ use lazy_static::lazy_static;
use parking_lot::Mutex;
use serde::Serialize;
use serde_json::json;
use settings::ReleaseChannel;
use std::{
io::Write,
mem,
@ -19,7 +18,7 @@ use std::{
time::{Duration, SystemTime, UNIX_EPOCH},
};
use tempfile::NamedTempFile;
use util::{post_inc, ResultExt, TryFutureExt};
use util::{channel::ReleaseChannel, post_inc, ResultExt, TryFutureExt};
use uuid::Uuid;
pub struct Telemetry {
@ -148,18 +147,19 @@ impl Telemetry {
Some(self.state.lock().log_file.as_ref()?.path().to_path_buf())
}
pub fn start(self: &Arc<Self>, db: Db<KeyValue>) {
pub fn start(self: &Arc<Self>) {
let this = self.clone();
self.executor
.spawn(
async move {
let device_id = if let Ok(Some(device_id)) = db.read_kvp("device_id") {
device_id
} else {
let device_id = Uuid::new_v4().to_string();
db.write_kvp("device_id", &device_id)?;
device_id
};
let device_id =
if let Ok(Some(device_id)) = KEY_VALUE_STORE.read_kvp("device_id") {
device_id
} else {
let device_id = Uuid::new_v4().to_string();
KEY_VALUE_STORE.write_kvp("device_id", &device_id)?;
device_id
};
let device_id: Arc<str> = device_id.into();
let mut state = this.state.lock();

View File

@ -18,7 +18,6 @@ live_kit_server = { path = "../live_kit_server" }
rpc = { path = "../rpc" }
util = { path = "../util" }
db = { path = "../db" }
anyhow = "1.0.40"
async-trait = "0.1.50"
async-tungstenite = "0.16"

View File

@ -1,6 +1,6 @@
use crate::{
db::{Db, NewUserParams, ProjectId, UserId},
rpc::{Executor, Server},
db::{NewUserParams, ProjectId, TestDb, UserId},
rpc::{Executor, Server, Store},
AppState,
};
@ -12,7 +12,6 @@ use client::{
User, UserStore, RECEIVE_TIMEOUT,
};
use collections::{BTreeMap, HashMap, HashSet};
use db as SqliteDb;
use editor::{
self, ConfirmCodeAction, ConfirmCompletion, ConfirmRename, Editor, Redo, Rename, ToOffset,
ToggleCodeActions, Undo,
@ -5838,11 +5837,7 @@ impl TestServer {
Project::init(&client);
cx.update(|cx| {
workspace::init(
app_state.clone(),
cx,
SqliteDb::open_in_memory("integration tests"),
);
workspace::init(app_state.clone(), cx);
call::init(client.clone(), user_store.clone(), cx);
});

View File

@ -320,7 +320,7 @@ mod tests {
use super::*;
use editor::Editor;
use gpui::TestAppContext;
use project::{Db, Project};
use project::Project;
use workspace::{AppState, Workspace};
#[test]
@ -345,7 +345,7 @@ mod tests {
cx.update(|cx| {
editor::init(cx);
workspace::init(app_state.clone(), cx, Db::open_in_memory("test"));
workspace::init(app_state.clone(), cx);
init(cx);
});

View File

@ -1,11 +1,12 @@
pub mod kvp;
use std::fs;
use std::ops::Deref;
use std::path::Path;
#[cfg(any(test, feature = "test-support"))]
use anyhow::Result;
use indoc::indoc;
#[cfg(any(test, feature = "test-support"))]
use sqlez::connection::Connection;
use sqlez::domain::Domain;
use sqlez::thread_safe_connection::ThreadSafeConnection;
@ -17,47 +18,29 @@ const INITIALIZE_QUERY: &'static str = indoc! {"
PRAGMA case_sensitive_like=TRUE;
"};
#[derive(Clone)]
pub struct Db<D: Domain>(ThreadSafeConnection<D>);
/// Open or create a database at the given directory path.
pub fn open_file_db<D: Domain>() -> ThreadSafeConnection<D> {
// Use 0 for now. Will implement incrementing and clearing of old db files soon TM
let current_db_dir = (*util::paths::DB_DIR).join(Path::new(&format!(
"0-{}",
*util::channel::RELEASE_CHANNEL_NAME
)));
fs::create_dir_all(&current_db_dir).expect("Should be able to create the database directory");
let db_path = current_db_dir.join(Path::new("db.sqlite"));
impl<D: Domain> Deref for Db<D> {
type Target = sqlez::connection::Connection;
fn deref(&self) -> &Self::Target {
&self.0.deref()
}
ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true)
.with_initialize_query(INITIALIZE_QUERY)
}
impl<D: Domain> Db<D> {
/// Open or create a database at the given directory path.
pub fn open(db_dir: &Path, channel: &'static str) -> Self {
// Use 0 for now. Will implement incrementing and clearing of old db files soon TM
let current_db_dir = db_dir.join(Path::new(&format!("0-{}", channel)));
fs::create_dir_all(&current_db_dir)
.expect("Should be able to create the database directory");
let db_path = current_db_dir.join(Path::new("db.sqlite"));
Db(
ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true)
.with_initialize_query(INITIALIZE_QUERY),
)
}
/// Open a in memory database for testing and as a fallback.
pub fn open_in_memory(db_name: &str) -> Self {
Db(ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY))
}
pub fn persisting(&self) -> bool {
self.persistent()
}
pub fn write_to<P: AsRef<Path>>(&self, dest: P) -> Result<()> {
let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref());
self.backup_main(&destination)
}
pub fn open_as<D2: Domain>(&self) -> Db<D2> {
Db(self.0.for_domain())
}
pub fn open_memory_db<D: Domain>(db_name: &str) -> ThreadSafeConnection<D> {
ThreadSafeConnection::new(db_name, false).with_initialize_query(INITIALIZE_QUERY)
}
#[cfg(any(test, feature = "test-support"))]
pub fn write_db_to<D: Domain, P: AsRef<Path>>(
conn: &ThreadSafeConnection<D>,
dest: P,
) -> Result<()> {
let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref());
conn.backup_main(&destination)
}

View File

@ -1,7 +1,11 @@
use super::Db;
use anyhow::Result;
use indoc::indoc;
use sqlez::{connection::Connection, domain::Domain, migrations::Migration};
use sqlez::{
connection::Connection, domain::Domain, migrations::Migration,
thread_safe_connection::ThreadSafeConnection,
};
use std::ops::Deref;
pub(crate) const KVP_MIGRATION: Migration = Migration::new(
"kvp",
@ -13,16 +17,29 @@ pub(crate) const KVP_MIGRATION: Migration = Migration::new(
"}],
);
#[derive(Clone)]
pub enum KeyValue {}
lazy_static::lazy_static! {
pub static ref KEY_VALUE_STORE: KeyValueStore =
KeyValueStore(crate::open_file_db());
}
impl Domain for KeyValue {
#[derive(Clone)]
pub struct KeyValueStore(ThreadSafeConnection<KeyValueStore>);
impl Domain for KeyValueStore {
fn migrate(conn: &Connection) -> anyhow::Result<()> {
KVP_MIGRATION.run(conn)
}
}
impl Db<KeyValue> {
impl Deref for KeyValueStore {
type Target = ThreadSafeConnection<KeyValueStore>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl KeyValueStore {
pub fn read_kvp(&self, key: &str) -> Result<Option<String>> {
self.select_row_bound("SELECT value FROM kv_store WHERE key = (?)")?(key)
}
@ -44,11 +61,11 @@ impl Db<KeyValue> {
mod tests {
use anyhow::Result;
use super::*;
use crate::kvp::KeyValueStore;
#[test]
fn test_kvp() -> Result<()> {
let db = Db::open_in_memory("test_kvp");
let db = KeyValueStore(crate::open_memory_db("test_kvp"));
assert_eq!(db.read_kvp("key-1").unwrap(), None);

View File

@ -17,6 +17,7 @@ collections = { path = "../collections" }
gpui_macros = { path = "../gpui_macros" }
util = { path = "../util" }
sum_tree = { path = "../sum_tree" }
sqlez = { path = "../sqlez" }
async-task = "4.0.3"
backtrace = { version = "0.3", optional = true }
ctor = "0.1"

View File

@ -1,10 +1,10 @@
#include "nan.h"
#include "tree_sitter/parser.h"
#include <node.h>
#include "nan.h"
using namespace v8;
extern "C" TSLanguage * tree_sitter_context_predicate();
extern "C" TSLanguage *tree_sitter_context_predicate();
namespace {
@ -16,13 +16,15 @@ void Init(Local<Object> exports, Local<Object> module) {
tpl->InstanceTemplate()->SetInternalFieldCount(1);
Local<Function> constructor = Nan::GetFunction(tpl).ToLocalChecked();
Local<Object> instance = constructor->NewInstance(Nan::GetCurrentContext()).ToLocalChecked();
Local<Object> instance =
constructor->NewInstance(Nan::GetCurrentContext()).ToLocalChecked();
Nan::SetInternalFieldPointer(instance, 0, tree_sitter_context_predicate());
Nan::Set(instance, Nan::New("name").ToLocalChecked(), Nan::New("context_predicate").ToLocalChecked());
Nan::Set(instance, Nan::New("name").ToLocalChecked(),
Nan::New("context_predicate").ToLocalChecked());
Nan::Set(module, Nan::New("exports").ToLocalChecked(), instance);
}
NODE_MODULE(tree_sitter_context_predicate_binding, Init)
} // namespace
} // namespace

View File

@ -17,10 +17,15 @@ use crate::{
SceneBuilder, UpgradeModelHandle, UpgradeViewHandle, View, ViewHandle, WeakModelHandle,
WeakViewHandle,
};
use anyhow::bail;
use collections::{HashMap, HashSet};
use pathfinder_geometry::vector::{vec2f, Vector2F};
use serde_json::json;
use smallvec::SmallVec;
use sqlez::{
bindable::{Bind, Column},
statement::Statement,
};
use std::{
marker::PhantomData,
ops::{Deref, DerefMut, Range},
@ -895,6 +900,31 @@ impl ToJson for Axis {
}
}
/// Serializes an `Axis` into a SQL parameter as its canonical string name
/// ("Horizontal" / "Vertical"), matching the `Column` decoder below.
impl Bind for Axis {
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        let name = match self {
            Axis::Horizontal => "Horizontal",
            Axis::Vertical => "Vertical",
        };
        name.bind(statement, start_index)
    }
}
impl Column for Axis {
    /// Reads an `Axis` back out of a SQL row by decoding the string written
    /// by the matching `Bind` implementation above.
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        String::column(statement, start_index).and_then(|(axis_text, next_index)| {
            Ok((
                match axis_text.as_str() {
                    "Horizontal" => Axis::Horizontal,
                    "Vertical" => Axis::Vertical,
                    // Fixed: previous message said "item kind" — copied from
                    // the SerializedItemKind decoder; this decodes an Axis.
                    _ => bail!("Stored serialized axis is incorrect"),
                },
                next_index,
            ))
        })
    }
}
pub trait Vector2FExt {
fn along(self, axis: Axis) -> f32;
}

View File

@ -63,7 +63,6 @@ use std::{
use thiserror::Error;
use util::{defer, post_inc, ResultExt, TryFutureExt as _};
pub use db::{kvp::KeyValue, Db};
pub use fs::*;
pub use worktree::*;

View File

@ -14,6 +14,7 @@ test-support = []
assets = { path = "../assets" }
collections = { path = "../collections" }
gpui = { path = "../gpui" }
sqlez = { path = "../sqlez" }
fs = { path = "../fs" }
anyhow = "1.0.38"
futures = "0.3"

View File

@ -2,7 +2,7 @@ mod keymap_file;
pub mod settings_file;
pub mod watched_json;
use anyhow::Result;
use anyhow::{bail, Result};
use gpui::{
font_cache::{FamilyId, FontCache},
AssetSource,
@ -14,6 +14,10 @@ use schemars::{
};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use serde_json::Value;
use sqlez::{
bindable::{Bind, Column},
statement::Statement,
};
use std::{collections::HashMap, fmt::Write as _, num::NonZeroU32, str, sync::Arc};
use theme::{Theme, ThemeRegistry};
use tree_sitter::Query;
@ -55,24 +59,6 @@ pub struct FeatureFlags {
pub experimental_themes: bool,
}
#[derive(Copy, Clone, PartialEq, Eq, Default)]
pub enum ReleaseChannel {
#[default]
Dev,
Preview,
Stable,
}
impl ReleaseChannel {
pub fn name(&self) -> &'static str {
match self {
ReleaseChannel::Dev => "Zed Dev",
ReleaseChannel::Preview => "Zed Preview",
ReleaseChannel::Stable => "Zed",
}
}
}
impl FeatureFlags {
pub fn keymap_files(&self) -> Vec<&'static str> {
vec![]
@ -244,6 +230,33 @@ pub enum DockAnchor {
Expanded,
}
/// Stores a `DockAnchor` as its string name so the database value stays
/// human-readable and stable across enum reordering.
impl Bind for DockAnchor {
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        let anchor_name = match self {
            DockAnchor::Bottom => "Bottom",
            DockAnchor::Right => "Right",
            DockAnchor::Expanded => "Expanded",
        };
        anchor_name.bind(statement, start_index)
    }
}
/// Decodes a `DockAnchor` from the string stored by `Bind`; rejects any
/// unknown value rather than guessing a default.
impl Column for DockAnchor {
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let (anchor_text, next_index) = String::column(statement, start_index)?;
        let anchor = match anchor_text.as_str() {
            "Bottom" => DockAnchor::Bottom,
            "Right" => DockAnchor::Right,
            "Expanded" => DockAnchor::Expanded,
            _ => bail!("Stored dock anchor is incorrect"),
        };
        Ok((anchor, next_index))
    }
}
#[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema)]
pub struct SettingsFileContent {
pub experiments: Option<FeatureFlags>,

View File

@ -45,8 +45,8 @@ impl<'a> Statement<'a> {
let sql = CString::new(query.as_ref())?;
let mut remaining_sql = sql.as_c_str();
while {
let remaining_sql_str = remaining_sql.to_str()?;
remaining_sql_str.trim() != ";" && !remaining_sql_str.is_empty()
let remaining_sql_str = remaining_sql.to_str()?.trim();
remaining_sql_str != ";" && !remaining_sql_str.is_empty()
} {
let mut raw_statement = 0 as *mut sqlite3_stmt;
let mut remaining_sql_ptr = ptr::null();

View File

@ -13,6 +13,9 @@ pub struct ThreadSafeConnection<D: Domain> {
_pd: PhantomData<D>,
}
unsafe impl<T: Domain> Send for ThreadSafeConnection<T> {}
unsafe impl<T: Domain> Sync for ThreadSafeConnection<T> {}
impl<D: Domain> ThreadSafeConnection<D> {
pub fn new(uri: &str, persistent: bool) -> Self {
Self {

View File

@ -0,0 +1,32 @@
use std::env;
use lazy_static::lazy_static;
lazy_static! {
    /// Release channel name ("dev" / "preview" / "stable"). The environment
    /// variable takes precedence over the channel file baked into the build,
    /// so a binary can be re-channeled at runtime for testing.
    // NOTE(review): if the RELEASE_CHANNEL file ends with a trailing newline,
    // the match below hits the panic arm — confirm the file has no newline,
    // or `.trim()` the contents here.
    pub static ref RELEASE_CHANNEL_NAME: String = env::var("ZED_RELEASE_CHANNEL")
        // `unwrap_or_else` avoids allocating the fallback String when the
        // environment variable is set (clippy: or_fun_call).
        .unwrap_or_else(|_| include_str!("../../zed/RELEASE_CHANNEL").to_string());
    /// Parsed form of `RELEASE_CHANNEL_NAME`; panics at first use on an
    /// unrecognized channel so a bad build configuration fails loudly.
    pub static ref RELEASE_CHANNEL: ReleaseChannel = match RELEASE_CHANNEL_NAME.as_str() {
        "dev" => ReleaseChannel::Dev,
        "preview" => ReleaseChannel::Preview,
        "stable" => ReleaseChannel::Stable,
        _ => panic!("invalid release channel {}", *RELEASE_CHANNEL_NAME),
    };
}
/// The distribution channel this binary was built for.
///
/// `Dev` is the default so local builds need no configuration.
// Added `Debug` — public types should be debug-printable (e.g. in logs and
// test assertions); purely additive, so backward compatible.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Default)]
pub enum ReleaseChannel {
    #[default]
    Dev,
    Preview,
    Stable,
}

impl ReleaseChannel {
    /// Human-readable application name for this channel, used wherever the
    /// product name is shown to the user.
    pub fn name(&self) -> &'static str {
        match self {
            ReleaseChannel::Dev => "Zed Dev",
            ReleaseChannel::Preview => "Zed Preview",
            ReleaseChannel::Stable => "Zed",
        }
    }
}

View File

@ -1,3 +1,4 @@
pub mod channel;
pub mod paths;
#[cfg(any(test, feature = "test-support"))]
pub mod test;

View File

@ -36,6 +36,7 @@ util = { path = "../util" }
bincode = "1.2.1"
anyhow = "1.0.38"
futures = "0.3"
lazy_static = "1.4"
log = { version = "0.4.16", features = ["kv_unstable_serde"] }
parking_lot = "0.11.1"
postage = { version = "0.4.1", features = ["futures-traits"] }

View File

@ -0,0 +1,494 @@
#![allow(dead_code)]
pub mod model;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use anyhow::{bail, Context, Result};
use db::open_file_db;
use gpui::Axis;
use indoc::indoc;
use lazy_static::lazy_static;
use sqlez::thread_safe_connection::ThreadSafeConnection;
use sqlez::{connection::Connection, domain::Domain, migrations::Migration};
use util::{iife, unzip_option, ResultExt};
use super::Workspace;
use model::{
GroupId, PaneId, SerializedItem, SerializedItemKind, SerializedPane, SerializedPaneGroup,
SerializedWorkspace, WorkspaceId,
};
lazy_static! {
    /// Global handle to the workspace database, opened lazily on first use
    /// against the on-disk sqlite file for the current release channel
    /// (see `db::open_file_db`).
    pub static ref DB: WorkspaceDb = WorkspaceDb(open_file_db());
}
/// Newtype over the workspace's thread-safe sqlite connection; all of the
/// query helpers below reach the underlying connection through `Deref`.
pub struct WorkspaceDb(ThreadSafeConnection<Workspace>);

impl Deref for WorkspaceDb {
    type Target = ThreadSafeConnection<Workspace>;

    fn deref(&self) -> &Self::Target {
        let WorkspaceDb(connection) = self;
        connection
    }
}
// Schema for workspace serialization. `workspace_id` is a bincode BLOB of the
// workspace's sorted worktree root paths (see model::WorkspaceId), so a set of
// roots maps to exactly one row; deleting a workspace cascades through
// pane_groups, panes and items.
//
// Fixed: the `items` table definition was missing the commas separating its
// two FOREIGN KEY constraints and the PRIMARY KEY constraint, which is a
// SQLite parse error (table constraints must be comma-separated).
pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new(
    "workspace",
    &[indoc! {"
        CREATE TABLE workspaces(
            workspace_id BLOB PRIMARY KEY,
            dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded'
            dock_visible INTEGER, -- Boolean
            timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL
        ) STRICT;
        CREATE TABLE pane_groups(
            group_id INTEGER PRIMARY KEY,
            workspace_id BLOB NOT NULL,
            parent_group_id INTEGER, -- NULL indicates that this is a root node
            position INTEGER, -- NULL indicates that this is a root node
            axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal'
            FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
            FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
        ) STRICT;
        CREATE TABLE panes(
            pane_id INTEGER PRIMARY KEY,
            workspace_id BLOB NOT NULL,
            parent_group_id INTEGER, -- NULL, this is a dock pane
            position INTEGER, -- NULL, this is a dock pane
            FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
            FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
        ) STRICT;
        CREATE TABLE items(
            item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique
            workspace_id BLOB NOT NULL,
            pane_id INTEGER NOT NULL,
            kind TEXT NOT NULL,
            position INTEGER NOT NULL,
            FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
            FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE,
            PRIMARY KEY(item_id, workspace_id)
        ) STRICT;
    "}],
);
impl Domain for Workspace {
    /// Runs the workspace schema migration when a connection for this
    /// domain is first opened.
    fn migrate(conn: &Connection) -> anyhow::Result<()> {
        // `conn` is already a `&Connection`; no extra borrow needed.
        WORKSPACES_MIGRATION.run(conn)
    }
}
impl WorkspaceDb {
    /// Returns a serialized workspace for the given worktree_roots. If the passed array
    /// is empty, the most recent workspace is returned instead. If no workspace for the
    /// passed roots is stored, returns none.
    pub fn workspace_for_roots<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
    ) -> Option<SerializedWorkspace> {
        let workspace_id: WorkspaceId = worktree_roots.into();

        // Note that we re-assign the workspace_id here in case it's empty
        // and we've grabbed the most recent workspace
        let (workspace_id, dock_anchor, dock_visible) = iife!({
            if worktree_roots.len() == 0 {
                self.select_row(indoc! {"
                    SELECT workspace_id, dock_anchor, dock_visible
                    FROM workspaces
                    ORDER BY timestamp DESC LIMIT 1"})?()?
            } else {
                self.select_row_bound(indoc! {"
                    SELECT workspace_id, dock_anchor, dock_visible
                    FROM workspaces
                    WHERE workspace_id = ?"})?(&workspace_id)?
            }
            .context("No workspaces found")
        })
        // Query failures are logged (not propagated) and treated as
        // "no stored workspace".
        .warn_on_err()
        .flatten()?;

        Some(SerializedWorkspace {
            dock_pane: self
                .get_dock_pane(&workspace_id)
                .context("Getting dock pane")
                .log_err()?,
            center_group: self
                .get_center_pane_group(&workspace_id)
                .context("Getting center group")
                .log_err()?,
            dock_anchor,
            dock_visible,
        })
    }

    /// Saves a workspace using the worktree roots. Will garbage collect any workspaces
    /// that used this workspace previously
    ///
    /// Errors are logged rather than returned; the savepoint ensures a
    /// failed save leaves the previous database state intact.
    pub fn save_workspace<P: AsRef<Path>>(
        &self,
        worktree_roots: &[P],
        old_roots: Option<&[P]>,
        workspace: &SerializedWorkspace,
    ) {
        let workspace_id: WorkspaceId = worktree_roots.into();

        self.with_savepoint("update_worktrees", || {
            // If this workspace was previously stored under different roots,
            // drop the old row (cascades to its groups, panes and items).
            if let Some(old_roots) = old_roots {
                let old_id: WorkspaceId = old_roots.into();
                self.exec_bound("DELETE FROM WORKSPACES WHERE workspace_id = ?")?(&old_id)?;
            }

            // Delete any previous workspaces with the same roots. This cascades to all
            // other tables that are based on the same roots set.
            // Insert new workspace into workspaces table if none were found
            self.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?;
            self.exec_bound(
                "INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)",
            )?((&workspace_id, workspace.dock_anchor, workspace.dock_visible))?;

            // Save center pane group and dock pane
            self.save_pane_group(&workspace_id, &workspace.center_group, None)?;
            self.save_pane(&workspace_id, &workspace.dock_pane, None)?;

            Ok(())
        })
        .with_context(|| {
            format!(
                "Update workspace with roots {:?}",
                worktree_roots
                    .iter()
                    .map(|p| p.as_ref())
                    .collect::<Vec<_>>()
            )
        })
        .log_err();
    }

    /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots
    pub fn recent_workspaces(&self, limit: usize) -> Vec<Vec<PathBuf>> {
        iife!({
            // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html
            Ok::<_, anyhow::Error>(
                self.select_bound::<usize, WorkspaceId>(
                    "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?",
                )?(limit)?
                .into_iter()
                .map(|id| id.paths())
                .collect::<Vec<Vec<PathBuf>>>(),
            )
        })
        .log_err()
        .unwrap_or_default()
    }

    /// Loads the root of the center pane-group tree for a workspace.
    pub(crate) fn get_center_pane_group(
        &self,
        workspace_id: &WorkspaceId,
    ) -> Result<SerializedPaneGroup> {
        self.get_pane_group_children(workspace_id, None)?
            .into_iter()
            .next()
            .context("No center pane group")
    }

    /// Recursively loads the children of one pane group; `group_id` of
    /// `None` selects the root level of the workspace.
    // NOTE(review): the `'a` lifetime parameter is unused and could be removed.
    fn get_pane_group_children<'a>(
        &self,
        workspace_id: &WorkspaceId,
        group_id: Option<GroupId>,
    ) -> Result<Vec<SerializedPaneGroup>> {
        // Groups and panes come back in one UNION query: a row with
        // group_id+axis is a child group, a row with pane_id is a leaf pane.
        self.select_bound::<(Option<GroupId>, &WorkspaceId), (Option<GroupId>, Option<Axis>, Option<PaneId>)>(indoc! {"
            SELECT group_id, axis, pane_id
            FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id
                  FROM pane_groups
                  UNION
                  SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id
                  FROM panes
                  -- Remove the dock panes from the union
                  WHERE parent_group_id IS NOT NULL and position IS NOT NULL)
            WHERE parent_group_id IS ? AND workspace_id = ?
            ORDER BY position
            "})?((group_id, workspace_id))?
        .into_iter()
        .map(|(group_id, axis, pane_id)| {
            if let Some((group_id, axis)) = group_id.zip(axis) {
                Ok(SerializedPaneGroup::Group {
                    axis,
                    children: self.get_pane_group_children(
                        workspace_id,
                        Some(group_id),
                    )?,
                })
            } else if let Some(pane_id) = pane_id {
                Ok(SerializedPaneGroup::Pane(SerializedPane {
                    children: self.get_items(pane_id)?,
                }))
            } else {
                bail!("Pane Group Child was neither a pane group or a pane");
            }
        })
        .collect::<Result<_>>()
    }

    /// Recursively writes a pane-group subtree. The root call (parent =
    /// None) must be a `Group`; a bare `Pane` at the root is rejected.
    pub(crate) fn save_pane_group(
        &self,
        workspace_id: &WorkspaceId,
        pane_group: &SerializedPaneGroup,
        parent: Option<(GroupId, usize)>,
    ) -> Result<()> {
        if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) {
            bail!("Pane groups must have a SerializedPaneGroup::Group at the root")
        }

        let (parent_id, position) = unzip_option(parent);

        match pane_group {
            SerializedPaneGroup::Group { axis, children } => {
                let parent_id = self.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")?
                    ((workspace_id, parent_id, position, *axis))?;

                // Child order is preserved via the enumerate position.
                for (position, group) in children.iter().enumerate() {
                    self.save_pane_group(workspace_id, group, Some((parent_id, position)))?
                }
                Ok(())
            }
            SerializedPaneGroup::Pane(pane) => self.save_pane(workspace_id, pane, parent),
        }
    }

    /// Loads the dock pane: the pane row with NULL parent and position.
    pub(crate) fn get_dock_pane(&self, workspace_id: &WorkspaceId) -> Result<SerializedPane> {
        let pane_id = self.select_row_bound(indoc! {"
            SELECT pane_id FROM panes
            WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?(
            workspace_id,
        )?
        .context("No dock pane for workspace")?;

        Ok(SerializedPane::new(
            self.get_items(pane_id).context("Reading items")?,
        ))
    }

    /// Writes one pane row plus its items. `parent` is None for the dock
    /// pane (stored with NULL parent/position — see `get_dock_pane`).
    pub(crate) fn save_pane(
        &self,
        workspace_id: &WorkspaceId,
        pane: &SerializedPane,
        parent: Option<(GroupId, usize)>,
    ) -> Result<()> {
        let (parent_id, order) = unzip_option(parent);

        let pane_id = self.insert_bound(
            "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)",
        )?((workspace_id, parent_id, order))?;

        self.save_items(workspace_id, pane_id, &pane.children)
            .context("Saving items")
    }

    /// Loads a pane's items in stored position order.
    // NOTE(review): only Terminal items round-trip today; any other stored
    // kind hits `unimplemented!` — confirm before persisting other kinds.
    pub(crate) fn get_items(&self, pane_id: PaneId) -> Result<Vec<SerializedItem>> {
        Ok(self.select_bound(indoc! {"
            SELECT item_id, kind FROM items
            WHERE pane_id = ?
            ORDER BY position"})?(pane_id)?
        .into_iter()
        .map(|(item_id, kind)| match kind {
            SerializedItemKind::Terminal => SerializedItem::Terminal { item_id },
            _ => unimplemented!(),
        })
        .collect())
    }

    /// Replaces the stored items for a pane with `items`, positionally.
    pub(crate) fn save_items(
        &self,
        workspace_id: &WorkspaceId,
        pane_id: PaneId,
        items: &[SerializedItem],
    ) -> Result<()> {
        // Both statements are prepared once and reused across the loop.
        let mut delete_old = self
            .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? AND item_id = ?")
            .context("Preparing deletion")?;
        let mut insert_new = self.exec_bound(
            "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)",
        ).context("Preparing insertion")?;
        for (position, item) in items.iter().enumerate() {
            delete_old((workspace_id, pane_id, item.item_id()))?;
            insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?;
        }

        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use db::open_memory_db;
    use settings::DockAnchor;

    use super::*;

    /// Round-trips workspaces through an in-memory db and checks that the
    /// set of worktree roots keys the lookup order-independently.
    #[test]
    fn test_workspace_assignment() {
        // env_logger::try_init().ok();

        let db = WorkspaceDb(open_memory_db("test_basic_functionality"));

        let workspace_1 = SerializedWorkspace {
            dock_anchor: DockAnchor::Bottom,
            dock_visible: true,
            center_group: Default::default(),
            dock_pane: Default::default(),
        };

        let workspace_2 = SerializedWorkspace {
            dock_anchor: DockAnchor::Expanded,
            dock_visible: false,
            center_group: Default::default(),
            dock_pane: Default::default(),
        };

        let workspace_3 = SerializedWorkspace {
            dock_anchor: DockAnchor::Right,
            dock_visible: true,
            center_group: Default::default(),
            dock_pane: Default::default(),
        };

        db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_1);
        db.save_workspace(&["/tmp"], None, &workspace_2);

        // NOTE(review): leftover debugging — this writes `test.db` into the
        // working directory on every test run; consider removing.
        db::write_db_to(&db, "test.db").unwrap();

        // Test that paths are treated as a set
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_1
        );
        assert_eq!(
            db.workspace_for_roots(&["/tmp2", "/tmp"]).unwrap(),
            workspace_1
        );

        // Make sure that other keys work
        assert_eq!(db.workspace_for_roots(&["/tmp"]).unwrap(), workspace_2);
        assert_eq!(db.workspace_for_roots(&["/tmp3", "/tmp2", "/tmp4"]), None);

        // Test 'mutate' case of updating a pre-existing id
        db.save_workspace(&["/tmp", "/tmp2"], Some(&["/tmp", "/tmp2"]), &workspace_2);
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_2
        );

        // Test other mechanism for mutating
        db.save_workspace(&["/tmp", "/tmp2"], None, &workspace_3);
        assert_eq!(
            db.workspace_for_roots(&["/tmp", "/tmp2"]).unwrap(),
            workspace_3
        );

        // Make sure that updating paths differently also works
        db.save_workspace(
            &["/tmp3", "/tmp4", "/tmp2"],
            Some(&["/tmp", "/tmp2"]),
            &workspace_3,
        );
        assert_eq!(db.workspace_for_roots(&["/tmp2", "tmp"]), None);
        assert_eq!(
            db.workspace_for_roots(&["/tmp2", "/tmp3", "/tmp4"])
                .unwrap(),
            workspace_3
        );
    }

    use crate::persistence::model::SerializedWorkspace;
    use crate::persistence::model::{SerializedItem, SerializedPane, SerializedPaneGroup};

    // Helper: a workspace with fixed dock settings around the given dock
    // pane and center group.
    fn default_workspace(
        dock_pane: SerializedPane,
        center_group: &SerializedPaneGroup,
    ) -> SerializedWorkspace {
        SerializedWorkspace {
            dock_anchor: DockAnchor::Right,
            dock_visible: false,
            center_group: center_group.clone(),
            dock_pane,
        }
    }

    /// Dock pane items should round-trip in their original (unsorted) order.
    #[test]
    fn test_basic_dock_pane() {
        // env_logger::try_init().ok();

        let db = WorkspaceDb(open_memory_db("basic_dock_pane"));

        let dock_pane = crate::persistence::model::SerializedPane {
            children: vec![
                SerializedItem::Terminal { item_id: 1 },
                SerializedItem::Terminal { item_id: 4 },
                SerializedItem::Terminal { item_id: 2 },
                SerializedItem::Terminal { item_id: 3 },
            ],
        };

        let workspace = default_workspace(dock_pane, &Default::default());

        db.save_workspace(&["/tmp"], None, &workspace);

        let new_workspace = db.workspace_for_roots(&["/tmp"]).unwrap();

        assert_eq!(workspace.dock_pane, new_workspace.dock_pane);
    }

    #[test]
    fn test_simple_split() {
        // env_logger::try_init().ok();

        let db = WorkspaceDb(open_memory_db("simple_split"));

        //  -----------------
        //  | 1,2   | 5,6   |
        //  | - - - |       |
        //  | 3,4   |       |
        //  -----------------
        let center_pane = SerializedPaneGroup::Group {
            axis: gpui::Axis::Horizontal,
            children: vec![
                SerializedPaneGroup::Group {
                    axis: gpui::Axis::Vertical,
                    children: vec![
                        SerializedPaneGroup::Pane(SerializedPane {
                            children: vec![
                                SerializedItem::Terminal { item_id: 1 },
                                SerializedItem::Terminal { item_id: 2 },
                            ],
                        }),
                        SerializedPaneGroup::Pane(SerializedPane {
                            children: vec![
                                SerializedItem::Terminal { item_id: 4 },
                                SerializedItem::Terminal { item_id: 3 },
                            ],
                        }),
                    ],
                },
                SerializedPaneGroup::Pane(SerializedPane {
                    children: vec![
                        SerializedItem::Terminal { item_id: 5 },
                        SerializedItem::Terminal { item_id: 6 },
                    ],
                }),
            ],
        };

        let workspace = default_workspace(Default::default(), &center_pane);

        db.save_workspace(&["/tmp"], None, &workspace);

        // NOTE(review): this compares the in-memory value against itself and
        // never re-reads from the db — presumably the round-trip assertion is
        // still TODO; confirm.
        assert_eq!(workspace.center_group, center_pane);
    }
}

View File

@ -0,0 +1,188 @@
use std::{
path::{Path, PathBuf},
sync::Arc,
};
use anyhow::{bail, Result};
use gpui::Axis;
use settings::DockAnchor;
use sqlez::{
bindable::{Bind, Column},
statement::Statement,
};
/// Identifier for a serialized workspace: the sorted set of worktree root
/// paths that were open in it. Sorting makes the id order-independent, so
/// the same roots always map to the same workspace row.
#[derive(Debug, Clone, PartialEq, Eq)]
pub(crate) struct WorkspaceId(Vec<PathBuf>);

impl WorkspaceId {
    /// Consumes the id, returning the worktree root paths it was built from.
    pub fn paths(self) -> Vec<PathBuf> {
        self.0
    }
}

impl<P: AsRef<Path>, T: IntoIterator<Item = P>> From<T> for WorkspaceId {
    fn from(iterator: T) -> Self {
        let mut sorted_roots: Vec<PathBuf> = iterator
            .into_iter()
            .map(|path| path.as_ref().to_path_buf())
            .collect();
        sorted_roots.sort();
        WorkspaceId(sorted_roots)
    }
}
impl Bind for &WorkspaceId {
    /// Serializes the path list with bincode and binds it as a single BLOB
    /// parameter.
    fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
        bincode::serialize(&self.0)
            .expect("Bincode serialization of paths should not fail")
            .bind(statement, start_index)
    }
}

impl Column for WorkspaceId {
    /// Reads the BLOB written by `Bind` and decodes it back into the path
    /// list. Consumes exactly one column.
    fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
        let blob = statement.column_blob(start_index)?;
        Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1))
    }
}
/// Everything needed to restore a workspace's layout from the database:
/// dock placement and visibility plus the serialized pane trees.
#[derive(Debug, PartialEq, Eq)]
pub struct SerializedWorkspace {
    pub dock_anchor: DockAnchor,
    pub dock_visible: bool,
    pub center_group: SerializedPaneGroup,
    pub dock_pane: SerializedPane,
}

/// A node in the pane layout tree: either a split group with an axis and
/// ordered children, or a leaf pane.
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum SerializedPaneGroup {
    Group {
        axis: Axis,
        children: Vec<SerializedPaneGroup>,
    },
    Pane(SerializedPane),
}

impl Default for SerializedPaneGroup {
    // A single horizontal group wrapping one empty pane — the root of a
    // layout is always a Group, never a bare Pane.
    fn default() -> Self {
        Self::Group {
            axis: Axis::Horizontal,
            children: vec![Self::Pane(Default::default())],
        }
    }
}

/// A leaf pane: the ordered list of items it contains.
#[derive(Debug, PartialEq, Eq, Default, Clone)]
pub struct SerializedPane {
    pub(crate) children: Vec<SerializedItem>,
}

impl SerializedPane {
    pub fn new(children: Vec<SerializedItem>) -> Self {
        SerializedPane { children }
    }
}

// Database row ids for groups and panes. Item ids come from the items'
// view ids, so they are not globally unique — only unique per workspace.
pub type GroupId = i64;
pub type PaneId = i64;
pub type ItemId = usize;
/// Discriminant for `SerializedItem`, stored as a TEXT column in the items
/// table.
pub(crate) enum SerializedItemKind {
    Editor,
    Diagnostics,
    ProjectSearch,
    Terminal,
}

impl Bind for SerializedItemKind {
    /// Binds the kind as its variant name; must stay in sync with the
    /// `Column` impl that parses it back.
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        match self {
            SerializedItemKind::Editor => "Editor",
            SerializedItemKind::Diagnostics => "Diagnostics",
            SerializedItemKind::ProjectSearch => "ProjectSearch",
            SerializedItemKind::Terminal => "Terminal",
        }
        .bind(statement, start_index)
    }
}
impl Column for SerializedItemKind {
    /// Parses the variant name written by `Bind`; any other stored value is
    /// treated as corrupt data.
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        let (kind_text, next_index) = String::column(statement, start_index)?;
        let kind = match kind_text.as_str() {
            "Editor" => SerializedItemKind::Editor,
            "Diagnostics" => SerializedItemKind::Diagnostics,
            "ProjectSearch" => SerializedItemKind::ProjectSearch,
            "Terminal" => SerializedItemKind::Terminal,
            _ => bail!("Stored serialized item kind is incorrect"),
        };
        Ok((kind, next_index))
    }
}
/// A single workspace item plus the kind-specific state needed to recreate
/// it (e.g. an editor's path, a project search's query).
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum SerializedItem {
    Editor { item_id: usize, path: Arc<Path> },
    Diagnostics { item_id: usize },
    ProjectSearch { item_id: usize, query: String },
    Terminal { item_id: usize },
}

impl SerializedItem {
    /// The item's view id (present on every variant).
    pub fn item_id(&self) -> usize {
        match self {
            SerializedItem::Editor { item_id, .. } => *item_id,
            SerializedItem::Diagnostics { item_id } => *item_id,
            SerializedItem::ProjectSearch { item_id, .. } => *item_id,
            SerializedItem::Terminal { item_id } => *item_id,
        }
    }

    /// The discriminant stored in the items table's `kind` column.
    pub(crate) fn kind(&self) -> SerializedItemKind {
        match self {
            SerializedItem::Editor { .. } => SerializedItemKind::Editor,
            SerializedItem::Diagnostics { .. } => SerializedItemKind::Diagnostics,
            SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch,
            SerializedItem::Terminal { .. } => SerializedItemKind::Terminal,
        }
    }
}
#[cfg(test)]
mod tests {
    use sqlez::connection::Connection;

    use crate::persistence::model::DockAnchor;

    use super::WorkspaceId;

    /// A `WorkspaceId` and `DockAnchor` should survive a write to and read
    /// from the database unchanged, with the id's path set treated as
    /// order-independent.
    #[test]
    fn test_workspace_round_trips() {
        let db = Connection::open_memory("workspace_id_round_trips");

        db.exec(indoc::indoc! {"
                CREATE TABLE workspace_id_test(
                    workspace_id BLOB,
                    dock_anchor TEXT
                );"})
        .unwrap()()
        .unwrap();

        // The original literals were "\test1" / "\test2": "\t" is a tab
        // escape, almost certainly a typo for a path separator. Use real
        // path-looking strings; the round-trip semantics are unchanged.
        let workspace_id: WorkspaceId = WorkspaceId::from(&["/test2", "/test1"]);

        db.exec_bound("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)")
            .unwrap()((&workspace_id, DockAnchor::Bottom))
            .unwrap();

        assert_eq!(
            db.select_row("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1")
                .unwrap()()
            .unwrap(),
            Some((WorkspaceId::from(&["/test1", "/test2"]), DockAnchor::Bottom))
        );
    }
}

View File

@ -5,19 +5,18 @@
pub mod dock;
pub mod pane;
pub mod pane_group;
mod persistence;
pub mod searchable;
pub mod shared_screen;
pub mod sidebar;
mod status_bar;
mod toolbar;
mod workspace_db;
use crate::workspace_db::model::SerializedWorkspace;
use crate::persistence::model::SerializedWorkspace;
use anyhow::{anyhow, Context, Result};
use call::ActiveCall;
use client::{proto, Client, PeerId, TypedEnvelope, UserStore};
use collections::{hash_map, HashMap, HashSet};
use db::{kvp::KeyValue, Db};
use dock::{DefaultItemFactory, Dock, ToggleDockButton};
use drag_and_drop::DragAndDrop;
use fs::{self, Fs};
@ -165,9 +164,7 @@ impl_internal_actions!(
);
impl_actions!(workspace, [ActivatePane]);
pub fn init(app_state: Arc<AppState>, cx: &mut MutableAppContext, db: Db<Workspace>) {
cx.set_global(db);
pub fn init(app_state: Arc<AppState>, cx: &mut MutableAppContext) {
pane::init(cx);
dock::init(cx);
@ -1291,12 +1288,8 @@ impl Workspace {
}
// Use the resolved worktree roots to get the serialized_db from the database
let serialized_workspace = cx.read(|cx| {
Workspace::workspace_for_roots(
cx.global::<Db<Workspace>>(),
&Vec::from_iter(worktree_roots.into_iter())[..],
)
});
let serialized_workspace = persistence::DB
.workspace_for_roots(&Vec::from_iter(worktree_roots.into_iter())[..]);
// Use the serialized workspace to construct the new window
let (_, workspace) = cx.add_window((app_state.build_window_options)(), |cx| {

View File

@ -1,765 +0,0 @@
use anyhow::{bail, Context, Result};
use db::Db;
use util::{iife, unzip_option, ResultExt};
use std::path::{Path, PathBuf};
use indoc::indoc;
use sqlez::{connection::Connection, domain::Domain, migrations::Migration};
use super::Workspace;
use self::model::{
Axis, GroupId, PaneId, SerializedItem, SerializedItemKind, SerializedPane, SerializedPaneGroup,
SerializedWorkspace, WorkspaceId,
};
// 1) Move all of this into Workspace crate
// 2) Deserialize items fully
// 3) Typed prepares (including how you expect to pull data out)
// 4) Investigate Tree column impls
// Top-level workspaces table. The workspace_id BLOB is the bincoded, sorted
// worktree-root set; timestamp lets us pick the most recently used workspace
// when no roots are given.
pub(crate) const WORKSPACES_MIGRATION: Migration = Migration::new(
    "workspace",
    &[indoc! {"
        CREATE TABLE workspaces(
            workspace_id BLOB PRIMARY KEY,
            dock_anchor TEXT, -- Enum: 'Bottom' / 'Right' / 'Expanded'
            dock_visible INTEGER, -- Boolean
            timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL
        ) STRICT;
    "}],
);

// Pane layout tables. Deleting a workspace cascades to its groups and panes;
// NULL parent_group_id/position marks a root group (pane_groups) or the dock
// pane (panes).
pub(crate) const PANE_MIGRATIONS: Migration = Migration::new(
    "pane",
    &[indoc! {"
        CREATE TABLE pane_groups(
            group_id INTEGER PRIMARY KEY,
            workspace_id BLOB NOT NULL,
            parent_group_id INTEGER, -- NULL indicates that this is a root node
            position INTEGER, -- NULL indicates that this is a root node
            axis TEXT NOT NULL, -- Enum: 'Vertical' / 'Horizontal'
            FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
            FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
        ) STRICT;

        CREATE TABLE panes(
            pane_id INTEGER PRIMARY KEY,
            workspace_id BLOB NOT NULL,
            parent_group_id INTEGER, -- NULL, this is a dock pane
            position INTEGER, -- NULL, this is a dock pane
            FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
            FOREIGN KEY(parent_group_id) REFERENCES pane_groups(group_id) ON DELETE CASCADE
        ) STRICT;
    "}],
);
// Items table: one row per item per pane, keyed by (item_id, workspace_id)
// since item ids are view ids and not globally unique.
pub(crate) const ITEM_MIGRATIONS: Migration = Migration::new(
    "item",
    &[indoc! {"
        CREATE TABLE items(
            item_id INTEGER NOT NULL, -- This is the item's view id, so this is not unique
            workspace_id BLOB NOT NULL,
            pane_id INTEGER NOT NULL,
            kind TEXT NOT NULL,
            position INTEGER NOT NULL,
            -- SQLite requires table constraints to be comma separated; the
            -- commas between the following three constraints were missing,
            -- which made this CREATE TABLE statement a parse error.
            FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE,
            FOREIGN KEY(pane_id) REFERENCES panes(pane_id) ON DELETE CASCADE,
            PRIMARY KEY(item_id, workspace_id)
        ) STRICT;
    "}],
);
impl Domain for Workspace {
    // Runs the workspace persistence migrations in dependency order:
    // workspaces before panes (FK target), panes before items.
    fn migrate(conn: &Connection) -> anyhow::Result<()> {
        WORKSPACES_MIGRATION.run(&conn)?;
        PANE_MIGRATIONS.run(&conn)?;
        ITEM_MIGRATIONS.run(&conn)
    }
}
impl Workspace {
    /// Returns a serialized workspace for the given worktree_roots. If the passed array
    /// is empty, the most recent workspace is returned instead. If no workspace for the
    /// passed roots is stored, returns none.
    pub fn workspace_for_roots<P: AsRef<Path>>(
        db: &Db<Workspace>,
        worktree_roots: &[P],
    ) -> Option<SerializedWorkspace> {
        let workspace_id: WorkspaceId = worktree_roots.into();

        // Note that we re-assign the workspace_id here in case it's empty
        // and we've grabbed the most recent workspace
        let (workspace_id, dock_anchor, dock_visible) = iife!({
            if worktree_roots.len() == 0 {
                db.select_row(indoc! {"
                    SELECT workspace_id, dock_anchor, dock_visible
                    FROM workspaces
                    ORDER BY timestamp DESC LIMIT 1"})?()?
            } else {
                db.select_row_bound(indoc! {"
                    SELECT workspace_id, dock_anchor, dock_visible
                    FROM workspaces
                    WHERE workspace_id = ?"})?(&workspace_id)?
            }
            .context("No workspaces found")
        })
        .warn_on_err()
        .flatten()?;

        // Failures reading the pane trees are logged and mapped to None
        // ("no stored workspace") rather than propagated.
        Some(SerializedWorkspace {
            dock_pane: Workspace::get_dock_pane(&db, &workspace_id)
                .context("Getting dock pane")
                .log_err()?,
            center_group: Workspace::get_center_pane_group(&db, &workspace_id)
                .context("Getting center group")
                .log_err()?,
            dock_anchor,
            dock_visible,
        })
    }

    /// Saves a workspace using the worktree roots. Will garbage collect any workspaces
    /// that used this workspace previously
    pub fn save_workspace<P: AsRef<Path>>(
        db: &Db<Workspace>,
        worktree_roots: &[P],
        old_roots: Option<&[P]>,
        workspace: &SerializedWorkspace,
    ) {
        let workspace_id: WorkspaceId = worktree_roots.into();

        // All writes happen inside one savepoint so a failure part-way
        // through leaves the previously stored state intact.
        db.with_savepoint("update_worktrees", || {
            if let Some(old_roots) = old_roots {
                let old_id: WorkspaceId = old_roots.into();

                db.exec_bound("DELETE FROM WORKSPACES WHERE workspace_id = ?")?(&old_id)?;
            }

            // Delete any previous workspaces with the same roots. This cascades to all
            // other tables that are based on the same roots set.
            // Insert new workspace into workspaces table if none were found
            db.exec_bound("DELETE FROM workspaces WHERE workspace_id = ?;")?(&workspace_id)?;

            db.exec_bound(
                "INSERT INTO workspaces(workspace_id, dock_anchor, dock_visible) VALUES (?, ?, ?)",
            )?((&workspace_id, workspace.dock_anchor, workspace.dock_visible))?;

            // Save center pane group and dock pane
            Workspace::save_pane_group(db, &workspace_id, &workspace.center_group, None)?;
            Workspace::save_pane(db, &workspace_id, &workspace.dock_pane, None)?;

            Ok(())
        })
        .with_context(|| {
            format!(
                "Update workspace with roots {:?}",
                worktree_roots
                    .iter()
                    .map(|p| p.as_ref())
                    .collect::<Vec<_>>()
            )
        })
        .log_err();
    }

    /// Returns the previous workspace ids sorted by last modified along with their opened worktree roots
    pub fn recent_workspaces(conn: &Connection, limit: usize) -> Vec<Vec<PathBuf>> {
        iife!({
            // TODO, upgrade anyhow: https://docs.rs/anyhow/1.0.66/anyhow/fn.Ok.html
            Ok::<_, anyhow::Error>(
                conn.select_bound::<usize, WorkspaceId>(
                    "SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?",
                )?(limit)?
                .into_iter()
                .map(|id| id.paths())
                .collect::<Vec<Vec<PathBuf>>>(),
            )
        })
        .log_err()
        .unwrap_or_default()
    }

    /// Loads the root group of the center layout for `workspace_id`.
    pub(crate) fn get_center_pane_group(
        db: &Db<Workspace>,
        workspace_id: &WorkspaceId,
    ) -> Result<SerializedPaneGroup> {
        Workspace::get_pane_group_children(&db, workspace_id, None)?
            .into_iter()
            .next()
            .context("No center pane group")
    }

    // Recursively reads the children of `group_id` (the root nodes when
    // `group_id` is None). Groups and panes are UNIONed into one query so a
    // single ORDER BY position interleaves them in layout order; rows with a
    // group_id are groups, rows with a pane_id are leaf panes.
    fn get_pane_group_children<'a>(
        db: &Db<Workspace>,
        workspace_id: &WorkspaceId,
        group_id: Option<GroupId>,
    ) -> Result<Vec<SerializedPaneGroup>> {
        db.select_bound::<(Option<GroupId>, &WorkspaceId), (Option<GroupId>, Option<Axis>, Option<PaneId>)>(indoc! {"
            SELECT group_id, axis, pane_id
            FROM (SELECT group_id, axis, NULL as pane_id, position, parent_group_id, workspace_id
                  FROM pane_groups
                  UNION
                  SELECT NULL, NULL, pane_id, position, parent_group_id, workspace_id
                  FROM panes
                  -- Remove the dock panes from the union
                  WHERE parent_group_id IS NOT NULL and position IS NOT NULL)
            WHERE parent_group_id IS ? AND workspace_id = ?
            ORDER BY position
            "})?((group_id, workspace_id))?
        .into_iter()
        .map(|(group_id, axis, pane_id)| {
            if let Some((group_id, axis)) = group_id.zip(axis) {
                Ok(SerializedPaneGroup::Group {
                    axis,
                    children: Workspace::get_pane_group_children(
                        db,
                        workspace_id,
                        Some(group_id),
                    )?,
                })
            } else if let Some(pane_id) = pane_id {
                Ok(SerializedPaneGroup::Pane(SerializedPane {
                    children: Workspace::get_items(db, pane_id)?,
                }))
            } else {
                bail!("Pane Group Child was neither a pane group or a pane");
            }
        })
        .collect::<Result<_>>()
    }

    // Recursively writes a pane group and its children. `parent` is
    // (parent group id, position within parent); None marks the root.
    pub(crate) fn save_pane_group(
        db: &Db<Workspace>,
        workspace_id: &WorkspaceId,
        pane_group: &SerializedPaneGroup,
        parent: Option<(GroupId, usize)>,
    ) -> Result<()> {
        // The reader (get_center_pane_group) takes the first root row, so
        // enforce the single-Group-root invariant at write time.
        if parent.is_none() && !matches!(pane_group, SerializedPaneGroup::Group { .. }) {
            bail!("Pane groups must have a SerializedPaneGroup::Group at the root")
        }

        let (parent_id, position) = unzip_option(parent);

        match pane_group {
            SerializedPaneGroup::Group { axis, children } => {
                let parent_id = db.insert_bound("INSERT INTO pane_groups(workspace_id, parent_group_id, position, axis) VALUES (?, ?, ?, ?)")?
                    ((workspace_id, parent_id, position, *axis))?;

                for (position, group) in children.iter().enumerate() {
                    Workspace::save_pane_group(
                        db,
                        workspace_id,
                        group,
                        Some((parent_id, position)),
                    )?
                }
                Ok(())
            }
            SerializedPaneGroup::Pane(pane) => Workspace::save_pane(db, workspace_id, pane, parent),
        }
    }

    // The dock pane is the one pane whose parent_group_id and position are
    // both NULL for this workspace (see the panes migration).
    pub(crate) fn get_dock_pane(
        db: &Db<Workspace>,
        workspace_id: &WorkspaceId,
    ) -> Result<SerializedPane> {
        let pane_id = db.select_row_bound(indoc! {"
            SELECT pane_id FROM panes
            WHERE workspace_id = ? AND parent_group_id IS NULL AND position IS NULL"})?(
            workspace_id,
        )?
        .context("No dock pane for workspace")?;

        Ok(SerializedPane::new(
            Workspace::get_items(db, pane_id).context("Reading items")?,
        ))
    }

    // Writes one pane row, then its items. A `parent` of None stores NULL
    // parent/position, which marks this as the dock pane.
    pub(crate) fn save_pane(
        db: &Db<Workspace>,
        workspace_id: &WorkspaceId,
        pane: &SerializedPane,
        parent: Option<(GroupId, usize)>,
    ) -> Result<()> {
        let (parent_id, order) = unzip_option(parent);

        let pane_id = db.insert_bound(
            "INSERT INTO panes(workspace_id, parent_group_id, position) VALUES (?, ?, ?)",
        )?((workspace_id, parent_id, order))?;

        Workspace::save_items(db, workspace_id, pane_id, &pane.children).context("Saving items")
    }

    // Reads a pane's items ordered by position. Only Terminal items are
    // deserializable so far; any other stored kind hits unimplemented!().
    pub(crate) fn get_items(db: &Db<Workspace>, pane_id: PaneId) -> Result<Vec<SerializedItem>> {
        Ok(db.select_bound(indoc! {"
            SELECT item_id, kind FROM items
            WHERE pane_id = ?
            ORDER BY position"})?(pane_id)?
        .into_iter()
        .map(|(item_id, kind)| match kind {
            SerializedItemKind::Terminal => SerializedItem::Terminal { item_id },
            _ => unimplemented!(),
        })
        .collect())
    }

    // Replaces a pane's items: each row is deleted by its composite key
    // (workspace_id, pane_id, item_id), then re-inserted at its new position.
    // Both statements are prepared once and reused across the loop.
    pub(crate) fn save_items(
        db: &Db<Workspace>,
        workspace_id: &WorkspaceId,
        pane_id: PaneId,
        items: &[SerializedItem],
    ) -> Result<()> {
        let mut delete_old = db
            .exec_bound("DELETE FROM items WHERE workspace_id = ? AND pane_id = ? AND item_id = ?")
            .context("Preparing deletion")?;
        let mut insert_new = db.exec_bound(
            "INSERT INTO items(item_id, workspace_id, pane_id, kind, position) VALUES (?, ?, ?, ?, ?)",
        ).context("Preparing insertion")?;
        for (position, item) in items.iter().enumerate() {
            delete_old((workspace_id, pane_id, item.item_id()))?;
            insert_new((item.item_id(), workspace_id, pane_id, item.kind(), position))?;
        }

        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use crate::workspace_db::model::DockAnchor::{Bottom, Expanded, Right};
    use crate::workspace_db::model::{
        SerializedItem, SerializedPane, SerializedPaneGroup, SerializedWorkspace,
    };
    use crate::{Db, Workspace};

    #[test]
    fn test_workspace_assignment() {
        // env_logger::try_init().ok();

        let db = Db::open_in_memory("test_basic_functionality");

        let workspace_1 = SerializedWorkspace {
            dock_anchor: Bottom,
            dock_visible: true,
            center_group: Default::default(),
            dock_pane: Default::default(),
        };

        let workspace_2 = SerializedWorkspace {
            dock_anchor: Expanded,
            dock_visible: false,
            center_group: Default::default(),
            dock_pane: Default::default(),
        };

        let workspace_3 = SerializedWorkspace {
            dock_anchor: Right,
            dock_visible: true,
            center_group: Default::default(),
            dock_pane: Default::default(),
        };

        Workspace::save_workspace(&db, &["/tmp", "/tmp2"], None, &workspace_1);
        Workspace::save_workspace(&db, &["/tmp"], None, &workspace_2);

        // NOTE: removed a leftover `db.write_to("test.db").unwrap()` debug
        // call here — it wrote a database file into the source tree on every
        // test run (which is how crates/workspace/test.db got committed).

        // Test that paths are treated as a set
        assert_eq!(
            Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(),
            workspace_1
        );
        assert_eq!(
            Workspace::workspace_for_roots(&db, &["/tmp2", "/tmp"]).unwrap(),
            workspace_1
        );

        // Make sure that other keys work
        assert_eq!(
            Workspace::workspace_for_roots(&db, &["/tmp"]).unwrap(),
            workspace_2
        );
        assert_eq!(
            Workspace::workspace_for_roots(&db, &["/tmp3", "/tmp2", "/tmp4"]),
            None
        );

        // Test 'mutate' case of updating a pre-existing id
        Workspace::save_workspace(
            &db,
            &["/tmp", "/tmp2"],
            Some(&["/tmp", "/tmp2"]),
            &workspace_2,
        );
        assert_eq!(
            Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(),
            workspace_2
        );

        // Test other mechanism for mutating
        Workspace::save_workspace(&db, &["/tmp", "/tmp2"], None, &workspace_3);
        assert_eq!(
            Workspace::workspace_for_roots(&db, &["/tmp", "/tmp2"]).unwrap(),
            workspace_3
        );

        // Make sure that updating paths differently also works
        Workspace::save_workspace(
            &db,
            &["/tmp3", "/tmp4", "/tmp2"],
            Some(&["/tmp", "/tmp2"]),
            &workspace_3,
        );
        assert_eq!(Workspace::workspace_for_roots(&db, &["/tmp2", "tmp"]), None);
        assert_eq!(
            Workspace::workspace_for_roots(&db, &["/tmp2", "/tmp3", "/tmp4"]).unwrap(),
            workspace_3
        );
    }

    /// Builds a workspace with fixed dock settings around the given panes.
    fn default_workspace(
        dock_pane: SerializedPane,
        center_group: &SerializedPaneGroup,
    ) -> SerializedWorkspace {
        SerializedWorkspace {
            dock_anchor: crate::workspace_db::model::DockAnchor::Right,
            dock_visible: false,
            center_group: center_group.clone(),
            dock_pane,
        }
    }

    #[test]
    fn test_basic_dock_pane() {
        // env_logger::try_init().ok();

        let db = Db::open_in_memory("basic_dock_pane");

        let dock_pane = crate::workspace_db::model::SerializedPane {
            children: vec![
                SerializedItem::Terminal { item_id: 1 },
                SerializedItem::Terminal { item_id: 4 },
                SerializedItem::Terminal { item_id: 2 },
                SerializedItem::Terminal { item_id: 3 },
            ],
        };

        let workspace = default_workspace(dock_pane, &Default::default());

        Workspace::save_workspace(&db, &["/tmp"], None, &workspace);

        let new_workspace = Workspace::workspace_for_roots(&db, &["/tmp"]).unwrap();

        assert_eq!(workspace.dock_pane, new_workspace.dock_pane);
    }

    #[test]
    fn test_simple_split() {
        // env_logger::try_init().ok();

        let db = Db::open_in_memory("simple_split");

        //  -----------------
        //  | 1,2   | 5,6   |
        //  | - - - |       |
        //  | 3,4   |       |
        //  -----------------
        let center_pane = SerializedPaneGroup::Group {
            axis: crate::workspace_db::model::Axis::Horizontal,
            children: vec![
                SerializedPaneGroup::Group {
                    axis: crate::workspace_db::model::Axis::Vertical,
                    children: vec![
                        SerializedPaneGroup::Pane(SerializedPane {
                            children: vec![
                                SerializedItem::Terminal { item_id: 1 },
                                SerializedItem::Terminal { item_id: 2 },
                            ],
                        }),
                        SerializedPaneGroup::Pane(SerializedPane {
                            children: vec![
                                SerializedItem::Terminal { item_id: 4 },
                                SerializedItem::Terminal { item_id: 3 },
                            ],
                        }),
                    ],
                },
                SerializedPaneGroup::Pane(SerializedPane {
                    children: vec![
                        SerializedItem::Terminal { item_id: 5 },
                        SerializedItem::Terminal { item_id: 6 },
                    ],
                }),
            ],
        };

        let workspace = default_workspace(Default::default(), &center_pane);

        Workspace::save_workspace(&db, &["/tmp"], None, &workspace);

        // Round-trip through the database; the previous assertion compared
        // the input to itself and could never fail.
        let new_workspace = Workspace::workspace_for_roots(&db, &["/tmp"]).unwrap();
        assert_eq!(new_workspace.center_group, center_pane);
    }
}
pub mod model {
use std::{
path::{Path, PathBuf},
sync::Arc,
};
use anyhow::{bail, Result};
use sqlez::{
bindable::{Bind, Column},
statement::Statement,
};
    /// Identifies a workspace by the sorted set of worktree roots it was
    /// opened with; doubles as the workspaces table's primary key.
    #[derive(Debug, Clone, PartialEq, Eq)]
    pub(crate) struct WorkspaceId(Vec<PathBuf>);

    impl WorkspaceId {
        /// Consumes the id, yielding the sorted worktree root paths.
        pub fn paths(self) -> Vec<PathBuf> {
            self.0
        }
    }

    impl<P: AsRef<Path>, T: IntoIterator<Item = P>> From<T> for WorkspaceId {
        // Sorting makes the id independent of the order the roots were
        // passed in, so equal path sets map to equal database keys.
        fn from(iterator: T) -> Self {
            let mut roots = iterator
                .into_iter()
                .map(|p| p.as_ref().to_path_buf())
                .collect::<Vec<_>>();
            roots.sort();
            Self(roots)
        }
    }

    impl Bind for &WorkspaceId {
        // Bincode the path list into a single BLOB parameter.
        fn bind(&self, statement: &Statement, start_index: i32) -> Result<i32> {
            bincode::serialize(&self.0)
                .expect("Bincode serialization of paths should not fail")
                .bind(statement, start_index)
        }
    }

    impl Column for WorkspaceId {
        // Decode the BLOB written by `Bind`; consumes exactly one column.
        fn column(statement: &mut Statement, start_index: i32) -> Result<(Self, i32)> {
            let blob = statement.column_blob(start_index)?;
            Ok((WorkspaceId(bincode::deserialize(blob)?), start_index + 1))
        }
    }
    /// Where the dock is attached — or Expanded to fill the window.
    /// Persisted as TEXT via the Bind/Column impls below.
    #[derive(Default, Debug, PartialEq, Eq, Clone, Copy)]
    pub enum DockAnchor {
        #[default]
        Bottom,
        Right,
        Expanded,
    }

    impl Bind for DockAnchor {
        // Store the variant name; must stay in sync with `Column` below.
        fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
            match self {
                DockAnchor::Bottom => "Bottom",
                DockAnchor::Right => "Right",
                DockAnchor::Expanded => "Expanded",
            }
            .bind(statement, start_index)
        }
    }

    impl Column for DockAnchor {
        // Parse the variant name written by `Bind`; anything else is
        // treated as corrupt stored data.
        fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
            String::column(statement, start_index).and_then(|(anchor_text, next_index)| {
                Ok((
                    match anchor_text.as_ref() {
                        "Bottom" => DockAnchor::Bottom,
                        "Right" => DockAnchor::Right,
                        "Expanded" => DockAnchor::Expanded,
                        _ => bail!("Stored dock anchor is incorrect"),
                    },
                    next_index,
                ))
            })
        }
    }
    /// Everything needed to restore a workspace's layout: dock placement
    /// and visibility plus the serialized pane trees.
    #[derive(Debug, PartialEq, Eq)]
    pub struct SerializedWorkspace {
        pub dock_anchor: DockAnchor,
        pub dock_visible: bool,
        pub center_group: SerializedPaneGroup,
        pub dock_pane: SerializedPane,
    }
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum Axis {
#[default]
Horizontal,
Vertical,
}
impl Bind for Axis {
fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
match self {
Axis::Horizontal => "Horizontal",
Axis::Vertical => "Vertical",
}
.bind(statement, start_index)
}
}
impl Column for Axis {
fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
String::column(statement, start_index).and_then(|(axis_text, next_index)| {
Ok((
match axis_text.as_str() {
"Horizontal" => Axis::Horizontal,
"Vertical" => Axis::Vertical,
_ => bail!("Stored serialized item kind is incorrect"),
},
next_index,
))
})
}
}
    /// A node in the pane layout tree: a split group with an axis and
    /// ordered children, or a leaf pane.
    #[derive(Debug, PartialEq, Eq, Clone)]
    pub enum SerializedPaneGroup {
        Group {
            axis: Axis,
            children: Vec<SerializedPaneGroup>,
        },
        Pane(SerializedPane),
    }

    // TODO(review): earlier notes asked whether dock panes and grouped panes
    // should be combined and PaneGroup::Pane collapsed — still unresolved.
    impl Default for SerializedPaneGroup {
        // A single horizontal group wrapping one empty pane, preserving the
        // invariant that the root of a layout is always a Group.
        fn default() -> Self {
            Self::Group {
                axis: Axis::Horizontal,
                children: vec![Self::Pane(Default::default())],
            }
        }
    }

    /// A leaf pane: the ordered list of items it contains.
    #[derive(Debug, PartialEq, Eq, Default, Clone)]
    pub struct SerializedPane {
        pub(crate) children: Vec<SerializedItem>,
    }

    impl SerializedPane {
        pub fn new(children: Vec<SerializedItem>) -> Self {
            SerializedPane { children }
        }
    }
    // Database row ids for groups and panes. Item ids are the items' view
    // ids, which are only unique per workspace (see the items table's
    // composite primary key).
    pub type GroupId = i64;
    pub type PaneId = i64;
    pub type ItemId = usize;

    /// Discriminant for `SerializedItem`, stored as a TEXT column.
    pub(crate) enum SerializedItemKind {
        Editor,
        Diagnostics,
        ProjectSearch,
        Terminal,
    }

    impl Bind for SerializedItemKind {
        // Store the variant name; must stay in sync with `Column` below.
        fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
            match self {
                SerializedItemKind::Editor => "Editor",
                SerializedItemKind::Diagnostics => "Diagnostics",
                SerializedItemKind::ProjectSearch => "ProjectSearch",
                SerializedItemKind::Terminal => "Terminal",
            }
            .bind(statement, start_index)
        }
    }

    impl Column for SerializedItemKind {
        // Parse the variant name written by `Bind`; anything else is
        // treated as corrupt stored data.
        fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
            String::column(statement, start_index).and_then(|(kind_text, next_index)| {
                Ok((
                    match kind_text.as_ref() {
                        "Editor" => SerializedItemKind::Editor,
                        "Diagnostics" => SerializedItemKind::Diagnostics,
                        "ProjectSearch" => SerializedItemKind::ProjectSearch,
                        "Terminal" => SerializedItemKind::Terminal,
                        _ => bail!("Stored serialized item kind is incorrect"),
                    },
                    next_index,
                ))
            })
        }
    }

    /// A single workspace item plus the kind-specific state needed to
    /// recreate it.
    #[derive(Debug, PartialEq, Eq, Clone)]
    pub enum SerializedItem {
        Editor { item_id: usize, path: Arc<Path> },
        Diagnostics { item_id: usize },
        ProjectSearch { item_id: usize, query: String },
        Terminal { item_id: usize },
    }

    impl SerializedItem {
        /// The item's view id (present on every variant).
        pub fn item_id(&self) -> usize {
            match self {
                SerializedItem::Editor { item_id, .. } => *item_id,
                SerializedItem::Diagnostics { item_id } => *item_id,
                SerializedItem::ProjectSearch { item_id, .. } => *item_id,
                SerializedItem::Terminal { item_id } => *item_id,
            }
        }

        /// The discriminant stored in the items table's `kind` column.
        pub(crate) fn kind(&self) -> SerializedItemKind {
            match self {
                SerializedItem::Editor { .. } => SerializedItemKind::Editor,
                SerializedItem::Diagnostics { .. } => SerializedItemKind::Diagnostics,
                SerializedItem::ProjectSearch { .. } => SerializedItemKind::ProjectSearch,
                SerializedItem::Terminal { .. } => SerializedItemKind::Terminal,
            }
        }
    }
#[cfg(test)]
mod tests {
use sqlez::connection::Connection;
use crate::workspace_db::model::DockAnchor;
use super::WorkspaceId;
#[test]
fn test_workspace_round_trips() {
let db = Connection::open_memory("workspace_id_round_trips");
db.exec(indoc::indoc! {"
CREATE TABLE workspace_id_test(
workspace_id BLOB,
dock_anchor TEXT
);"})
.unwrap()()
.unwrap();
let workspace_id: WorkspaceId = WorkspaceId::from(&["\test2", "\test1"]);
db.exec_bound("INSERT INTO workspace_id_test(workspace_id, dock_anchor) VALUES (?,?)")
.unwrap()((&workspace_id, DockAnchor::Bottom))
.unwrap();
assert_eq!(
db.select_row("SELECT workspace_id, dock_anchor FROM workspace_id_test LIMIT 1")
.unwrap()()
.unwrap(),
Some((WorkspaceId::from(&["\test1", "\test2"]), DockAnchor::Bottom))
);
}
}
}

BIN
crates/workspace/test.db Normal file

Binary file not shown.

View File

@ -23,7 +23,7 @@ use isahc::{config::Configurable, Request};
use language::LanguageRegistry;
use log::LevelFilter;
use parking_lot::Mutex;
use project::{Db, Fs, HomeDir, ProjectStore};
use project::{Fs, HomeDir, ProjectStore};
use serde_json::json;
use settings::{
self, settings_file::SettingsFile, KeymapFileContent, Settings, SettingsFileContent,
@ -37,12 +37,9 @@ use terminal::terminal_container_view::{get_working_directory, TerminalContainer
use fs::RealFs;
use settings::watched_json::{watch_keymap_file, watch_settings_file, WatchedJsonFile};
use theme::ThemeRegistry;
use util::{paths, ResultExt, TryFutureExt};
use util::{channel::RELEASE_CHANNEL, paths, ResultExt, TryFutureExt};
use workspace::{self, AppState, ItemHandle, NewFile, OpenPaths, Workspace};
use zed::{
self, build_window_options, initialize_workspace, languages, menus, RELEASE_CHANNEL,
RELEASE_CHANNEL_NAME,
};
use zed::{self, build_window_options, initialize_workspace, languages, menus};
fn main() {
let http = http::client();
@ -56,10 +53,6 @@ fn main() {
.map_or("dev".to_string(), |v| v.to_string());
init_panic_hook(app_version, http.clone(), app.background());
let db = app.background().spawn(async move {
project::Db::<project::KeyValue>::open(&*paths::DB_DIR, RELEASE_CHANNEL_NAME.as_str())
});
load_embedded_fonts(&app);
let fs = Arc::new(RealFs);
@ -147,10 +140,8 @@ fn main() {
.detach();
let project_store = cx.add_model(|_| ProjectStore::new());
let db = cx.background().block(db);
cx.set_global(db);
client.start_telemetry(cx.global::<Db<project::KeyValue>>().clone());
client.start_telemetry();
client.report_event("start app", Default::default());
let app_state = Arc::new(AppState {
@ -164,16 +155,9 @@ fn main() {
initialize_workspace,
default_item_factory,
});
auto_update::init(
cx.global::<Db<project::KeyValue>>().clone(),
http,
client::ZED_SERVER_URL.clone(),
cx,
);
auto_update::init(http, client::ZED_SERVER_URL.clone(), cx);
let workspace_db = cx.global::<Db<project::KeyValue>>().open_as::<Workspace>();
workspace::init(app_state.clone(), cx, workspace_db);
workspace::init(app_state.clone(), cx);
journal::init(app_state.clone(), cx);
theme_selector::init(app_state.clone(), cx);

View File

@ -12,7 +12,6 @@ use collab_ui::{CollabTitlebarItem, ToggleCollaborationMenu};
use collections::VecDeque;
pub use editor;
use editor::{Editor, MultiBuffer};
use lazy_static::lazy_static;
use gpui::{
actions,
@ -28,9 +27,9 @@ use project_panel::ProjectPanel;
use search::{BufferSearchBar, ProjectSearchBar};
use serde::Deserialize;
use serde_json::to_string_pretty;
use settings::{keymap_file_json_schema, settings_file_json_schema, ReleaseChannel, Settings};
use settings::{keymap_file_json_schema, settings_file_json_schema, Settings};
use std::{env, path::Path, str, sync::Arc};
use util::{paths, ResultExt};
use util::{channel::ReleaseChannel, paths, ResultExt};
pub use workspace;
use workspace::{sidebar::SidebarSide, AppState, Workspace};
@ -69,17 +68,6 @@ actions!(
const MIN_FONT_SIZE: f32 = 6.0;
lazy_static! {
pub static ref RELEASE_CHANNEL_NAME: String =
env::var("ZED_RELEASE_CHANNEL").unwrap_or(include_str!("../RELEASE_CHANNEL").to_string());
pub static ref RELEASE_CHANNEL: ReleaseChannel = match RELEASE_CHANNEL_NAME.as_str() {
"dev" => ReleaseChannel::Dev,
"preview" => ReleaseChannel::Preview,
"stable" => ReleaseChannel::Stable,
_ => panic!("invalid release channel {}", *RELEASE_CHANNEL_NAME),
};
}
pub fn init(app_state: &Arc<AppState>, cx: &mut gpui::MutableAppContext) {
cx.add_action(about);
cx.add_global_action(|_: &Hide, cx: &mut gpui::MutableAppContext| {
@ -629,7 +617,7 @@ mod tests {
use gpui::{
executor::Deterministic, AssetSource, MutableAppContext, TestAppContext, ViewHandle,
};
use project::{Db, Project, ProjectPath};
use project::{Project, ProjectPath};
use serde_json::json;
use std::{
collections::HashSet,
@ -774,6 +762,8 @@ mod tests {
async fn test_new_empty_workspace(cx: &mut TestAppContext) {
let app_state = init(cx);
cx.dispatch_global_action(workspace::NewFile);
cx.foreground().run_until_parked();
let window_id = *cx.window_ids().first().unwrap();
let workspace = cx.root_view::<Workspace>(window_id).unwrap();
let editor = workspace.update(cx, |workspace, cx| {
@ -1816,7 +1806,7 @@ mod tests {
state.initialize_workspace = initialize_workspace;
state.build_window_options = build_window_options;
call::init(app_state.client.clone(), app_state.user_store.clone(), cx);
workspace::init(app_state.clone(), cx, Db::open_in_memory("test"));
workspace::init(app_state.clone(), cx);
editor::init(cx);
pane::init(cx);
app_state