Rebase fix + Started writing the real SQL we're going to need

This commit is contained in:
Mikayla Maki 2022-10-24 16:55:32 -07:00
parent e5c6393f85
commit 500ecbf915
8 changed files with 109 additions and 81 deletions

View File

@ -18,8 +18,9 @@ async-trait = "0.1"
lazy_static = "1.4.0"
log = { version = "0.4.16", features = ["kv_unstable_serde"] }
parking_lot = "0.11.1"
rusqlite = { version = "0.28.0", features = ["bundled", "serde_json"] }
rusqlite = { version = "0.28.0", features = ["bundled", "serde_json", "backup"] }
rusqlite_migration = { git = "https://github.com/cljoly/rusqlite_migration", rev = "c433555d7c1b41b103426e35756eb3144d0ebbc6" }
>>>>>>> dd2ddc5e3 (Started writing the real SQL we're going to need)
serde = { workspace = true }
serde_rusqlite = "0.31.0"

5
crates/db/README.md Normal file
View File

@ -0,0 +1,5 @@
# Building Queries
First, craft your test data. The examples folder shows a template for building a test DB, which can be run with `cargo run --example [your-example]`.
To actually use and test your queries, import the generated DB file into https://sqliteonline.com/

View File

@ -0,0 +1,22 @@
use std::{fs::File, path::Path};

/// Name of the SQLite file this example writes into the working directory.
const TEST_FILE: &str = "test-db.db";

/// Builds an in-memory test database, writes a couple of kvp rows, and
/// dumps it to `TEST_FILE` so the result can be imported into an external
/// SQL inspector (see crates/db/README.md).
fn main() -> anyhow::Result<()> {
    let db = db::Db::open_in_memory();
    if db.real().is_none() {
        return Err(anyhow::anyhow!("Migrations failed"));
    }

    // Ensure the destination file exists (and is truncated) before backup.
    let file = Path::new(TEST_FILE);
    let f = File::create(file)?;
    drop(f);

    db.write_kvp("test", "1")?;
    db.write_kvp("test", "2")?;
    // Propagate backup failures instead of silently discarding them
    // (the original used `.ok()`, which hid any write error).
    db.write_to(file)?;

    println!("Wrote database!");
    Ok(())
}

View File

@ -11,7 +11,7 @@ use std::sync::Arc;
use anyhow::Result;
use log::error;
use parking_lot::Mutex;
use rusqlite::Connection;
use rusqlite::{backup, Connection};
use migrations::MIGRATIONS;
pub use workspace::*;
@ -54,27 +54,6 @@ impl Db {
})
}
/// Open a in memory database for testing and as a fallback.
#[cfg(any(test, feature = "test-support"))]
pub fn open_in_memory() -> Self {
Connection::open_in_memory()
.map_err(Into::into)
.and_then(|connection| Self::initialize(connection))
.map(|connection| {
Db::Real(Arc::new(RealDb {
connection,
path: None,
}))
})
.unwrap_or_else(|e| {
error!(
"Connecting to in memory db failed. Reverting to null db. {}",
e
);
Self::Null
})
}
fn initialize(mut conn: Connection) -> Result<Mutex<Connection>> {
MIGRATIONS.to_latest(&mut conn)?;
@ -96,6 +75,43 @@ impl Db {
_ => None,
}
}
/// Open an in-memory database for testing and as a fallback.
///
/// Returns `Db::Null` (and logs the error) when the connection cannot be
/// opened or the migrations fail, so callers never observe a panic here.
pub fn open_in_memory() -> Self {
    let initialized = Connection::open_in_memory()
        .map_err(Into::into)
        .and_then(Self::initialize);

    match initialized {
        Ok(connection) => Db::Real(Arc::new(RealDb {
            connection,
            path: None,
        })),
        Err(e) => {
            error!(
                "Connecting to in memory db failed. Reverting to null db. {}",
                e
            );
            Self::Null
        }
    }
}
pub fn write_to<P: AsRef<Path>>(&self, dest: P) -> Result<()> {
self.real()
.map(|db| {
if db.path.is_some() {
panic!("DB already exists");
}
let lock = db.connection.lock();
let mut dst = Connection::open(dest)?;
let backup = backup::Backup::new(&lock, &mut dst)?;
backup.step(-1)?;
Ok(())
})
.unwrap_or(Ok(()))
}
}
impl Drop for Db {

View File

@ -1,7 +1,7 @@
use rusqlite_migration::{Migrations, M};
// use crate::items::ITEMS_M_1;
use crate::kvp::KVP_M_1;
use crate::{kvp::KVP_M_1, WORKSPACE_M_1};
// Migrations must stay in the order they were written: only ever append new
// migrations to the end. Reordering or editing existing entries will likely
// break databases that already ran the earlier versions.
@ -10,5 +10,6 @@ use crate::kvp::KVP_M_1;
// Ordered list of every schema migration; applied in sequence by
// `Db::initialize` via `MIGRATIONS.to_latest`.
lazy_static::lazy_static! {
pub static ref MIGRATIONS: Migrations<'static> = Migrations::new(vec![
M::up(KVP_M_1),
M::up(WORKSPACE_M_1)
]);
}

View File

@ -28,6 +28,13 @@ CREATE TABLE pane_items(
index INTEGER,
KEY (workspace_id, pane_id)
) STRICT;
ALTER TABLE WORKSPACE
ADD THESE COLS:
center_group INTEGER NOT NULL,
dock_pane INTEGER NOT NULL,
-- FOREIGN KEY(center_group) REFERENCES pane_groups(group_id)
-- FOREIGN KEY(dock_pane) REFERENCES pane_items(pane_id)
";
#[derive(Debug, PartialEq, Eq, Copy, Clone)]

View File

@ -1,7 +1,6 @@
use std::{
path::{Path, PathBuf},
sync::Arc,
};
use anyhow::Result;
use std::{path::Path, sync::Arc};
use crate::pane::{PaneGroupId, PaneId, SerializedPane, SerializedPaneGroup};
@ -9,18 +8,15 @@ use super::Db;
pub(crate) const WORKSPACE_M_1: &str = "
CREATE TABLE workspaces(
workspace_id INTEGER PRIMARY KEY,
center_group INTEGER NOT NULL,
dock_pane INTEGER NOT NULL,
timestamp INTEGER,
FOREIGN KEY(center_group) REFERENCES pane_groups(group_id)
FOREIGN KEY(dock_pane) REFERENCES pane_items(pane_id)
workspace_id INTEGER PRIMARY KEY AUTOINCREMENT,
timestamp TEXT DEFAULT CURRENT_TIMESTAMP,
dummy_data INTEGER
) STRICT;
CREATE TABLE worktree_roots(
worktree_root BLOB NOT NULL,
workspace_id INTEGER NOT NULL,
FOREIGN KEY(workspace_id) REFERENCES workspace_ids(workspace_id)
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id)
) STRICT;
";
@ -35,18 +31,19 @@ CREATE TABLE worktree_roots(
// Case 4: Starting Zed with multiple project folders
// > Zed ~/projects/Zed ~/projects/Zed.dev
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct WorkspaceId(usize);
// Newtype over a workspace's row id in the `workspaces` table. Backed by
// i64 because it is populated from SQLite's `last_insert_rowid()`.
#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)]
pub struct WorkspaceId(i64);
// Intermediate row shape pairing a workspace with its pane-layout ids.
// NOTE(review): currently unreferenced by the active code paths visible
// here (its only uses are commented out) — confirm before removing.
struct WorkspaceRow {
pub center_group_id: PaneGroupId,
pub dock_pane_id: PaneId,
}
// Workspace state as loaded from / saved to the database. The pane fields
// are temporarily commented out while the schema is being reworked.
#[derive(Default)]
pub struct SerializedWorkspace {
pub workspace_id: WorkspaceId,
// pub center_group: SerializedPaneGroup,
// pub dock_pane: Option<SerializedPane>,
}
impl Db {
@ -58,28 +55,33 @@ impl Db {
) -> SerializedWorkspace {
// Find the workspace id which is uniquely identified by this set of paths return it if found
if let Some(workspace_id) = self.workspace_id(worktree_roots) {
let workspace_row = self.get_workspace_row(workspace_id);
let center_group = self.get_pane_group(workspace_row.center_group_id);
let dock_pane = self.get_pane(workspace_row.dock_pane_id);
// TODO
// let workspace_row = self.get_workspace_row(workspace_id);
// let center_group = self.get_pane_group(workspace_row.center_group_id);
// let dock_pane = self.get_pane(workspace_row.dock_pane_id);
SerializedWorkspace {
workspace_id,
center_group,
dock_pane: Some(dock_pane),
// center_group,
// dock_pane: Some(dock_pane),
}
} else {
let workspace_id = self.get_next_workspace_id();
SerializedWorkspace {
workspace_id,
center_group: SerializedPaneGroup::empty_root(workspace_id),
dock_pane: None,
}
self.make_new_workspace()
}
}
fn get_next_workspace_id(&self) -> WorkspaceId {
unimplemented!()
fn make_new_workspace(&self) -> SerializedWorkspace {
self.real()
.map(|db| {
let lock = db.connection.lock();
match lock.execute("INSERT INTO workspaces(dummy_data) VALUES(1);", []) {
Ok(_) => SerializedWorkspace {
workspace_id: WorkspaceId(lock.last_insert_rowid()),
},
Err(_) => Default::default(),
}
})
.unwrap_or_default()
}
fn workspace_id(&self, worktree_roots: &[Arc<Path>]) -> Option<WorkspaceId> {
@ -128,6 +130,7 @@ mod tests {
PathBuf::from(path).into()
}
#[test]
fn test_detect_workspace_id() {
let data = &[
(WorkspaceId(1), vec![arc_path("/tmp")]),
@ -160,6 +163,7 @@ mod tests {
);
}
#[test]
fn test_tricky_overlapping_updates() {
// DB state:
// (/tree) -> ID: 1
@ -202,31 +206,3 @@ mod tests {
assert_eq!(recent_workspaces.get(2).unwrap().0, WorkspaceId(1));
}
}
// [/tmp, /tmp2] -> ID1?
// [/tmp] -> ID2?
/*
path | id
/tmp ID1
/tmp ID2
/tmp2 ID1
SELECT id
FROM workspace_ids
WHERE path IN (path1, path2)
INTERSECT
SELECT id
FROM workspace_ids
WHERE path = path_2
... and etc. for each element in path array
If contains row, yay! If not,
SELECT max(id) FROM workspace_ids
Select id WHERE path IN paths
SELECT MAX(id)
*/

0
test.rs Normal file
View File