WIP switching to sqlez

This commit is contained in:
Kay Simmons 2022-11-01 13:15:58 -07:00 committed by Mikayla Maki
parent 7744c9ba45
commit e3fdfe02e5
4 changed files with 428 additions and 501 deletions

712
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -11,6 +11,7 @@ doctest = false
test-support = []
[dependencies]
indoc = "1.0.4"
collections = { path = "../collections" }
gpui = { path = "../gpui" }
anyhow = "1.0.57"
@ -18,10 +19,7 @@ async-trait = "0.1"
lazy_static = "1.4.0"
log = { version = "0.4.16", features = ["kv_unstable_serde"] }
parking_lot = "0.11.1"
rusqlite = { version = "0.28.0", features = ["bundled", "serde_json", "backup"] }
rusqlite_migration = { git = "https://github.com/cljoly/rusqlite_migration", rev = "c433555d7c1b41b103426e35756eb3144d0ebbc6" }
serde = { workspace = true }
serde_rusqlite = "0.31.0"
sqlez = { git = "https://github.com/Kethku/sqlez", rev = "c8c01fe6b82085bbfe81b2a9406718454a7839c4c" } # NOTE(review): git deps take `rev`, not `ref`; also this hash is 41 hex chars — a full SHA-1 is 40, verify the pin
[dev-dependencies]
gpui = { path = "../gpui", features = ["test-support"] }

View File

@ -5,26 +5,25 @@ pub mod pane;
pub mod workspace;
use std::fs;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::ops::Deref;
use std::path::Path;
use anyhow::Result;
use log::error;
use parking_lot::Mutex;
use rusqlite::{backup, Connection};
use indoc::indoc;
use sqlez::connection::Connection;
use sqlez::thread_safe_connection::ThreadSafeConnection;
use migrations::MIGRATIONS;
pub use workspace::*;
#[derive(Clone)]
pub enum Db {
Real(Arc<RealDb>),
Null,
}
struct Db(ThreadSafeConnection);
pub struct RealDb {
connection: Mutex<Connection>,
path: Option<PathBuf>,
impl Deref for Db {
type Target = sqlez::connection::Connection;
fn deref(&self) -> &Self::Target {
        self.0.deref()
}
}
impl Db {
@ -36,104 +35,44 @@ impl Db {
.expect("Should be able to create the database directory");
let db_path = current_db_dir.join(Path::new("db.sqlite"));
Connection::open(db_path)
.map_err(Into::into)
.and_then(|connection| Self::initialize(connection))
.map(|connection| {
Db::Real(Arc::new(RealDb {
connection,
path: Some(db_dir.to_path_buf()),
}))
})
.unwrap_or_else(|e| {
error!(
"Connecting to file backed db failed. Reverting to null db. {}",
e
);
Self::Null
})
}
fn initialize(mut conn: Connection) -> Result<Mutex<Connection>> {
MIGRATIONS.to_latest(&mut conn)?;
conn.pragma_update(None, "journal_mode", "WAL")?;
conn.pragma_update(None, "synchronous", "NORMAL")?;
conn.pragma_update(None, "foreign_keys", true)?;
conn.pragma_update(None, "case_sensitive_like", true)?;
Ok(Mutex::new(conn))
Db(
ThreadSafeConnection::new(db_path.to_string_lossy().as_ref(), true)
.with_initialize_query(indoc! {"
PRAGMA journal_mode=WAL;
PRAGMA synchronous=NORMAL;
PRAGMA foreign_keys=TRUE;
PRAGMA case_sensitive_like=TRUE;
"}),
)
}
pub fn persisting(&self) -> bool {
self.real().and_then(|db| db.path.as_ref()).is_some()
}
pub fn real(&self) -> Option<&RealDb> {
match self {
Db::Real(db) => Some(&db),
_ => None,
}
self.persistent()
}
/// Open a in memory database for testing and as a fallback.
pub fn open_in_memory() -> Self {
Connection::open_in_memory()
.map_err(Into::into)
.and_then(|connection| Self::initialize(connection))
.map(|connection| {
Db::Real(Arc::new(RealDb {
connection,
path: None,
}))
})
.unwrap_or_else(|e| {
error!(
"Connecting to in memory db failed. Reverting to null db. {}",
e
);
Self::Null
})
Db(
ThreadSafeConnection::new("Zed DB", false).with_initialize_query(indoc! {"
PRAGMA journal_mode=WAL;
PRAGMA synchronous=NORMAL;
PRAGMA foreign_keys=TRUE;
PRAGMA case_sensitive_like=TRUE;
"}),
)
}
pub fn write_to<P: AsRef<Path>>(&self, dest: P) -> Result<()> {
self.real()
.map(|db| {
if db.path.is_some() {
panic!("DB already exists");
}
let lock = db.connection.lock();
let mut dst = Connection::open(dest)?;
let backup = backup::Backup::new(&lock, &mut dst)?;
backup.step(-1)?;
Ok(())
})
.unwrap_or(Ok(()))
let destination = Connection::open_file(dest.as_ref().to_string_lossy().as_ref());
self.backup(&destination)
}
}
impl Drop for Db {
fn drop(&mut self) {
match self {
Db::Real(real_db) => {
let lock = real_db.connection.lock();
let _ = lock.pragma_update(None, "analysis_limit", "500");
let _ = lock.pragma_update(None, "optimize", "");
}
Db::Null => {}
}
}
}
#[cfg(test)]
mod tests {
use crate::migrations::MIGRATIONS;
#[test]
fn test_migrations() {
assert!(MIGRATIONS.validate().is_ok());
self.exec(indoc! {"
PRAGMA analysis_limit=500;
PRAGMA optimize"})
.ok();
}
}

View File

@ -1,8 +1,5 @@
use anyhow::Result;
use rusqlite::{params, Connection, OptionalExtension};
use serde::{Deserialize, Serialize};
use std::{
ffi::OsStr,
fmt::Debug,
@ -12,28 +9,34 @@ use std::{
time::{SystemTime, UNIX_EPOCH},
};
use anyhow::Result;
use indoc::indoc;
use sqlez::{connection::Connection, migrations::Migration};
use crate::pane::SerializedDockPane;
use super::Db;
// If you need to debug the worktree root code, change 'BLOB' here to 'TEXT' for easier debugging
// you might want to update some of the parsing code as well, I've left the variations in but commented
// out
pub(crate) const WORKSPACE_M_1: &str = "
CREATE TABLE workspaces(
workspace_id INTEGER PRIMARY KEY,
last_opened_timestamp INTEGER NOT NULL
) STRICT;
// out. This will panic if run on an existing db that has already been migrated
const WORKSPACES_MIGRATION: Migration = Migration::new(
"migrations",
&[indoc! {"
CREATE TABLE workspaces(
workspace_id INTEGER PRIMARY KEY,
timestamp TEXT DEFAULT CURRENT_TIMESTAMP NOT NULL
) STRICT;
CREATE TABLE worktree_roots(
worktree_root BLOB NOT NULL,
workspace_id INTEGER NOT NULL,
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
PRIMARY KEY(worktree_root, workspace_id)
) STRICT;"}],
);
CREATE TABLE worktree_roots(
worktree_root BLOB NOT NULL,
workspace_id INTEGER NOT NULL,
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
PRIMARY KEY(worktree_root, workspace_id)
) STRICT;
";
#[derive(Debug, PartialEq, Eq, Copy, Clone, Default, Deserialize, Serialize)]
#[derive(Debug, PartialEq, Eq, Copy, Clone, Default)]
pub struct WorkspaceId(i64);
impl WorkspaceId {
@ -77,19 +80,9 @@ impl Db {
where
P: AsRef<Path> + Debug,
{
fn logic<P>(
connection: &mut Connection,
worktree_roots: &[P],
) -> Result<SerializedWorkspace>
where
P: AsRef<Path> + Debug,
{
let tx = connection.transaction()?;
tx.execute(
"INSERT INTO workspaces(last_opened_timestamp) VALUES (?)",
[current_millis()?],
)?;
let result = (|| {
let tx = self.transaction()?;
        tx.execute("INSERT INTO workspaces(last_opened_timestamp) VALUES (?)", [current_millis()?])?;
let id = WorkspaceId(tx.last_insert_rowid());
@ -101,22 +94,15 @@ impl Db {
workspace_id: id,
dock_pane: None,
})
})();
match result {
Ok(serialized_workspace) => serialized_workspace,
Err(err) => {
log::error!("Failed to insert new workspace into DB: {}", err);
Default::default()
}
}
self.real()
.map(|db| {
let mut lock = db.connection.lock();
// No need to waste the memory caching this, should happen rarely.
match logic(&mut lock, worktree_roots) {
Ok(serialized_workspace) => serialized_workspace,
Err(err) => {
log::error!("Failed to insert new workspace into DB: {}", err);
Default::default()
}
}
})
.unwrap_or_default()
}
fn workspace_id<P>(&self, worktree_roots: &[P]) -> Option<WorkspaceId>