Converted to sqlez, so much nicer

Mikayla Maki 2022-11-02 13:26:23 -07:00
parent c8face33fa
commit 406663c75e
12 changed files with 278 additions and 197 deletions

Cargo.lock (generated)
View File

@ -1559,6 +1559,7 @@ dependencies = [
"parking_lot 0.11.2",
"sqlez",
"tempdir",
"util",
]
[[package]]

View File

@ -11,11 +11,12 @@ doctest = false
test-support = []
[dependencies]
indoc = "1.0.4"
collections = { path = "../collections" }
gpui = { path = "../gpui" }
sqlez = { path = "../sqlez" }
util = { path = "../util" }
anyhow = "1.0.57"
indoc = "1.0.4"
async-trait = "0.1"
lazy_static = "1.4.0"
log = { version = "0.4.16", features = ["kv_unstable_serde"] }

View File

@ -7,10 +7,8 @@ const TEST_FILE: &'static str = "test-db.db";
fn main() -> anyhow::Result<()> {
env_logger::init();
let db = db::Db::open_in_memory();
if db.real().is_none() {
return Err(anyhow::anyhow!("Migrations failed"));
}
let db = db::Db::open_in_memory("db");
let file = Path::new(TEST_FILE);
let f = File::create(file)?;
@ -21,21 +19,21 @@ fn main() -> anyhow::Result<()> {
let workspace_3 = db.workspace_for_roots(&["/tmp3", "/tmp2"]);
db.save_dock_pane(
workspace_1.workspace_id,
&workspace_1.workspace_id,
&SerializedDockPane {
anchor_position: DockAnchor::Expanded,
visible: true,
},
);
db.save_dock_pane(
workspace_2.workspace_id,
&workspace_2.workspace_id,
&SerializedDockPane {
anchor_position: DockAnchor::Bottom,
visible: true,
},
);
db.save_dock_pane(
workspace_3.workspace_id,
&workspace_3.workspace_id,
&SerializedDockPane {
anchor_position: DockAnchor::Right,
visible: false,

View File

@ -4,10 +4,8 @@ const TEST_FILE: &'static str = "test-db.db";
fn main() -> anyhow::Result<()> {
env_logger::init();
let db = db::Db::open_in_memory();
if db.real().is_none() {
return Err(anyhow::anyhow!("Migrations failed"));
}
let db = db::Db::open_in_memory("db");
let file = Path::new(TEST_FILE);
let f = File::create(file)?;

View File

@ -18,7 +18,7 @@ use sqlez::thread_safe_connection::ThreadSafeConnection;
pub use workspace::*;
#[derive(Clone)]
struct Db(ThreadSafeConnection);
pub struct Db(ThreadSafeConnection);
impl Deref for Db {
type Target = sqlez::connection::Connection;
@ -54,15 +54,15 @@ impl Db {
}
/// Open an in-memory database for testing and as a fallback.
pub fn open_in_memory() -> Self {
Db(
ThreadSafeConnection::new("Zed DB", false).with_initialize_query(indoc! {"
pub fn open_in_memory(db_name: &str) -> Self {
Db(ThreadSafeConnection::new(db_name, false)
.with_initialize_query(indoc! {"
PRAGMA journal_mode=WAL;
PRAGMA synchronous=NORMAL;
PRAGMA foreign_keys=TRUE;
PRAGMA case_sensitive_like=TRUE;
"}),
)
"})
.with_migrations(&[KVP_MIGRATION, WORKSPACES_MIGRATION, PANE_MIGRATIONS]))
}
pub fn write_to<P: AsRef<Path>>(&self, dest: P) -> Result<()> {
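For illustration, a minimal usage sketch of the new Db handle (not part of this commit), assuming the API exactly as it appears in this diff: open_in_memory above, write_kvp/read_kvp from kvp.rs below, and write_to:
fn db_usage_sketch() -> anyhow::Result<()> {
    // Opening an in-memory database runs the PRAGMA initialization query and the
    // KVP_MIGRATION, WORKSPACES_MIGRATION and PANE_MIGRATIONS listed above.
    let db = db::Db::open_in_memory("db_usage_sketch");
    // Round-trip a key-value pair through the kv_store table.
    db.write_kvp("key-1", "one")?;
    assert_eq!(db.read_kvp("key-1")?, Some("one".to_string()));
    // Optionally dump the in-memory contents to a file for inspection.
    db.write_to("db_usage_sketch.db")?;
    Ok(())
}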

View File

@ -23,7 +23,7 @@ impl Db {
pub fn write_kvp(&self, key: &str, value: &str) -> Result<()> {
self.0
.prepare("INSERT OR REPLACE INTO kv_store(key, value) VALUES (?, ?)")?
.prepare("INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))")?
.with_bindings((key, value))?
.exec()
}
@ -44,21 +44,21 @@ mod tests {
#[test]
fn test_kvp() -> Result<()> {
let db = Db::open_in_memory();
let db = Db::open_in_memory("test_kvp");
assert_eq!(db.read_kvp("key-1")?, None);
assert_eq!(db.read_kvp("key-1").unwrap(), None);
db.write_kvp("key-1", "one")?;
assert_eq!(db.read_kvp("key-1")?, Some("one".to_string()));
db.write_kvp("key-1", "one").unwrap();
assert_eq!(db.read_kvp("key-1").unwrap(), Some("one".to_string()));
db.write_kvp("key-1", "one-2")?;
assert_eq!(db.read_kvp("key-1")?, Some("one-2".to_string()));
db.write_kvp("key-1", "one-2").unwrap();
assert_eq!(db.read_kvp("key-1").unwrap(), Some("one-2".to_string()));
db.write_kvp("key-2", "two")?;
assert_eq!(db.read_kvp("key-2")?, Some("two".to_string()));
db.write_kvp("key-2", "two").unwrap();
assert_eq!(db.read_kvp("key-2").unwrap(), Some("two".to_string()));
db.delete_kvp("key-1")?;
assert_eq!(db.read_kvp("key-1")?, None);
db.delete_kvp("key-1").unwrap();
assert_eq!(db.read_kvp("key-1").unwrap(), None);
Ok(())
}
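Most of this commit leans on the same sqlez statement pipeline used here: prepare, then with_bindings, then exec / maybe_row / rows. A sketch of that pipeline against the kv_store table (not part of this commit, and assuming the Connection/Statement API used throughout this diff):
use sqlez::connection::Connection;
fn pipeline_sketch() -> anyhow::Result<()> {
    let connection = Connection::open_memory("pipeline_sketch");
    connection.exec(
        "CREATE TABLE kv_store(
            key TEXT PRIMARY KEY,
            value TEXT NOT NULL
        ) STRICT;",
    )?;
    // Writes: bind a tuple to the positional placeholders, then exec.
    connection
        .prepare("INSERT OR REPLACE INTO kv_store(key, value) VALUES ((?), (?))")?
        .with_bindings(("key-1", "one"))?
        .exec()?;
    // Reads: maybe_row returns Ok(None) when no row matches.
    let value = connection
        .prepare("SELECT value FROM kv_store WHERE key = (?)")?
        .with_bindings("key-1")?
        .maybe_row::<String>()?;
    assert_eq!(value, Some("one".to_string()));
    Ok(())
}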

View File

@ -1,16 +1,21 @@
use std::str::FromStr;
use gpui::Axis;
use indoc::indoc;
use sqlez::{migrations::Migration, bindable::{Bind, Column}, connection::Connection, statement::Statement};
use sqlez::{
bindable::{Bind, Column},
migrations::Migration,
statement::Statement,
};
use util::{iife, ResultExt};
use crate::{items::ItemId, workspace::WorkspaceId};
use super::Db;
pub(crate) const PANE_MIGRATIONS: Migration = Migration::new("pane", &[indoc! {"
pub(crate) const PANE_MIGRATIONS: Migration = Migration::new(
"pane",
&[indoc! {"
CREATE TABLE dock_panes(
dock_pane_id INTEGER PRIMARY KEY,
workspace_id INTEGER NOT NULL,
@ -19,7 +24,7 @@ CREATE TABLE dock_panes(
FOREIGN KEY(workspace_id) REFERENCES workspaces(workspace_id) ON DELETE CASCADE
) STRICT;
CREATE TABLE pane_groups(
CREATE TABLE pane_groups( -- Inner nodes
group_id INTEGER PRIMARY KEY,
workspace_id INTEGER NOT NULL,
parent_group INTEGER, -- NULL indicates that this is a root node
@ -28,7 +33,8 @@ CREATE TABLE pane_groups(
FOREIGN KEY(parent_group) REFERENCES pane_groups(group_id) ON DELETE CASCADE
) STRICT;
CREATE TABLE grouped_panes(
CREATE TABLE grouped_panes( -- Leaf nodes
pane_id INTEGER PRIMARY KEY,
workspace_id INTEGER NOT NULL,
group_id INTEGER NOT NULL,
@ -65,7 +71,8 @@ CREATE TABLE dock_items(
FOREIGN KEY(dock_pane_id) REFERENCES dock_panes(dock_pane_id) ON DELETE CASCADE,
FOREIGN KEY(item_id) REFERENCES items(item_id) ON DELETE CASCADE
) STRICT;
"}]);
"}],
);
// We have a many-branched, unbalanced tree with three types:
// Pane Groups
@ -137,10 +144,9 @@ pub struct SerializedPane {
children: Vec<ItemId>,
}
//********* CURRENTLY IN USE TYPES: *********
#[derive(Default, Debug, PartialEq, Eq)]
#[derive(Default, Debug, PartialEq, Eq, Clone, Copy)]
pub enum DockAnchor {
#[default]
Bottom,
@ -162,15 +168,28 @@ impl FromStr for DockAnchor {
type Err = anyhow::Error;
fn from_str(s: &str) -> anyhow::Result<Self> {
match s {
match s {
"Bottom" => Ok(DockAnchor::Bottom),
"Right" => Ok(DockAnchor::Right),
"Expanded" => Ok(DockAnchor::Expanded),
_ => anyhow::bail!("Not a valid dock anchor")
_ => anyhow::bail!("Not a valid dock anchor"),
}
}
}
impl Bind for DockAnchor {
fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
statement.bind(self.to_string(), start_index)
}
}
impl Column for DockAnchor {
fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
<String as Column>::column(statement, start_index)
.and_then(|(str, next_index)| Ok((DockAnchor::from_str(&str)?, next_index)))
}
}
#[derive(Default, Debug, PartialEq, Eq)]
pub struct SerializedDockPane {
pub anchor_position: DockAnchor,
@ -178,11 +197,30 @@ pub struct SerializedDockPane {
}
impl SerializedDockPane {
pub fn to_row(&self, workspace: WorkspaceId) -> DockRow {
DockRow { workspace_id: workspace, anchor_position: self.anchor_position, visible: self.visible }
fn to_row(&self, workspace: &WorkspaceId) -> DockRow {
DockRow {
workspace_id: *workspace,
anchor_position: self.anchor_position,
visible: self.visible,
}
}
}
impl Column for SerializedDockPane {
fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
<(DockAnchor, bool) as Column>::column(statement, start_index).map(
|((anchor_position, visible), next_index)| {
(
SerializedDockPane {
anchor_position,
visible,
},
next_index,
)
},
)
}
}
#[derive(Default, Debug, PartialEq, Eq)]
pub(crate) struct DockRow {
@ -191,24 +229,16 @@ pub(crate) struct DockRow {
visible: bool,
}
impl DockRow {
pub fn to_pane(&self) -> SerializedDockPane {
SerializedDockPane { anchor_position: self.anchor_position, visible: self.visible }
}
}
impl Bind for DockRow {
fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
statement.bind((self.workspace_id, self.anchor_position.to_string(), self.visible), start_index)
}
}
impl Column for DockRow {
fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
<(WorkspaceId, &str, bool) as Column>::column(statement, start_index)
.map(|((workspace_id, anchor_position, visible), next_index)| {
})
statement.bind(
(
self.workspace_id,
self.anchor_position.to_string(),
self.visible,
),
start_index,
)
}
}
@ -267,75 +297,37 @@ impl Db {
}
pub fn get_dock_pane(&self, workspace: WorkspaceId) -> Option<SerializedDockPane> {
fn logic(conn: &Connection, workspace: WorkspaceId) -> anyhow::Result<Option<SerializedDockPane>> {
let mut stmt = conn.prepare("SELECT workspace_id, anchor_position, visible FROM dock_panes WHERE workspace_id = ?")?
.maybe_row()
.map(|row| DockRow::col);
let dock_panes = stmt.query_row([workspace.raw_id()], |row_ref| from_row::<DockRow>).optional();
let mut dock_panes_iter = stmt.query_and_then([workspace.raw_id()], from_row::<DockRow>)?;
let dock_pane = dock_panes_iter
.next()
.and_then(|dock_row|
dock_row
.ok()
.map(|dock_row| dock_row.to_pane()));
Ok(dock_pane)
}
self.real()
.map(|db| {
let lock = db.connection.lock();
match logic(&lock, workspace) {
Ok(dock_pane) => dock_pane,
Err(err) => {
log::error!("Failed to get the dock pane: {}", err);
None
},
}
})
.unwrap_or(None)
iife!({
self.prepare("SELECT anchor_position, visible FROM dock_panes WHERE workspace_id = ?")?
.with_bindings(workspace)?
.maybe_row::<SerializedDockPane>()
})
.log_err()
.flatten()
}
pub fn save_dock_pane(&self, workspace: WorkspaceId, dock_pane: SerializedDockPane) {
to_params_named(dock_pane.to_row(workspace))
.map_err(|err| {
log::error!("Failed to parse params for the dock row: {}", err);
err
})
.ok()
.zip(self.real())
.map(|(params, db)| {
// TODO: overwrite old dock panes if need be
let query = "INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (:workspace_id, :anchor_position, :visible);";
db.connection
.lock()
.execute(query, params.to_slice().as_slice())
.map(|_| ()) // Eat the return value
.unwrap_or_else(|err| {
log::error!("Failed to insert new dock pane into DB: {}", err);
})
});
pub fn save_dock_pane(&self, workspace: &WorkspaceId, dock_pane: &SerializedDockPane) {
iife!({
self.prepare(
"INSERT INTO dock_panes (workspace_id, anchor_position, visible) VALUES (?, ?, ?);",
)?
.with_bindings(dock_pane.to_row(workspace))?
.insert()
})
.log_err();
}
}
#[cfg(test)]
mod tests {
use crate::Db;
use crate::{pane::SerializedPane, Db};
use super::{DockAnchor, SerializedDockPane};
#[test]
fn test_basic_dock_pane() {
let db = Db::open_in_memory();
let db = Db::open_in_memory("basic_dock_pane");
let workspace = db.workspace_for_roots(&["/tmp"]);
@ -344,7 +336,28 @@ mod tests {
visible: true,
};
db.save_dock_pane(workspace.workspace_id, dock_pane);
db.save_dock_pane(&workspace.workspace_id, &dock_pane);
let new_workspace = db.workspace_for_roots(&["/tmp"]);
assert_eq!(new_workspace.dock_pane.unwrap(), dock_pane);
}
#[test]
fn test_dock_simple_split() {
let db = Db::open_in_memory("simple_split");
let workspace = db.workspace_for_roots(&["/tmp"]);
let center_pane = SerializedPane {
pane_id: crate::pane::PaneId {
workspace_id: workspace.workspace_id,
pane_id: 1,
},
children: vec![],
};
db.save_dock_pane(&workspace.workspace_id, &dock_pane);
let new_workspace = db.workspace_for_roots(&["/tmp"]);
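The Bind and Column impls above follow one reusable pattern: serialize to a SQL-friendly primitive when binding, parse it back when reading the column. A sketch of the same pattern for a hypothetical Theme enum (not part of this commit), assuming only the sqlez trait signatures shown in this file:
use sqlez::{
    bindable::{Bind, Column},
    statement::Statement,
};
// Hypothetical type, shown only to illustrate the DockAnchor pattern above.
#[derive(Debug, PartialEq, Eq)]
enum Theme {
    Light,
    Dark,
}
impl Bind for Theme {
    fn bind(&self, statement: &Statement, start_index: i32) -> anyhow::Result<i32> {
        // Store the variant as TEXT, exactly like DockAnchor::bind above.
        let name = match self {
            Theme::Light => "Light",
            Theme::Dark => "Dark",
        };
        statement.bind(name.to_string(), start_index)
    }
}
impl Column for Theme {
    fn column(statement: &mut Statement, start_index: i32) -> anyhow::Result<(Self, i32)> {
        // Read the TEXT column back and parse it, mirroring DockAnchor::column.
        <String as Column>::column(statement, start_index).and_then(|(name, next_index)| {
            let theme = match name.as_str() {
                "Light" => Theme::Light,
                "Dark" => Theme::Dark,
                _ => anyhow::bail!("Not a valid theme"),
            };
            Ok((theme, next_index))
        })
    }
}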

View File

@ -1,4 +1,4 @@
use anyhow::{Result, anyhow};
use anyhow::Result;
use std::{
ffi::OsStr,
@ -10,7 +10,9 @@ use std::{
use indoc::indoc;
use sqlez::{
connection::Connection, migrations::Migration, bindable::{Column, Bind},
bindable::{Bind, Column},
connection::Connection,
migrations::Migration,
};
use crate::pane::SerializedDockPane;
@ -47,13 +49,17 @@ impl WorkspaceId {
impl Bind for WorkspaceId {
fn bind(&self, statement: &sqlez::statement::Statement, start_index: i32) -> Result<i32> {
todo!();
statement.bind(self.raw_id(), start_index)
}
}
impl Column for WorkspaceId {
fn column(statement: &mut sqlez::statement::Statement, start_index: i32) -> Result<(Self, i32)> {
todo!();
fn column(
statement: &mut sqlez::statement::Statement,
start_index: i32,
) -> Result<(Self, i32)> {
<i64 as Column>::column(statement, start_index)
.map(|(id, next_index)| (WorkspaceId(id), next_index))
}
}
@ -154,10 +160,8 @@ impl Db {
fn last_workspace_id(&self) -> Option<WorkspaceId> {
let res = self
.prepare(
"SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT 1",
)
.and_then(|stmt| stmt.maybe_row())
.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT 1")
.and_then(|mut stmt| stmt.maybe_row())
.map(|row| row.map(|id| WorkspaceId(id)));
match res {
@ -172,28 +176,30 @@ impl Db {
/// Returns the previous workspace ids sorted by last modified along with their opened worktree roots
pub fn recent_workspaces(&self, limit: usize) -> Vec<(WorkspaceId, Vec<Arc<Path>>)> {
self.with_savepoint("recent_workspaces", |conn| {
let ids = conn.prepare("SELECT workspace_id FROM workspaces ORDER BY last_opened_timestamp DESC LIMIT ?")?
let rows = conn
.prepare("SELECT workspace_id FROM workspaces ORDER BY timestamp DESC LIMIT ?")?
.with_bindings(limit)?
.rows::<i64>()?
.iter()
.map(|row| WorkspaceId(*row));
let result = Vec::new();
let stmt = conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?;
.rows::<i64>()?;
let ids = rows.iter().map(|row| WorkspaceId(*row));
let mut result = Vec::new();
let mut stmt =
conn.prepare("SELECT worktree_root FROM worktree_roots WHERE workspace_id = ?")?;
for workspace_id in ids {
let roots = stmt.with_bindings(workspace_id.0)?
let roots = stmt
.with_bindings(workspace_id.0)?
.rows::<Vec<u8>>()?
.iter()
.map(|row| {
PathBuf::from(OsStr::from_bytes(&row)).into()
})
.map(|row| PathBuf::from(OsStr::from_bytes(&row)).into())
.collect();
result.push((workspace_id, roots))
}
Ok(result)
}).unwrap_or_else(|err| {
})
.unwrap_or_else(|err| {
log::error!("Failed to get recent workspaces, err: {}", err);
Vec::new()
})
@ -213,11 +219,10 @@ where
if let Some(preexisting_id) = preexisting_id {
if preexisting_id != *workspace_id {
// Should also delete fields in other tables with cascading updates
connection.prepare(
"DELETE FROM workspaces WHERE workspace_id = ?",
)?
.with_bindings(preexisting_id.0)?
.exec()?;
connection
.prepare("DELETE FROM workspaces WHERE workspace_id = ?")?
.with_bindings(preexisting_id.0)?
.exec()?;
}
}
@ -231,12 +236,14 @@ where
// If you need to debug this, here's the string parsing:
// let path = root.as_ref().to_string_lossy().to_string();
connection.prepare("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")?
connection
.prepare("INSERT INTO worktree_roots(workspace_id, worktree_root) VALUES (?, ?)")?
.with_bindings((workspace_id.0, path))?
.exec()?;
}
connection.prepare("UPDATE workspaces SET last_opened_timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")?
connection
.prepare("UPDATE workspaces SET timestamp = CURRENT_TIMESTAMP WHERE workspace_id = ?")?
.with_bindings(workspace_id.0)?
.exec()?;
@ -264,7 +271,7 @@ where
}
}
array_binding_stmt.push(')');
// Any workspace can have multiple independent paths, and these paths
// can overlap in the database. Take this test data for example:
//
@ -336,10 +343,14 @@ where
// Make sure we bound the parameters correctly
debug_assert!(worktree_roots.len() as i32 + 1 == stmt.parameter_count());
let root_bytes: Vec<&[u8]> = worktree_roots.iter()
.map(|root| root.as_ref().as_os_str().as_bytes()).collect();
stmt.with_bindings((root_bytes, root_bytes.len()))?
let root_bytes: Vec<&[u8]> = worktree_roots
.iter()
.map(|root| root.as_ref().as_os_str().as_bytes())
.collect();
let len = root_bytes.len();
stmt.with_bindings((root_bytes, len))?
.maybe_row()
.map(|row| row.map(|id| WorkspaceId(id)))
}
@ -360,7 +371,8 @@ mod tests {
#[test]
fn test_new_worktrees_for_roots() {
let db = Db::open_in_memory();
env_logger::init();
let db = Db::open_in_memory("test_new_worktrees_for_roots");
// Test creation in 0 case
let workspace_1 = db.workspace_for_roots::<String>(&[]);
@ -371,7 +383,7 @@ mod tests {
assert_eq!(workspace_1.workspace_id, WorkspaceId(1));
// Ensure the timestamps are different
sleep(Duration::from_millis(20));
sleep(Duration::from_secs(1));
db.make_new_workspace::<String>(&[]);
// Test pulling another value from recent workspaces
@ -379,7 +391,7 @@ mod tests {
assert_eq!(workspace_2.workspace_id, WorkspaceId(2));
// Ensure the timestamps are different
sleep(Duration::from_millis(20));
sleep(Duration::from_secs(1));
// Test creating a new workspace that doesn't exist already
let workspace_3 = db.workspace_for_roots(&["/tmp", "/tmp2"]);
@ -396,7 +408,7 @@ mod tests {
#[test]
fn test_empty_worktrees() {
let db = Db::open_in_memory();
let db = Db::open_in_memory("test_empty_worktrees");
assert_eq!(None, db.workspace_id::<String>(&[]));
@ -404,7 +416,6 @@ mod tests {
db.make_new_workspace::<String>(&[]); //ID 2
db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]);
db.write_to("test.db").unwrap();
// Sanity check
assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1)));
@ -436,7 +447,7 @@ mod tests {
(WorkspaceId(7), vec!["/tmp2"]),
];
let db = Db::open_in_memory();
let db = Db::open_in_memory("test_more_workspace_ids");
for (workspace_id, entries) in data {
db.make_new_workspace::<String>(&[]);
@ -470,7 +481,7 @@ mod tests {
(WorkspaceId(3), vec!["/tmp", "/tmp2", "/tmp3"]),
];
let db = Db::open_in_memory();
let db = Db::open_in_memory("test_detect_workspace_id");
for (workspace_id, entries) in data {
db.make_new_workspace::<String>(&[]);
@ -511,7 +522,7 @@ mod tests {
(WorkspaceId(3), vec!["/tmp2", "/tmp3"]),
];
let db = Db::open_in_memory();
let db = Db::open_in_memory("test_tricky_overlapping_update");
// Load in the test data
for (workspace_id, entries) in data {
@ -519,6 +530,7 @@ mod tests {
db.update_worktrees(workspace_id, entries);
}
sleep(Duration::from_secs(1));
// Execute the update
db.update_worktrees(&WorkspaceId(2), &["/tmp2", "/tmp3"]);
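As a reading aid for the overlapping-roots behavior these tests exercise, a sketch in the same test style (not part of this commit), assuming the crate-internal helpers used above (open_in_memory, make_new_workspace, update_worktrees, workspace_id) and that a lookup matches a workspace's full set of roots:
#[test]
fn overlapping_roots_sketch() {
    let db = Db::open_in_memory("overlapping_roots_sketch");
    db.make_new_workspace::<String>(&[]); // ID 1
    db.make_new_workspace::<String>(&[]); // ID 2
    db.update_worktrees(&WorkspaceId(1), &["/tmp", "/tmp2"]);
    db.update_worktrees(&WorkspaceId(2), &["/tmp2"]);
    // "/tmp2" appears in both workspaces, but each lookup names a full root set,
    // so the two workspaces do not collide.
    assert_eq!(db.workspace_id(&["/tmp", "/tmp2"]), Some(WorkspaceId(1)));
    assert_eq!(db.workspace_id(&["/tmp2"]), Some(WorkspaceId(2)));
}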

View File

@ -32,6 +32,9 @@ impl Connection {
0 as *const _,
);
// Turn on extended error codes
sqlite3_extended_result_codes(connection.sqlite3, 1);
connection.last_error()?;
}
@ -71,6 +74,7 @@ impl Connection {
0 as *mut _,
0 as *mut _,
);
sqlite3_errcode(self.sqlite3);
self.last_error()?;
}
Ok(())
@ -95,29 +99,7 @@ impl Connection {
}
pub(crate) fn last_error(&self) -> Result<()> {
const NON_ERROR_CODES: &[i32] = &[SQLITE_OK, SQLITE_ROW];
unsafe {
let code = sqlite3_errcode(self.sqlite3);
if NON_ERROR_CODES.contains(&code) {
return Ok(());
}
let message = sqlite3_errmsg(self.sqlite3);
let message = if message.is_null() {
None
} else {
Some(
String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes())
.into_owned(),
)
};
Err(anyhow!(
"Sqlite call failed with code {} and message: {:?}",
code as isize,
message
))
}
unsafe { error_to_result(sqlite3_errcode(self.sqlite3)) }
}
}
@ -127,12 +109,37 @@ impl Drop for Connection {
}
}
pub(crate) fn error_to_result(code: std::os::raw::c_int) -> Result<()> {
const NON_ERROR_CODES: &[i32] = &[SQLITE_OK, SQLITE_ROW];
unsafe {
if NON_ERROR_CODES.contains(&code) {
return Ok(());
}
let message = sqlite3_errstr(code);
let message = if message.is_null() {
None
} else {
Some(
String::from_utf8_lossy(CStr::from_ptr(message as *const _).to_bytes())
.into_owned(),
)
};
Err(anyhow!(
"Sqlite call failed with code {} and message: {:?}",
code as isize,
message
))
}
}
#[cfg(test)]
mod test {
use anyhow::Result;
use indoc::indoc;
use crate::connection::Connection;
use crate::{connection::Connection, migrations::Migration};
#[test]
fn string_round_trips() -> Result<()> {
@ -234,4 +241,34 @@ mod test {
.unwrap();
assert_eq!(read_blobs, vec![blob]);
}
#[test]
fn test_kv_store() -> anyhow::Result<()> {
let connection = Connection::open_memory("kv_store");
Migration::new(
"kv",
&["CREATE TABLE kv_store(
key TEXT PRIMARY KEY,
value TEXT NOT NULL
) STRICT;"],
)
.run(&connection)
.unwrap();
let mut stmt = connection.prepare("INSERT INTO kv_store(key, value) VALUES(?, ?)")?;
stmt.bind_text(1, "a").unwrap();
stmt.bind_text(2, "b").unwrap();
stmt.exec().unwrap();
let id = connection.last_insert_id();
let res = connection
.prepare("SELECT key, value FROM kv_store WHERE rowid = ?")?
.with_bindings(id)?
.row::<(String, String)>()?;
assert_eq!(res, ("a".to_string(), "b".to_string()));
Ok(())
}
}

View File

@ -6,9 +6,9 @@ impl Connection {
// Run a set of commands within the context of a `SAVEPOINT name`. If the callback
// returns Err(_), the savepoint will be rolled back. Otherwise, the save
// point is released.
pub fn with_savepoint<R, F>(&mut self, name: impl AsRef<str>, f: F) -> Result<R>
pub fn with_savepoint<R, F>(&self, name: impl AsRef<str>, f: F) -> Result<R>
where
F: FnOnce(&mut Connection) -> Result<R>,
F: FnOnce(&Connection) -> Result<R>,
{
let name = name.as_ref().to_owned();
self.exec(format!("SAVEPOINT {}", &name))?;
@ -28,13 +28,9 @@ impl Connection {
// Run a set of commands within the context of a `SAVEPOINT name`. If the callback
// returns Ok(None) or Err(_), the savepoint will be rolled back. Otherwise, the save
// point is released.
pub fn with_savepoint_rollback<R, F>(
&mut self,
name: impl AsRef<str>,
f: F,
) -> Result<Option<R>>
pub fn with_savepoint_rollback<R, F>(&self, name: impl AsRef<str>, f: F) -> Result<Option<R>>
where
F: FnOnce(&mut Connection) -> Result<Option<R>>,
F: FnOnce(&Connection) -> Result<Option<R>>,
{
let name = name.as_ref().to_owned();
self.exec(format!("SAVEPOINT {}", &name))?;
@ -60,7 +56,7 @@ mod tests {
#[test]
fn test_nested_savepoints() -> Result<()> {
let mut connection = Connection::open_memory("nested_savepoints");
let connection = Connection::open_memory("nested_savepoints");
connection
.exec(indoc! {"
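To make the rollback/release rules above concrete, a usage sketch (not part of this commit), assuming the sqlez API shown in this diff (open_memory, exec, prepare, with_bindings, and the savepoint helpers above):
use sqlez::connection::Connection;
fn savepoint_sketch() -> anyhow::Result<()> {
    let connection = Connection::open_memory("savepoint_sketch");
    connection.exec("CREATE TABLE logs(message TEXT NOT NULL) STRICT;")?;
    // The closure receives &Connection; returning Err(_) rolls the savepoint
    // back, returning Ok(_) releases it and keeps the writes.
    connection.with_savepoint("insert_log", |conn| {
        conn.prepare("INSERT INTO logs(message) VALUES (?)")?
            .with_bindings("hello")?
            .exec()
    })?;
    // with_savepoint_rollback also rolls back on Ok(None), so a "not found"
    // result leaves no partial writes behind.
    let row: Option<()> = connection.with_savepoint_rollback("lookup", |_conn| Ok(None))?;
    assert!(row.is_none());
    Ok(())
}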

View File

@ -6,7 +6,7 @@ use anyhow::{anyhow, Context, Result};
use libsqlite3_sys::*;
use crate::bindable::{Bind, Column};
use crate::connection::Connection;
use crate::connection::{error_to_result, Connection};
pub struct Statement<'a> {
raw_statement: *mut sqlite3_stmt,
@ -65,6 +65,7 @@ impl<'a> Statement<'a> {
}
pub fn bind_blob(&self, index: i32, blob: &[u8]) -> Result<()> {
// dbg!("bind blob", index);
let index = index as c_int;
let blob_pointer = blob.as_ptr() as *const _;
let len = blob.len() as c_int;
@ -94,6 +95,7 @@ impl<'a> Statement<'a> {
}
pub fn bind_double(&self, index: i32, double: f64) -> Result<()> {
// dbg!("bind double", index);
let index = index as c_int;
unsafe {
@ -110,6 +112,7 @@ impl<'a> Statement<'a> {
}
pub fn bind_int(&self, index: i32, int: i32) -> Result<()> {
// dbg!("bind int", index);
let index = index as c_int;
unsafe {
@ -126,6 +129,7 @@ impl<'a> Statement<'a> {
}
pub fn bind_int64(&self, index: i32, int: i64) -> Result<()> {
// dbg!("bind int64", index);
let index = index as c_int;
unsafe {
sqlite3_bind_int64(self.raw_statement, index, int);
@ -141,6 +145,7 @@ impl<'a> Statement<'a> {
}
pub fn bind_null(&self, index: i32) -> Result<()> {
// dbg!("bind null", index);
let index = index as c_int;
unsafe {
sqlite3_bind_null(self.raw_statement, index);
@ -149,11 +154,12 @@ impl<'a> Statement<'a> {
}
pub fn bind_text(&self, index: i32, text: &str) -> Result<()> {
// dbg!("bind text", index, text);
let index = index as c_int;
let text_pointer = text.as_ptr() as *const _;
let len = text.len() as c_int;
unsafe {
sqlite3_bind_blob(
sqlite3_bind_text(
self.raw_statement,
index,
text_pointer,
@ -304,10 +310,8 @@ impl<'a> Statement<'a> {
impl<'a> Drop for Statement<'a> {
fn drop(&mut self) {
unsafe {
sqlite3_finalize(self.raw_statement);
self.connection
.last_error()
.expect("sqlite3 finalize failed for statement :(");
let error = sqlite3_finalize(self.raw_statement);
error_to_result(error).expect("failed error");
};
}
}

View File

@ -204,6 +204,13 @@ impl<T: Rng> Iterator for RandomCharIter<T> {
}
}
#[macro_export]
macro_rules! iife {
($block:block) => {
(|| $block)()
};
}
#[cfg(test)]
mod tests {
use super::*;
@ -221,4 +228,18 @@ mod tests {
extend_sorted(&mut vec, vec![1000, 19, 17, 9, 5], 8, |a, b| b.cmp(a));
assert_eq!(vec, &[1000, 101, 21, 19, 17, 13, 9, 8]);
}
#[test]
fn test_iife() {
fn option_returning_function() -> Option<()> {
None
}
let foo = iife!({
option_returning_function()?;
Some(())
});
assert_eq!(foo, None);
}
}
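The iife! macro exists so a block can use ? freely and the whole expression evaluates to the resulting Option or Result (as in get_dock_pane and save_dock_pane earlier in this commit) instead of early-returning from the surrounding function. A self-contained sketch (not part of this commit), redefining the macro locally rather than importing util::iife:
macro_rules! iife {
    ($block:block) => {
        (|| $block)()
    };
}
fn lookup(values: &[(&str, i32)], key: &str) -> Option<i32> {
    // Inside iife!, the ? below only exits the closure, not lookup itself,
    // so the result can still be post-processed afterwards.
    let found = iife!({
        let (_, value) = values.iter().find(|(k, _)| *k == key)?;
        Some(*value)
    });
    found.filter(|value| *value >= 0)
}
fn main() {
    assert_eq!(lookup(&[("a", 1), ("b", -2)], "a"), Some(1));
    assert_eq!(lookup(&[("a", 1)], "missing"), None);
    assert_eq!(lookup(&[("b", -2)], "b"), None);
}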