diff --git a/crates/db/Cargo.toml b/crates/db/Cargo.toml index 69c90e02f9..8e12b06027 100644 --- a/crates/db/Cargo.toml +++ b/crates/db/Cargo.toml @@ -27,5 +27,5 @@ smol = "1.2" [dev-dependencies] gpui = { path = "../gpui", features = ["test-support"] } +env_logger = "0.9.1" tempdir = { version = "0.3.7" } -env_logger = "0.9.1" \ No newline at end of file diff --git a/crates/db/src/db.rs b/crates/db/src/db.rs index 6c6688b0d1..7b214cb3be 100644 --- a/crates/db/src/db.rs +++ b/crates/db/src/db.rs @@ -40,7 +40,7 @@ lazy_static::lazy_static! { static ref DB_FILE_OPERATIONS: Mutex<()> = Mutex::new(()); static ref DB_WIPED: RwLock<bool> = RwLock::new(false); pub static ref BACKUP_DB_PATH: RwLock<Option<PathBuf>> = RwLock::new(None); - pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); + pub static ref ALL_FILE_DB_FAILED: AtomicBool = AtomicBool::new(false); } /// Open or create a database at the given directory path. @@ -58,7 +58,6 @@ pub async fn open_db<M: Migrator>(wipe_db: bool, db_dir: &Path, releas let mut db_wiped = DB_WIPED.write(); if !*db_wiped { remove_dir_all(&main_db_dir).ok(); - *db_wiped = true; } } @@ -71,7 +70,7 @@ pub async fn open_db<M: Migrator>(wipe_db: bool, db_dir: &Path, releas // cause errors in the log and so should be observed by developers while writing // soon-to-be good migrations. If user databases are corrupted, we toss them out // and try again from a blank. As long as running all migrations from start to end - // is ok, this race condition will never be triggered. + // on a blank database is ok, this race condition will never be triggered. // // Basically: Don't ever push invalid migrations to stable or everyone will have // a bad time. 
@@ -137,7 +136,7 @@ pub async fn open_db<M: Migrator>(wipe_db: bool, db_dir: &Path, releas } async fn open_main_db<M: Migrator>(db_path: &PathBuf) -> Option<ThreadSafeConnection<M>> { - println!("Opening main db"); + log::info!("Opening main db"); ThreadSafeConnection::<M>::builder(db_path.to_string_lossy().as_ref(), true) .with_db_initialization_query(DB_INITIALIZE_QUERY) .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) @@ -147,7 +146,7 @@ async fn open_main_db<M: Migrator>(db_path: &PathBuf) -> Option<ThreadSafeConnection<M>> } async fn open_fallback_db<M: Migrator>() -> ThreadSafeConnection<M> { - println!("Opening fallback db"); + log::info!("Opening fallback db"); ThreadSafeConnection::<M>::builder(FALLBACK_DB_NAME, false) .with_db_initialization_query(DB_INITIALIZE_QUERY) .with_connection_initialize_query(CONNECTION_INITIALIZE_QUERY) diff --git a/crates/sqlez/Cargo.toml b/crates/sqlez/Cargo.toml index 8a7f1ba415..c6c018b924 100644 --- a/crates/sqlez/Cargo.toml +++ b/crates/sqlez/Cargo.toml @@ -13,4 +13,4 @@ smol = "1.2" thread_local = "1.1.4" lazy_static = "1.4" parking_lot = "0.11.1" -futures = "0.3" \ No newline at end of file +futures = "0.3" diff --git a/crates/sqlez/src/thread_safe_connection.rs b/crates/sqlez/src/thread_safe_connection.rs index 77ba3406a2..7b89827979 100644 --- a/crates/sqlez/src/thread_safe_connection.rs +++ b/crates/sqlez/src/thread_safe_connection.rs @@ -266,12 +266,10 @@ pub fn background_thread_queue() -> WriteQueueConstructor { pub fn locking_queue() -> WriteQueueConstructor { Box::new(|| { - let mutex = Mutex::new(()); + let write_mutex = Mutex::new(()); Box::new(move |queued_write| { - eprintln!("Write started"); - let _ = mutex.lock(); + let _lock = write_mutex.lock(); queued_write(); - eprintln!("Write finished"); }) }) } diff --git a/crates/sqlez_macros/src/sqlez_macros.rs b/crates/sqlez_macros/src/sqlez_macros.rs index c937e704ae..429f45db7e 100644 --- a/crates/sqlez_macros/src/sqlez_macros.rs +++ b/crates/sqlez_macros/src/sqlez_macros.rs @@ -10,9 +10,37 @@ lazy_static::lazy_static! 
{ #[proc_macro] pub fn sql(tokens: TokenStream) -> TokenStream { + let (spans, sql) = make_sql(tokens); + + let error = SQLITE.sql_has_syntax_error(sql.trim()); + let formatted_sql = sqlformat::format(&sql, &sqlformat::QueryParams::None, Default::default()); + + if let Some((error, error_offset)) = error { + create_error(spans, error_offset, error, &formatted_sql) + } else { + format!("r#\"{}\"#", &formatted_sql).parse().unwrap() + } +} + +fn create_error( + spans: Vec<(usize, Span)>, + error_offset: usize, + error: String, + formatted_sql: &String, +) -> TokenStream { + let error_span = spans + .into_iter() + .skip_while(|(offset, _)| offset <= &error_offset) + .map(|(_, span)| span) + .next() + .unwrap_or(Span::call_site()); + let error_text = format!("Sql Error: {}\nFor Query: {}", error, formatted_sql); + TokenStream::from(Error::new(error_span.into(), error_text).into_compile_error()) +} + +fn make_sql(tokens: TokenStream) -> (Vec<(usize, Span)>, String) { let mut sql_tokens = vec![]; flatten_stream(tokens.clone(), &mut sql_tokens); - // Lookup of spans by offset at the end of the token let mut spans: Vec<(usize, Span)> = Vec::new(); let mut sql = String::new(); @@ -20,23 +48,7 @@ pub fn sql(tokens: TokenStream) -> TokenStream { sql.push_str(&token_text); spans.push((sql.len(), span)); } - - let error = SQLITE.sql_has_syntax_error(sql.trim()); - let formatted_sql = sqlformat::format(&sql, &sqlformat::QueryParams::None, Default::default()); - - if let Some((error, error_offset)) = error { - let error_span = spans - .into_iter() - .skip_while(|(offset, _)| offset <= &error_offset) - .map(|(_, span)| span) - .next() - .unwrap_or(Span::call_site()); - - let error_text = format!("Sql Error: {}\nFor Query: {}", error, formatted_sql); - TokenStream::from(Error::new(error_span.into(), error_text).into_compile_error()) - } else { - format!("r#\"{}\"#", &formatted_sql).parse().unwrap() - } + (spans, sql) } /// This method exists to normalize the representation of groups 
diff --git a/crates/workspace/Cargo.toml b/crates/workspace/Cargo.toml index b67ccdeeb7..917f821e4a 100644 --- a/crates/workspace/Cargo.toml +++ b/crates/workspace/Cargo.toml @@ -54,3 +54,4 @@ gpui = { path = "../gpui", features = ["test-support"] } project = { path = "../project", features = ["test-support"] } settings = { path = "../settings", features = ["test-support"] } fs = { path = "../fs", features = ["test-support"] } +db = { path = "../db", features = ["test-support"] } \ No newline at end of file