Merge pull request #3747 from gitbutlerapp/remove-duplicate-code-and-refactor

remove deltas calculation on file change
This commit is contained in:
Kiril Videlov 2024-05-12 03:07:42 +02:00 committed by GitHub
commit 1ba7bcedca
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
9 changed files with 3 additions and 1493 deletions

View File

@@ -3,7 +3,7 @@ use std::sync::Arc;
use anyhow::{Context, Result};
use futures::executor::block_on;
use gitbutler_core::projects::{self, Project, ProjectId};
use gitbutler_core::{assets, deltas, sessions, users, virtual_branches};
use gitbutler_core::{assets, virtual_branches};
use tauri::{AppHandle, Manager};
use tracing::instrument;
@@ -113,25 +113,14 @@ pub struct Watchers {
}
fn handler_from_app(app: &AppHandle) -> anyhow::Result<gitbutler_watcher::Handler> {
let app_data_dir = app
.path_resolver()
.app_data_dir()
.context("failed to get app data dir")?;
let users = app.state::<users::Controller>().inner().clone();
let projects = app.state::<projects::Controller>().inner().clone();
let vbranches = app.state::<virtual_branches::Controller>().inner().clone();
let assets_proxy = app.state::<assets::Proxy>().inner().clone();
let sessions_db = app.state::<sessions::Database>().inner().clone();
let deltas_db = app.state::<deltas::Database>().inner().clone();
Ok(gitbutler_watcher::Handler::new(
app_data_dir.clone(),
users,
projects,
vbranches,
assets_proxy,
sessions_db,
deltas_db,
{
let app = app.clone();
move |change| ChangeForFrontend::from(change).send(&app)

View File

@@ -1,179 +0,0 @@
use crate::Change;
use anyhow::{Context, Result};
use gitbutler_core::{
deltas, gb_repository, project_repository, projects::ProjectId, reader, sessions,
};
use std::num::NonZeroUsize;
use std::path::{Path, PathBuf};
use tracing::instrument;
impl super::Handler {
#[instrument(skip(self, paths, project_id))]
pub fn calculate_deltas(&self, paths: Vec<PathBuf>, project_id: ProjectId) -> Result<()> {
let make_processor = || -> Result<_> {
let project = self
.projects
.get(&project_id)
.context("failed to get project")?;
let project_repository = project_repository::Repository::open(&project)
.with_context(|| "failed to open project repository for project")?;
let user = self.users.get_user().context("failed to get user")?;
let gb_repository = gb_repository::Repository::open(
&self.local_data_dir,
&project_repository,
user.as_ref(),
)
.context("failed to open gb repository")?;
// If the current session's branch is not the same as the project's head, flush the session first.
if let Some(session) = gb_repository
.get_current_session()
.context("failed to get current session")?
{
let project_head = project_repository
.get_head()
.context("failed to get head")?;
if session.meta.branch != project_head.name().map(|n| n.to_string()) {
gb_repository
.flush_session(&project_repository, &session, user.as_ref())
.context(format!("failed to flush session {}", session.id))?;
}
}
let current_session = gb_repository
.get_or_create_current_session()
.context("failed to get or create current session")?;
let session = current_session.clone();
let process = move |path: PathBuf| -> Result<bool> {
let _span = tracing::span!(tracing::Level::TRACE, "processing", ?path).entered();
let current_session_reader =
sessions::Reader::open(&gb_repository, &current_session)
.context("failed to get session reader")?;
let deltas_reader = deltas::Reader::new(&current_session_reader);
let writer =
deltas::Writer::new(&gb_repository).context("failed to open deltas writer")?;
let current_wd_file_content = match Self::file_content(&project_repository, &path) {
Ok(content) => Some(content),
Err(reader::Error::NotFound) => None,
Err(err) => Err(err).context("failed to get file content")?,
};
let latest_file_content = match current_session_reader.file(&path) {
Ok(content) => Some(content),
Err(reader::Error::NotFound) => None,
Err(err) => Err(err).context("failed to get file content")?,
};
let current_deltas = deltas_reader
.read_file(&path)
.context("failed to get file deltas")?;
let mut text_doc = deltas::Document::new(
latest_file_content.as_ref(),
current_deltas.unwrap_or_default(),
)?;
let new_delta = text_doc
.update(current_wd_file_content.as_ref())
.context("failed to calculate new deltas")?;
let Some(new_delta) = new_delta else {
return Ok(false);
};
let deltas = text_doc.get_deltas();
writer
.write(&path, &deltas)
.context("failed to write deltas")?;
match &current_wd_file_content {
Some(reader::Content::UTF8(text)) => writer.write_wd_file(&path, text),
Some(_) => writer.write_wd_file(&path, ""),
None => writer.remove_wd_file(&path),
}?;
let session_id = current_session.id;
self.emit_session_file(project_id, session_id, &path, latest_file_content)?;
self.index_deltas(
project_id,
session_id,
&path,
std::slice::from_ref(&new_delta),
)
.context("failed to index deltas")?;
self.emit_app_event(Change::Deltas {
project_id,
session_id,
deltas: vec![new_delta],
relative_file_path: path,
})?;
Ok(true)
};
Ok((process, session))
};
let num_paths = paths.len();
let num_no_delta = std::thread::scope(|scope| -> Result<usize> {
let num_threads = std::thread::available_parallelism()
.unwrap_or(NonZeroUsize::new(1).unwrap())
.get()
.min(paths.len());
let mut num_no_delta = 0;
let current_session = if num_threads < 2 {
let (process, session) = make_processor()?;
for path in paths {
if !process(path)? {
num_no_delta += 1;
}
}
session
} else {
let (threads, tx) = {
let (tx, rx) = crossbeam_channel::bounded::<PathBuf>(num_threads);
let threads: Vec<_> = (0..num_threads)
.map(|id| {
std::thread::Builder::new()
.name(format!("gitbutler_delta_thread_{id}"))
.stack_size(512 * 1024)
.spawn_scoped(scope, {
let rx = rx.clone();
|| -> Result<usize> {
let mut num_no_delta = 0;
let (process, _) = make_processor()?;
for path in rx {
if !process(path)? {
num_no_delta += 1;
}
}
Ok(num_no_delta)
}
})
.expect("worker thread can be created")
})
.collect();
(threads, tx)
};
for path in paths {
tx.send(path).expect("many receivers");
}
drop(tx);
for thread in threads {
num_no_delta += thread.join().unwrap()?;
}
let (_, session) = make_processor()?;
session
};
self.index_session(project_id, current_session)?;
Ok(num_no_delta)
})?;
tracing::debug!(%project_id, paths_without_deltas = num_no_delta, paths_with_delta = num_paths - num_no_delta);
Ok(())
}
fn file_content(
project_repository: &project_repository::Repository,
path: &Path,
) -> Result<reader::Content, reader::Error> {
let full_path = project_repository.project().path.join(path);
if !full_path.exists() {
return Err(reader::Error::NotFound);
}
Ok(reader::Content::read_from_file(&full_path)?)
}
}
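
For reference, the removed function above fans the changed paths out to a small pool of scoped worker threads over a bounded crossbeam channel, counting the paths that produced no delta. A minimal sketch of that fan-out pattern, assuming a crossbeam-channel dependency and with a hypothetical `process` closure standing in for the per-path delta computation, could look like this:

use std::num::NonZeroUsize;
use std::path::PathBuf;

// Sketch only: fan paths out to scoped workers and count paths that yielded no delta.
fn fan_out(paths: Vec<PathBuf>, process: impl Fn(PathBuf) -> bool + Send + Sync) -> usize {
    let num_threads = std::thread::available_parallelism()
        .unwrap_or(NonZeroUsize::new(1).unwrap())
        .get()
        .min(paths.len().max(1));
    std::thread::scope(|scope| {
        let (tx, rx) = crossbeam_channel::bounded::<PathBuf>(num_threads);
        let workers: Vec<_> = (0..num_threads)
            .map(|_| {
                let rx = rx.clone();
                let process = &process;
                // Each worker drains the channel and counts paths without a new delta.
                scope.spawn(move || rx.iter().filter(|path| !process(path.clone())).count())
            })
            .collect();
        for path in paths {
            tx.send(path).expect("workers are still receiving");
        }
        drop(tx); // closing the sender lets the workers run to completion
        workers
            .into_iter()
            .map(|worker| worker.join().expect("worker does not panic"))
            .sum()
    })
}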

View File

@@ -1,95 +0,0 @@
use std::path::Path;
use anyhow::{Context, Result};
use gitbutler_core::{
deltas, gb_repository, project_repository,
projects::ProjectId,
sessions::{self, SessionId},
};
use crate::Change;
impl super::Handler {
pub(super) fn index_deltas(
&self,
project_id: ProjectId,
session_id: SessionId,
file_path: &Path,
deltas: &[deltas::Delta],
) -> Result<()> {
self.deltas_db
.insert(&project_id, &session_id, file_path, deltas)
.context("failed to insert deltas into database")
}
pub(crate) fn reindex(&self, project_id: ProjectId) -> Result<()> {
let user = self.users.get_user()?;
let project = self.projects.get(&project_id)?;
let project_repository =
project_repository::Repository::open(&project).context("failed to open repository")?;
let gb_repository = gb_repository::Repository::open(
&self.local_data_dir,
&project_repository,
user.as_ref(),
)
.context("failed to open repository")?;
let sessions_iter = gb_repository.get_sessions_iterator()?;
for session in sessions_iter {
self.process_session(&gb_repository, session?)?;
}
Ok(())
}
pub(super) fn index_session(
&self,
project_id: ProjectId,
session: sessions::Session,
) -> Result<()> {
let project = self.projects.get(&project_id)?;
let project_repository =
project_repository::Repository::open(&project).context("failed to open repository")?;
let user = self.users.get_user()?;
let gb_repository = gb_repository::Repository::open(
&self.local_data_dir,
&project_repository,
user.as_ref(),
)
.context("failed to open repository")?;
self.process_session(&gb_repository, session)
}
fn process_session(
&self,
gb_repository: &gb_repository::Repository,
session: sessions::Session,
) -> Result<()> {
let project_id = gb_repository.get_project_id();
// Index the session into the database only if it has changed.
let from_db = self.sessions_db.get_by_id(&session.id)?;
if from_db.map_or(false, |from_db| from_db == session) {
return Ok(());
}
self.sessions_db
.insert(project_id, &[&session])
.context("failed to insert session into database")?;
let session_reader = sessions::Reader::open(gb_repository, &session)?;
let deltas_reader = deltas::Reader::new(&session_reader);
for (file_path, deltas) in deltas_reader
.read(None)
.context("could not list deltas for session")?
{
self.index_deltas(*project_id, session.id, &file_path, &deltas)?;
}
(self.send_event)(Change::Session {
project_id: *project_id,
session,
})?;
Ok(())
}
}

View File

@@ -1,19 +1,12 @@
mod calculate_deltas;
mod index;
use std::path;
use std::path::{Path, PathBuf};
use std::path::PathBuf;
use std::sync::Arc;
use anyhow::{Context, Result};
use gitbutler_core::ops::entry::{OperationType, SnapshotDetails};
use gitbutler_core::ops::oplog::Oplog;
use gitbutler_core::projects::ProjectId;
use gitbutler_core::sessions::SessionId;
use gitbutler_core::virtual_branches::VirtualBranches;
use gitbutler_core::{
assets, deltas, git, project_repository, projects, reader, sessions, users, virtual_branches,
};
use gitbutler_core::{assets, git, project_repository, projects, virtual_branches};
use tracing::instrument;
use super::{events, Change};
@@ -28,13 +21,9 @@ pub struct Handler {
// should be, and I can imagine having a top-level `app` handle that keeps the application state of
// the tauri app, assuming that such application would not be `Send + Sync` everywhere and thus would
// need extra protection.
users: users::Controller,
local_data_dir: path::PathBuf,
projects: projects::Controller,
vbranch_controller: virtual_branches::Controller,
assets_proxy: assets::Proxy,
sessions_db: sessions::Database,
deltas_db: deltas::Database,
/// A function to send events - decoupled from app-handle for testing purposes.
#[allow(clippy::type_complexity)]
@@ -45,23 +34,15 @@ impl Handler {
/// A constructor whose primary use is the test-suite.
#[allow(clippy::too_many_arguments)]
pub fn new(
local_data_dir: PathBuf,
users: users::Controller,
projects: projects::Controller,
vbranch_controller: virtual_branches::Controller,
assets_proxy: assets::Proxy,
sessions_db: sessions::Database,
deltas_db: deltas::Database,
send_event: impl Fn(Change) -> Result<()> + Send + Sync + 'static,
) -> Self {
Handler {
local_data_dir,
users,
projects,
vbranch_controller,
assets_proxy,
sessions_db,
deltas_db,
send_event: Arc::new(send_event),
}
}
@@ -92,21 +73,6 @@ impl Handler {
(self.send_event)(event).context("failed to send event")
}
fn emit_session_file(
&self,
project_id: ProjectId,
session_id: SessionId,
file_path: &Path,
contents: Option<reader::Content>,
) -> Result<()> {
self.emit_app_event(Change::File {
project_id,
session_id,
file_path: file_path.to_owned(),
contents,
})
}
#[instrument(skip(self, project_id))]
async fn calculate_virtual_branches(&self, project_id: ProjectId) -> Result<()> {
match self
@@ -135,10 +101,6 @@ impl Handler {
paths: Vec<PathBuf>,
project_id: ProjectId,
) -> Result<()> {
let calc_deltas = tokio::task::spawn_blocking({
let this = self.clone();
move || this.calculate_deltas(paths, project_id)
});
// Create a snapshot every time there are more than a configurable number of new lines of code (default 20)
let handle_snapshots = tokio::task::spawn_blocking({
let this = self.clone();
@@ -146,7 +108,6 @@ });
});
self.calculate_virtual_branches(project_id).await?;
let _ = handle_snapshots.await;
calc_deltas.await??;
Ok(())
}
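
With the delta task gone, the handler above keeps only two concurrent pieces of work on a file change: snapshot handling on a blocking task and the virtual-branch recalculation on the async runtime. A minimal sketch of that offloading pattern, with illustrative names and assuming a tokio runtime, is:

use anyhow::Result;

// Sketch only: push blocking work to a dedicated thread while async work continues.
async fn on_file_change(blocking_snapshot_work: impl FnOnce() -> Result<()> + Send + 'static) -> Result<()> {
    let snapshot_task = tokio::task::spawn_blocking(blocking_snapshot_work);
    // ... drive other async work here, e.g. recomputing virtual branches ...
    let _ = snapshot_task.await; // a failed snapshot is not treated as fatal
    Ok(())
}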

View File

@@ -75,7 +75,6 @@ pub fn watch_in_background(
let (events_out, mut events_in) = unbounded_channel();
file_monitor::spawn(project_id, path.as_ref(), events_out.clone())?;
handler.reindex(project_id)?;
let cancellation_token = CancellationToken::new();
let handle = WatcherHandle {

View File

@@ -1,982 +0,0 @@
use std::{
collections::HashMap,
path::{Path, PathBuf},
sync::atomic::{AtomicUsize, Ordering},
};
use anyhow::Result;
use gitbutler_core::projects::ProjectId;
use gitbutler_core::{
deltas::{self, operations::Operation},
reader, sessions,
virtual_branches::{self, branch},
};
use once_cell::sync::Lazy;
use self::branch::BranchId;
use crate::handler::support::Fixture;
use gitbutler_testsupport::{commit_all, Case};
static TEST_TARGET_INDEX: Lazy<AtomicUsize> = Lazy::new(|| AtomicUsize::new(0));
#[derive(Clone)]
pub struct State {
inner: gitbutler_watcher::Handler,
}
impl State {
pub(super) fn from_fixture(fixture: &mut Fixture) -> Self {
Self {
inner: fixture.new_handler(),
}
}
pub(super) fn calculate_delta(
&self,
path: impl Into<PathBuf>,
project_id: ProjectId,
) -> Result<()> {
self.inner.calculate_deltas(vec![path.into()], project_id)?;
Ok(())
}
}
fn new_test_target() -> virtual_branches::target::Target {
virtual_branches::target::Target {
branch: format!(
"refs/remotes/remote name {}/branch name {}",
TEST_TARGET_INDEX.load(Ordering::Relaxed),
TEST_TARGET_INDEX.load(Ordering::Relaxed)
)
.parse()
.unwrap(),
remote_url: format!("remote url {}", TEST_TARGET_INDEX.load(Ordering::Relaxed)),
sha: format!(
"0123456789abcdef0123456789abcdef0123456{}",
TEST_TARGET_INDEX.load(Ordering::Relaxed)
)
.parse()
.unwrap(),
push_remote_name: None,
}
}
static TEST_INDEX: Lazy<AtomicUsize> = Lazy::new(|| AtomicUsize::new(0));
fn new_test_branch() -> branch::Branch {
TEST_INDEX.fetch_add(1, Ordering::Relaxed);
branch::Branch {
id: BranchId::generate(),
name: format!("branch_name_{}", TEST_INDEX.load(Ordering::Relaxed)),
notes: format!("branch_notes_{}", TEST_INDEX.load(Ordering::Relaxed)),
applied: true,
upstream: Some(
format!(
"refs/remotes/origin/upstream_{}",
TEST_INDEX.load(Ordering::Relaxed)
)
.parse()
.unwrap(),
),
upstream_head: None,
created_timestamp_ms: TEST_INDEX.load(Ordering::Relaxed) as u128,
updated_timestamp_ms: (TEST_INDEX.load(Ordering::Relaxed) + 100) as u128,
head: format!(
"0123456789abcdef0123456789abcdef0123456{}",
TEST_INDEX.load(Ordering::Relaxed)
)
.parse()
.unwrap(),
tree: format!(
"0123456789abcdef0123456789abcdef012345{}",
TEST_INDEX.load(Ordering::Relaxed) + 10
)
.parse()
.unwrap(),
ownership: branch::BranchOwnershipClaims::default(),
order: TEST_INDEX.load(Ordering::Relaxed),
selected_for_changes: None,
}
}
#[test]
fn register_existing_committed_file() -> Result<()> {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &fixture.new_case_with_files(HashMap::from([(PathBuf::from("test.txt"), "test")]));
std::fs::write(project.path.join("test.txt"), "test2")?;
listener.calculate_delta("test.txt", project.id)?;
let session = gb_repository.get_current_session()?.unwrap();
let session_reader = sessions::Reader::open(gb_repository, &session)?;
let deltas_reader = deltas::Reader::new(&session_reader);
let deltas = deltas_reader.read_file("test.txt")?.unwrap();
assert_eq!(deltas.len(), 1);
assert_eq!(deltas[0].operations.len(), 1);
assert_eq!(
deltas[0].operations[0],
Operation::Insert((4, "2".to_string())),
);
assert_eq!(
std::fs::read_to_string(gb_repository.session_wd_path().join("test.txt"))?,
"test2"
);
Ok(())
}
#[test]
fn register_must_init_current_session() -> Result<()> {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &fixture.new_case();
std::fs::write(project.path.join("test.txt"), "test")?;
listener.calculate_delta("test.txt", project.id)?;
assert!(gb_repository.get_current_session()?.is_some());
Ok(())
}
#[test]
fn register_must_not_override_current_session() -> Result<()> {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &fixture.new_case();
std::fs::write(project.path.join("test.txt"), "test")?;
listener.calculate_delta("test.txt", project.id)?;
let session1 = gb_repository.get_current_session()?.unwrap();
std::fs::write(project.path.join("test.txt"), "test2")?;
listener.calculate_delta("test.txt", project.id)?;
let session2 = gb_repository.get_current_session()?.unwrap();
assert_eq!(session1.id, session2.id);
Ok(())
}
#[test]
fn register_binfile() -> Result<()> {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &fixture.new_case();
std::fs::write(
project.path.join("test.bin"),
[0, 159, 146, 150, 159, 146, 150],
)?;
listener.calculate_delta("test.bin", project.id)?;
let session = gb_repository.get_current_session()?.unwrap();
let session_reader = sessions::Reader::open(gb_repository, &session)?;
let deltas_reader = deltas::Reader::new(&session_reader);
let deltas = deltas_reader.read_file("test.bin")?.unwrap();
assert_eq!(deltas.len(), 1);
assert_eq!(deltas[0].operations.len(), 0);
assert_eq!(
std::fs::read_to_string(gb_repository.session_wd_path().join("test.bin"))?,
""
);
Ok(())
}
#[test]
fn register_empty_new_file() -> Result<()> {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &fixture.new_case();
std::fs::write(project.path.join("test.txt"), "")?;
listener.calculate_delta("test.txt", project.id)?;
let session = gb_repository.get_current_session()?.unwrap();
let session_reader = sessions::Reader::open(gb_repository, &session)?;
let deltas_reader = deltas::Reader::new(&session_reader);
let deltas = deltas_reader.read_file("test.txt")?.unwrap();
assert_eq!(deltas.len(), 1);
assert_eq!(deltas[0].operations.len(), 0);
assert_eq!(
std::fs::read_to_string(gb_repository.session_wd_path().join("test.txt"))?,
""
);
Ok(())
}
#[test]
fn register_new_file() -> Result<()> {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &fixture.new_case();
std::fs::write(project.path.join("test.txt"), "test")?;
listener.calculate_delta("test.txt", project.id)?;
let session = gb_repository.get_current_session()?.unwrap();
let session_reader = sessions::Reader::open(gb_repository, &session)?;
let deltas_reader = deltas::Reader::new(&session_reader);
let deltas = deltas_reader.read_file("test.txt")?.unwrap();
assert_eq!(deltas.len(), 1);
assert_eq!(deltas[0].operations.len(), 1);
assert_eq!(
deltas[0].operations[0],
Operation::Insert((0, "test".to_string())),
);
assert_eq!(
std::fs::read_to_string(gb_repository.session_wd_path().join("test.txt"))?,
"test"
);
Ok(())
}
#[test]
fn register_no_changes_saved_throughout_flushes() -> Result<()> {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project_repository,
project,
..
} = &fixture.new_case();
// on a file change, the session wd file and deltas are written
std::fs::write(project.path.join("test.txt"), "test")?;
listener.calculate_delta("test.txt", project.id)?;
// make two more sessions.
gb_repository.flush(project_repository, None)?;
gb_repository.get_or_create_current_session()?;
gb_repository.flush(project_repository, None)?;
// after some sessions, files from the first change are still there.
let session = gb_repository.get_or_create_current_session()?;
let session_reader = sessions::Reader::open(gb_repository, &session)?;
let files = session_reader.files(None)?;
assert_eq!(files.len(), 1);
Ok(())
}
#[test]
fn register_new_file_twice() -> Result<()> {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &fixture.new_case();
std::fs::write(project.path.join("test.txt"), "test")?;
listener.calculate_delta("test.txt", project.id)?;
let session = gb_repository.get_current_session()?.unwrap();
let session_reader = sessions::Reader::open(gb_repository, &session)?;
let deltas_reader = deltas::Reader::new(&session_reader);
let deltas = deltas_reader.read_file("test.txt")?.unwrap();
assert_eq!(deltas.len(), 1);
assert_eq!(deltas[0].operations.len(), 1);
assert_eq!(
deltas[0].operations[0],
Operation::Insert((0, "test".to_string())),
);
assert_eq!(
std::fs::read_to_string(gb_repository.session_wd_path().join("test.txt"))?,
"test"
);
std::fs::write(project.path.join("test.txt"), "test2")?;
listener.calculate_delta("test.txt", project.id)?;
let deltas = deltas_reader.read_file("test.txt")?.unwrap();
assert_eq!(deltas.len(), 2);
assert_eq!(deltas[0].operations.len(), 1);
assert_eq!(
deltas[0].operations[0],
Operation::Insert((0, "test".to_string())),
);
assert_eq!(deltas[1].operations.len(), 1);
assert_eq!(
deltas[1].operations[0],
Operation::Insert((4, "2".to_string())),
);
assert_eq!(
std::fs::read_to_string(gb_repository.session_wd_path().join("test.txt"))?,
"test2"
);
Ok(())
}
#[test]
fn register_file_deleted() -> Result<()> {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project_repository,
project,
..
} = &fixture.new_case();
{
// write file
std::fs::write(project.path.join("test.txt"), "test")?;
listener.calculate_delta("test.txt", project.id)?;
}
{
// the current session must have the deltas, but not the file (it did not exist at session start)
let session = gb_repository.get_current_session()?.unwrap();
let session_reader = sessions::Reader::open(gb_repository, &session)?;
let deltas_reader = deltas::Reader::new(&session_reader);
let deltas = deltas_reader.read_file("test.txt")?.unwrap();
assert_eq!(deltas.len(), 1);
assert_eq!(deltas[0].operations.len(), 1);
assert_eq!(
deltas[0].operations[0],
Operation::Insert((0, "test".to_string())),
);
assert_eq!(
std::fs::read_to_string(gb_repository.session_wd_path().join("test.txt"))?,
"test"
);
let files = session_reader.files(None).unwrap();
assert!(files.is_empty());
}
gb_repository.flush(project_repository, None)?;
{
// file should be available in the next session, but not deltas just yet.
let session = gb_repository.get_or_create_current_session()?;
let session_reader = sessions::Reader::open(gb_repository, &session)?;
let files = session_reader.files(None).unwrap();
assert_eq!(files.len(), 1);
assert_eq!(
files[Path::new("test.txt")],
reader::Content::UTF8("test".to_string())
);
let deltas_reader = deltas::Reader::new(&session_reader);
let deltas = deltas_reader.read(None)?;
assert!(deltas.is_empty());
// removing the file
std::fs::remove_file(project.path.join("test.txt"))?;
listener.calculate_delta("test.txt", project.id)?;
// deltas are recorded
let deltas = deltas_reader.read_file("test.txt")?.unwrap();
assert_eq!(deltas.len(), 1);
assert_eq!(deltas[0].operations.len(), 1);
assert_eq!(deltas[0].operations[0], Operation::Delete((0, 4)),);
}
gb_repository.flush(project_repository, None)?;
{
// since file was deleted in the previous session, it should not exist in the new one.
let session = gb_repository.get_or_create_current_session()?;
let session_reader = sessions::Reader::open(gb_repository, &session)?;
let files = session_reader.files(None).unwrap();
assert!(files.is_empty());
}
Ok(())
}
#[test]
fn flow_with_commits() -> Result<()> {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
project_repository,
..
} = &fixture.new_case();
let size = 10;
let relative_file_path = Path::new("one/two/test.txt");
for i in 1..=size {
std::fs::create_dir_all(Path::new(&project.path).join("one/two"))?;
// create a session with a single file change and flush it
std::fs::write(
Path::new(&project.path).join(relative_file_path),
i.to_string(),
)?;
commit_all(&project_repository.git_repository);
listener.calculate_delta(relative_file_path, project.id)?;
assert!(gb_repository.flush(project_repository, None)?.is_some());
}
// get all the created sessions
let mut sessions: Vec<sessions::Session> = gb_repository
.get_sessions_iterator()?
.map(Result::unwrap)
.collect();
assert_eq!(sessions.len(), size);
// verify sessions order is correct
let mut last_start = sessions[0].meta.start_timestamp_ms;
let mut last_end = sessions[0].meta.start_timestamp_ms;
sessions[1..].iter().for_each(|session| {
assert!(session.meta.start_timestamp_ms < last_start);
assert!(session.meta.last_timestamp_ms < last_end);
last_start = session.meta.start_timestamp_ms;
last_end = session.meta.last_timestamp_ms;
});
sessions.reverse();
// try to reconstruct file state from operations for every session slice
for i in 0..sessions.len() {
let sessions_slice = &mut sessions[i..];
// collect all operations from sessions in the reverse order
let mut operations: Vec<Operation> = vec![];
for session in &mut *sessions_slice {
let session_reader = sessions::Reader::open(gb_repository, session).unwrap();
let deltas_reader = deltas::Reader::new(&session_reader);
let deltas_by_filepath = deltas_reader.read(None).unwrap();
for deltas in deltas_by_filepath.values() {
for delta in deltas {
delta.operations.iter().for_each(|operation| {
operations.push(operation.clone());
});
}
}
}
let reader =
sessions::Reader::open(gb_repository, sessions_slice.first().unwrap()).unwrap();
let files = reader.files(None).unwrap();
if i == 0 {
assert_eq!(files.len(), 0);
} else {
assert_eq!(files.len(), 1);
}
let base_file = files.get(&relative_file_path.to_path_buf());
let mut text: Vec<char> = match base_file {
Some(reader::Content::UTF8(file)) => file.chars().collect(),
_ => vec![],
};
for operation in operations {
operation.apply(&mut text).unwrap();
}
assert_eq!(text.iter().collect::<String>(), size.to_string());
}
Ok(())
}
#[test]
fn flow_no_commits() -> Result<()> {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
project_repository,
..
} = &fixture.new_case();
let size = 10;
let relative_file_path = Path::new("one/two/test.txt");
for i in 1..=size {
std::fs::create_dir_all(Path::new(&project.path).join("one/two"))?;
// create a session with a single file change and flush it
std::fs::write(
Path::new(&project.path).join(relative_file_path),
i.to_string(),
)?;
listener.calculate_delta(relative_file_path, project.id)?;
assert!(gb_repository.flush(project_repository, None)?.is_some());
}
// get all the created sessions
let mut sessions: Vec<sessions::Session> = gb_repository
.get_sessions_iterator()?
.map(Result::unwrap)
.collect();
assert_eq!(sessions.len(), size);
// verify sessions order is correct
let mut last_start = sessions[0].meta.start_timestamp_ms;
let mut last_end = sessions[0].meta.start_timestamp_ms;
sessions[1..].iter().for_each(|session| {
assert!(session.meta.start_timestamp_ms < last_start);
assert!(session.meta.last_timestamp_ms < last_end);
last_start = session.meta.start_timestamp_ms;
last_end = session.meta.last_timestamp_ms;
});
sessions.reverse();
// try to reconstruct file state from operations for every session slice
for i in 0..sessions.len() {
let sessions_slice = &mut sessions[i..];
// collect all operations from sessions in the reverse order
let mut operations: Vec<Operation> = vec![];
for session in &mut *sessions_slice {
let session_reader = sessions::Reader::open(gb_repository, session).unwrap();
let deltas_reader = deltas::Reader::new(&session_reader);
let deltas_by_filepath = deltas_reader.read(None).unwrap();
for deltas in deltas_by_filepath.values() {
for delta in deltas {
delta.operations.iter().for_each(|operation| {
operations.push(operation.clone());
});
}
}
}
let reader =
sessions::Reader::open(gb_repository, sessions_slice.first().unwrap()).unwrap();
let files = reader.files(None).unwrap();
if i == 0 {
assert_eq!(files.len(), 0);
} else {
assert_eq!(files.len(), 1);
}
let base_file = files.get(&relative_file_path.to_path_buf());
let mut text: Vec<char> = match base_file {
Some(reader::Content::UTF8(file)) => file.chars().collect(),
_ => vec![],
};
for operation in operations {
operation.apply(&mut text).unwrap();
}
assert_eq!(text.iter().collect::<String>(), size.to_string());
}
Ok(())
}
#[test]
fn flow_single_session() -> Result<()> {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &fixture.new_case();
let size = 10_i32;
let relative_file_path = Path::new("one/two/test.txt");
for i in 1_i32..=size {
std::fs::create_dir_all(Path::new(&project.path).join("one/two"))?;
// make a single file change within the same session (no flush)
std::fs::write(
Path::new(&project.path).join(relative_file_path),
i.to_string(),
)?;
listener.calculate_delta(relative_file_path, project.id)?;
}
// collect all operations from the current session
let mut operations: Vec<Operation> = vec![];
let session = gb_repository.get_current_session()?.unwrap();
let session_reader = sessions::Reader::open(gb_repository, &session).unwrap();
let deltas_reader = deltas::Reader::new(&session_reader);
let deltas_by_filepath = deltas_reader.read(None).unwrap();
for deltas in deltas_by_filepath.values() {
for delta in deltas {
delta.operations.iter().for_each(|operation| {
operations.push(operation.clone());
});
}
}
let reader = sessions::Reader::open(gb_repository, &session).unwrap();
let files = reader.files(None).unwrap();
let base_file = files.get(&relative_file_path.to_path_buf());
let mut text: Vec<char> = match base_file {
Some(reader::Content::UTF8(file)) => file.chars().collect(),
_ => vec![],
};
for operation in operations {
operation.apply(&mut text).unwrap();
}
assert_eq!(text.iter().collect::<String>(), size.to_string());
Ok(())
}
#[test]
fn should_persist_branches_targets_state_between_sessions() -> Result<()> {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
project,
project_repository,
..
} = &fixture.new_case_with_files(HashMap::from([(PathBuf::from("test.txt"), "hello world")]));
let vb_state = project.virtual_branches();
let default_target = new_test_target();
vb_state.set_default_target(default_target.clone())?;
let vbranch0 = new_test_branch();
vb_state.set_branch(vbranch0.clone())?;
let vbranch1 = new_test_branch();
let vbranch1_target = new_test_target();
vb_state.set_branch(vbranch1.clone())?;
vb_state.set_branch_target(vbranch1.id, vbranch1_target.clone())?;
std::fs::write(project.path.join("test.txt"), "hello world!").unwrap();
listener.calculate_delta("test.txt", project.id)?;
let vb_state = project_repository.project().virtual_branches();
let branches = vb_state.list_branches().unwrap();
assert_eq!(branches.len(), 2);
let branch_ids = branches.iter().map(|b| b.id).collect::<Vec<_>>();
assert!(branch_ids.contains(&vbranch0.id));
assert!(branch_ids.contains(&vbranch1.id));
assert_eq!(vb_state.get_default_target().unwrap(), default_target);
assert_eq!(
vb_state.get_branch_target(&vbranch0.id).unwrap(),
default_target
);
assert_eq!(
vb_state.get_branch_target(&vbranch1.id).unwrap(),
vbranch1_target
);
Ok(())
}
#[test]
fn should_restore_branches_targets_state_from_head_session() -> Result<()> {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case { project, .. } =
&fixture.new_case_with_files(HashMap::from([(PathBuf::from("test.txt"), "hello world")]));
let vb_state = project.virtual_branches();
let default_target = new_test_target();
vb_state.set_default_target(default_target.clone())?;
let vbranch0 = new_test_branch();
vb_state.set_branch(vbranch0.clone())?;
let vbranch1 = new_test_branch();
let vbranch1_target = new_test_target();
vb_state.set_branch(vbranch1.clone())?;
vb_state.set_branch_target(vbranch1.id, vbranch1_target.clone())?;
std::fs::write(project.path.join("test.txt"), "hello world!").unwrap();
listener.calculate_delta("test.txt", project.id).unwrap();
let branches = vb_state.list_branches().unwrap();
assert_eq!(branches.len(), 2);
let branch_ids = branches.iter().map(|b| b.id).collect::<Vec<_>>();
assert!(branch_ids.contains(&vbranch0.id));
assert!(branch_ids.contains(&vbranch1.id));
assert_eq!(vb_state.get_default_target().unwrap(), default_target);
assert_eq!(
vb_state.get_branch_target(&vbranch0.id).unwrap(),
default_target
);
assert_eq!(
vb_state.get_branch_target(&vbranch1.id).unwrap(),
vbranch1_target
);
Ok(())
}
mod flush_wd {
use super::*;
#[test]
fn should_add_new_files_to_session_wd() {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
project_repository,
..
} = &fixture.new_case();
// write a file into session
std::fs::write(project.path.join("test.txt"), "hello world!").unwrap();
listener.calculate_delta("test.txt", project.id).unwrap();
let flushed_session = gb_repository
.flush(project_repository, None)
.unwrap()
.unwrap();
{
// after the flush, the file should appear in the session commit
let session_commit = gb_repository
.git_repository()
.find_commit(flushed_session.hash.unwrap())
.unwrap();
let commit_reader =
reader::Reader::from_commit(gb_repository.git_repository(), &session_commit)
.unwrap();
assert_eq!(
commit_reader.list_files(Path::new("wd")).unwrap(),
vec![Path::new("test.txt")]
);
assert_eq!(
commit_reader.read(Path::new("wd/test.txt")).unwrap(),
reader::Content::UTF8("hello world!".to_string())
);
}
// write another file into session
std::fs::create_dir_all(project.path.join("one/two")).unwrap();
std::fs::write(project.path.join("one/two/test2.txt"), "hello world!").unwrap();
listener
.calculate_delta("one/two/test2.txt", project.id)
.unwrap();
let flushed_session = gb_repository
.flush(project_repository, None)
.unwrap()
.unwrap();
{
// after the flush, the new file should appear in the commit alongside the previous one
let session_commit = gb_repository
.git_repository()
.find_commit(flushed_session.hash.unwrap())
.unwrap();
let commit_reader =
reader::Reader::from_commit(gb_repository.git_repository(), &session_commit)
.unwrap();
assert_eq!(
commit_reader.list_files(Path::new("wd")).unwrap(),
vec![Path::new("one/two/test2.txt"), Path::new("test.txt"),]
);
assert_eq!(
commit_reader.read(Path::new("wd/test.txt")).unwrap(),
reader::Content::UTF8("hello world!".to_string())
);
assert_eq!(
commit_reader
.read(Path::new("wd/one/two/test2.txt"))
.unwrap(),
reader::Content::UTF8("hello world!".to_string())
);
}
}
#[test]
fn should_remove_deleted_files_from_session_wd() {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
project_repository,
..
} = &fixture.new_case();
// write a file into session
std::fs::write(project.path.join("test.txt"), "hello world!").unwrap();
listener.calculate_delta("test.txt", project.id).unwrap();
std::fs::create_dir_all(project.path.join("one/two")).unwrap();
std::fs::write(project.path.join("one/two/test2.txt"), "hello world!").unwrap();
listener
.calculate_delta("one/two/test2.txt", project.id)
.unwrap();
let flushed_session = gb_repository
.flush(project_repository, None)
.unwrap()
.unwrap();
{
// after the flush, both files should appear in the session commit
let session_commit = gb_repository
.git_repository()
.find_commit(flushed_session.hash.unwrap())
.unwrap();
let commit_reader =
reader::Reader::from_commit(gb_repository.git_repository(), &session_commit)
.unwrap();
assert_eq!(
commit_reader.list_files(Path::new("wd")).unwrap(),
vec![Path::new("one/two/test2.txt"), Path::new("test.txt"),]
);
assert_eq!(
commit_reader.read(Path::new("wd/test.txt")).unwrap(),
reader::Content::UTF8("hello world!".to_string())
);
assert_eq!(
commit_reader
.read(Path::new("wd/one/two/test2.txt"))
.unwrap(),
reader::Content::UTF8("hello world!".to_string())
);
}
// rm the files
std::fs::remove_file(project.path.join("test.txt")).unwrap();
listener.calculate_delta("test.txt", project.id).unwrap();
std::fs::remove_file(project.path.join("one/two/test2.txt")).unwrap();
listener
.calculate_delta("one/two/test2.txt", project.id)
.unwrap();
let flushed_session = gb_repository
.flush(project_repository, None)
.unwrap()
.unwrap();
{
// after the flush, the deleted files should be gone from the commit
let session_commit = gb_repository
.git_repository()
.find_commit(flushed_session.hash.unwrap())
.unwrap();
let commit_reader =
reader::Reader::from_commit(gb_repository.git_repository(), &session_commit)
.unwrap();
assert!(commit_reader
.list_files(Path::new("wd"))
.unwrap()
.is_empty());
}
}
#[test]
fn should_update_updated_files_in_session_wd() {
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
project_repository,
..
} = &fixture.new_case();
// write a file into session
std::fs::write(project.path.join("test.txt"), "hello world!").unwrap();
listener.calculate_delta("test.txt", project.id).unwrap();
std::fs::create_dir_all(project.path.join("one/two")).unwrap();
std::fs::write(project.path.join("one/two/test2.txt"), "hello world!").unwrap();
listener
.calculate_delta("one/two/test2.txt", project.id)
.unwrap();
let flushed_session = gb_repository
.flush(project_repository, None)
.unwrap()
.unwrap();
{
// after the flush, both files should appear in the session commit
let session_commit = gb_repository
.git_repository()
.find_commit(flushed_session.hash.unwrap())
.unwrap();
let commit_reader =
reader::Reader::from_commit(gb_repository.git_repository(), &session_commit)
.unwrap();
assert_eq!(
commit_reader.list_files(Path::new("wd")).unwrap(),
vec![Path::new("one/two/test2.txt"), Path::new("test.txt"),]
);
assert_eq!(
commit_reader.read(Path::new("wd/test.txt")).unwrap(),
reader::Content::UTF8("hello world!".to_string())
);
assert_eq!(
commit_reader
.read(Path::new("wd/one/two/test2.txt"))
.unwrap(),
reader::Content::UTF8("hello world!".to_string())
);
}
// update the files
std::fs::write(project.path.join("test.txt"), "hello world!2").unwrap();
listener.calculate_delta("test.txt", project.id).unwrap();
std::fs::write(project.path.join("one/two/test2.txt"), "hello world!2").unwrap();
listener
.calculate_delta("one/two/test2.txt", project.id)
.unwrap();
let flushed_session = gb_repository
.flush(project_repository, None)
.unwrap()
.unwrap();
{
// after the flush, the updated contents should be in the commit
let session_commit = gb_repository
.git_repository()
.find_commit(flushed_session.hash.unwrap())
.unwrap();
let commit_reader =
reader::Reader::from_commit(gb_repository.git_repository(), &session_commit)
.unwrap();
assert_eq!(
commit_reader.list_files(Path::new("wd")).unwrap(),
vec![Path::new("one/two/test2.txt"), Path::new("test.txt"),]
);
assert_eq!(
commit_reader.read(Path::new("wd/test.txt")).unwrap(),
reader::Content::UTF8("hello world!2".to_string())
);
assert_eq!(
commit_reader
.read(Path::new("wd/one/two/test2.txt"))
.unwrap(),
reader::Content::UTF8("hello world!2".to_string())
);
}
}
}

View File

@@ -1,93 +0,0 @@
use std::fs;
use anyhow::Result;
use gitbutler_core::projects;
use crate::handler::support::Fixture;
use gitbutler_testsupport::Case;
use gitbutler_watcher::Change;
#[tokio::test]
async fn flush_session() -> Result<()> {
let mut fixture = Fixture::default();
{
let case = fixture.new_case();
let Case {
project,
gb_repository,
..
} = &case;
assert!(gb_repository.get_current_session()?.is_none());
let handler = create_new_session_via_new_file(project, &mut fixture);
assert!(gb_repository.get_current_session()?.is_some());
let flush_file_path = project.path.join(".git/GB_FLUSH");
fs::write(flush_file_path.as_path(), "")?;
handler.git_file_change("GB_FLUSH", project.id).await?;
assert!(!flush_file_path.exists(), "flush file deleted");
}
let events = fixture.events();
assert_eq!(events.len(), 4);
assert!(matches!(events[0], Change::File { .. }));
assert!(matches!(events[1], Change::Deltas { .. }));
assert!(matches!(events[2], Change::Session { .. }));
assert!(matches!(events[3], Change::Session { .. }));
Ok(())
}
#[tokio::test]
async fn do_not_flush_session_if_file_is_missing() -> Result<()> {
let mut fixture = Fixture::default();
{
let Case {
project,
gb_repository,
..
} = &fixture.new_case();
assert!(gb_repository.get_current_session()?.is_none());
let handler = create_new_session_via_new_file(project, &mut fixture);
assert!(gb_repository.get_current_session()?.is_some());
handler.git_file_change("GB_FLUSH", project.id).await?;
}
let events = fixture.events();
assert_eq!(events.len(), 3);
assert!(matches!(events[0], Change::File { .. }));
assert!(matches!(events[1], Change::Deltas { .. }));
assert!(matches!(events[2], Change::Session { .. }));
Ok(())
}
#[tokio::test]
async fn flush_deletes_flush_file_without_session_to_flush() -> Result<()> {
let mut fixture = Fixture::default();
{
let handler = fixture.new_handler();
let Case { project, .. } = &fixture.new_case();
let flush_file_path = project.path.join(".git/GB_FLUSH");
fs::write(flush_file_path.as_path(), "")?;
handler.git_file_change("GB_FLUSH", project.id).await?;
assert!(!flush_file_path.exists(), "flush file deleted");
}
assert_eq!(fixture.events().len(), 0);
Ok(())
}
fn create_new_session_via_new_file(
project: &projects::Project,
fixture: &mut Fixture,
) -> gitbutler_watcher::Handler {
fs::write(project.path.join("test.txt"), "test").unwrap();
let handler = fixture.new_handler();
handler
.calculate_deltas(vec!["test.txt".into()], project.id)
.unwrap();
handler
}

View File

@@ -1,89 +0,0 @@
mod support {
use gitbutler_core::{assets, deltas, git, sessions, virtual_branches};
use tempfile::TempDir;
/// Like [`gitbutler_testsupport::Suite`], but with all the instances needed to build a handler
pub struct Fixture {
inner: gitbutler_testsupport::Suite,
pub sessions_db: sessions::Database,
pub deltas_db: deltas::Database,
pub vbranch_controller: virtual_branches::Controller,
pub assets_proxy: assets::Proxy,
/// Keeps changes emitted from the last created handler.
changes: Option<std::sync::mpsc::Receiver<gitbutler_watcher::Change>>,
/// Storage for the databases, to be dropped last.
_tmp: TempDir,
}
impl std::ops::Deref for Fixture {
type Target = gitbutler_testsupport::Suite;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl Default for Fixture {
fn default() -> Self {
let (db, tmp) = gitbutler_testsupport::test_database();
let inner = gitbutler_testsupport::Suite::default();
let sessions_db = sessions::Database::new(db.clone());
let deltas_db = deltas::Database::new(db);
let git_credentials_helper =
git::credentials::Helper::new(inner.keys.clone(), inner.users.clone(), None);
let vbranch_controller = virtual_branches::Controller::new(
inner.projects.clone(),
inner.users.clone(),
inner.keys.clone(),
git_credentials_helper,
);
let assets_proxy = assets::Proxy::new(tmp.path().to_owned());
Fixture {
inner,
sessions_db,
deltas_db,
vbranch_controller,
assets_proxy,
changes: None,
_tmp: tmp,
}
}
}
impl Fixture {
/// Must be mut as handler events are collected into the fixture automatically.
///
/// Note that this only works for the most recent created handler.
pub fn new_handler(&mut self) -> gitbutler_watcher::Handler {
let (tx, rx) = std::sync::mpsc::channel();
self.changes = Some(rx);
gitbutler_watcher::Handler::new(
self.local_app_data().to_owned(),
self.users.clone(),
self.projects.clone(),
self.vbranch_controller.clone(),
self.assets_proxy.clone(),
self.sessions_db.clone(),
self.deltas_db.clone(),
move |event| tx.send(event.clone()).map_err(Into::into),
)
}
/// Returns the events that were emitted to the tauri app.
pub fn events(&mut self) -> Vec<gitbutler_watcher::Change> {
let Some(rx) = self.changes.as_ref() else {
return Vec::new();
};
let mut out = Vec::new();
// Drain without blocking, in case the `handler` is still alive and may send more.
while let Ok(event) = rx.try_recv() {
out.push(event);
}
out
}
}
}
mod calculate_delta;
mod git_file_change;

View File

@@ -1 +0,0 @@
mod handler;