Merge pull request #3471 from Byron/watcher-refactor

watcher refactor
This commit is contained in:
Josh Junon 2024-04-15 17:58:56 +02:00 committed by GitHub
commit f9f1f3d2ee
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
43 changed files with 1650 additions and 2364 deletions

1
Cargo.lock generated
View File

@ -2097,6 +2097,7 @@ dependencies = [
"backtrace",
"chrono",
"console-subscriber",
"crossbeam-channel",
"futures",
"git2",
"gitbutler-core",

View File

@ -1,3 +1,4 @@
import { invoke } from '$lib/backend/ipc';
import { BranchService } from '$lib/branches/service';
import { getFetchNotifications } from '$lib/stores/fetches';
import { getHeads } from '$lib/stores/head';
@ -27,6 +28,7 @@ export async function load({ params, parent }) {
let project: Project | undefined = undefined;
try {
project = await projectService.getProject(projectId);
invoke('set_project_active', { id: projectId }).then((_r) => {});
} catch (err: any) {
throw error(400, {
message: err.message

View File

@ -20,7 +20,7 @@ impl Database {
project_id: &ProjectId,
session_id: &SessionId,
file_path: &path::Path,
deltas: &Vec<delta::Delta>,
deltas: &[delta::Delta],
) -> Result<()> {
self.database.transaction(|tx| -> Result<()> {
let mut stmt = insert_stmt(tx).context("Failed to prepare insert statement")?;

View File

@ -26,7 +26,7 @@ impl<'writer> DeltasWriter<'writer> {
self.writer
.write_string(PathBuf::from("session/deltas").join(path), &raw_deltas)?;
tracing::debug!(
tracing::trace!(
project_id = %self.repository.get_project_id(),
path = %path.display(),
"wrote deltas"
@ -43,7 +43,7 @@ impl<'writer> DeltasWriter<'writer> {
let path = path.as_ref();
self.writer.remove(PathBuf::from("session/wd").join(path))?;
tracing::debug!(
tracing::trace!(
project_id = %self.repository.get_project_id(),
path = %path.display(),
"deleted session wd file"
@ -61,7 +61,7 @@ impl<'writer> DeltasWriter<'writer> {
self.writer
.write_string(PathBuf::from("session/wd").join(path), contents)?;
tracing::debug!(
tracing::trace!(
project_id = %self.repository.get_project_id(),
path = %path.display(),
"wrote session wd file"

View File

@ -15,10 +15,12 @@ use crate::{
#[async_trait]
pub trait Watchers {
/// Watch for filesystem changes on the given project.
fn watch(&self, project: &Project) -> anyhow::Result<()>;
async fn stop(&self, id: ProjectId) -> anyhow::Result<()>;
async fn fetch(&self, id: ProjectId) -> anyhow::Result<()>;
async fn push(&self, id: ProjectId) -> anyhow::Result<()>;
/// Stop watching filesystem changes.
async fn stop(&self, id: ProjectId);
async fn fetch_gb_data(&self, id: ProjectId) -> anyhow::Result<()>;
async fn push_gb_data(&self, id: ProjectId) -> anyhow::Result<()>;
}
#[derive(Clone)]
@ -109,8 +111,8 @@ impl Controller {
tracing::error!(project_id = %project.id, ?error, "failed to create {:?} on project add", project.gb_dir());
}
if let Some(watchers) = &self.watchers {
watchers.watch(&project)?;
if let Some(watcher) = &self.watchers {
watcher.watch(&project)?;
}
Ok(project)
@ -161,7 +163,7 @@ impl Controller {
if let Some(watchers) = &self.watchers {
if let Some(api) = &project.api {
if api.sync {
if let Err(error) = watchers.fetch(project.id).await {
if let Err(error) = watchers.fetch_gb_data(project.id).await {
tracing::error!(
project_id = %project.id,
?error,
@ -170,7 +172,7 @@ impl Controller {
}
}
if let Err(error) = watchers.push(project.id).await {
if let Err(error) = watchers.push_gb_data(project.id).await {
tracing::error!(
project_id = %project.id,
?error,
@ -231,13 +233,7 @@ impl Controller {
}?;
if let Some(watchers) = &self.watchers {
if let Err(error) = watchers.stop(*id).await {
tracing::error!(
project_id = %id,
?error,
"failed to stop watcher for project",
);
}
watchers.stop(*id).await;
}
self.projects_storage

View File

@ -66,6 +66,8 @@ pub struct Project {
pub id: ProjectId,
pub title: String,
pub description: Option<String>,
// TODO(ST): Keep track of the `git_dir` separately and use it, particularly in `file_monitor.rs` (#3062)
/// The worktree path of the projects repository.
pub path: path::PathBuf,
#[serde(default)]
pub preferred_key: AuthKey,

View File

@ -55,8 +55,8 @@ mod database {
))],
};
database.insert(&project_id, &session_id, &file_path, &vec![delta1])?;
database.insert(&project_id, &session_id, &file_path, &vec![delta2.clone()])?;
database.insert(&project_id, &session_id, &file_path, &[delta1])?;
database.insert(&project_id, &session_id, &file_path, &[delta2.clone()])?;
assert_eq!(
database.list_by_project_id_session_id(&project_id, &session_id, &None)?,
@ -89,9 +89,9 @@ mod database {
))],
};
database.insert(&project_id, &session_id, &file_path1, &vec![delta1.clone()])?;
database.insert(&project_id, &session_id, &file_path2, &vec![delta1.clone()])?;
database.insert(&project_id, &session_id, &file_path2, &vec![delta2.clone()])?;
database.insert(&project_id, &session_id, &file_path1, &[delta1.clone()])?;
database.insert(&project_id, &session_id, &file_path2, &[delta1.clone()])?;
database.insert(&project_id, &session_id, &file_path2, &[delta2.clone()])?;
assert_eq!(
database.list_by_project_id_session_id(&project_id, &session_id, &None)?,

View File

@ -275,7 +275,7 @@ async fn remote_syncronization() {
})
.await
.unwrap();
let case_one = case_one.refresh();
let case_one = case_one.refresh(&suite);
let writer = deltas::Writer::new(&case_one.gb_repository).unwrap();
writer
@ -305,7 +305,7 @@ async fn remote_syncronization() {
})
.await
.unwrap();
let case_two = case_two.refresh();
let case_two = case_two.refresh(&suite);
case_two.gb_repository.fetch(Some(&user)).unwrap();
@ -365,7 +365,7 @@ async fn remote_sync_order() {
})
.await
.unwrap();
let case_one = case_one.refresh();
let case_one = case_one.refresh(&suite);
let case_two = suite.new_case();
suite
@ -377,7 +377,7 @@ async fn remote_sync_order() {
})
.await
.unwrap();
let case_two = case_two.refresh();
let case_two = case_two.refresh(&suite);
let user = suite.sign_in();

View File

@ -242,7 +242,7 @@ async fn remote_syncronization() {
})
.await
.unwrap();
let case_one = case_one.refresh();
let case_one = case_one.refresh(&suite);
let writer = deltas::Writer::new(&case_one.gb_repository).unwrap();
writer
@ -272,7 +272,7 @@ async fn remote_syncronization() {
})
.await
.unwrap();
let case_two = case_two.refresh();
let case_two = case_two.refresh(&suite);
case_two.gb_repository.fetch(Some(&user)).unwrap();
@ -332,7 +332,7 @@ async fn remote_sync_order() {
})
.await
.unwrap();
let case_one = case_one.refresh();
let case_one = case_one.refresh(&suite);
let case_two = suite.new_case();
suite
@ -344,7 +344,7 @@ async fn remote_sync_order() {
})
.await
.unwrap();
let case_two = case_two.refresh();
let case_two = case_two.refresh(&suite);
let user = suite.sign_in();

View File

@ -29,6 +29,7 @@ backoff = "0.4.0"
backtrace = { version = "0.3.71", optional = true }
chrono = { version = "0.4.37", features = ["serde"] }
console-subscriber = "0.2.0"
crossbeam-channel = "0.5.12"
futures = "0.3"
git2.workspace = true
governor = "0.6.3"

View File

@ -33,9 +33,9 @@ impl fmt::Display for Event {
}
impl Event {
pub fn project_id(&self) -> &ProjectId {
pub fn project_id(&self) -> ProjectId {
match self {
Event::HeadChange { project_id, .. } => project_id,
Event::HeadChange { project_id, .. } => *project_id,
}
}
@ -55,14 +55,16 @@ impl Event {
}
}
/// NOTE: Needs to be `Clone` only because the watcher wants to obtain it from `tauri`.
/// It's just for dependency injection.
#[derive(Clone)]
pub struct Client {
client: Arc<Box<dyn posthog::Client + Sync + Send>>,
client: Arc<dyn posthog::Client + Sync + Send>,
}
impl Client {
pub fn new(app_handle: &AppHandle, config: &Config) -> Self {
let client: Box<dyn posthog::Client + Sync + Send> =
let client: Arc<dyn posthog::Client + Sync + Send> =
if let Some(posthog_token) = config.posthog_token {
let real = posthog::real::Client::new(posthog::real::ClientOptions {
api_key: posthog_token.to_string(),
@ -70,30 +72,29 @@ impl Client {
app_version: app_handle.package_info().version.to_string(),
});
let real_with_retry = posthog::retry::Client::new(real);
Box::new(real_with_retry)
Arc::new(real_with_retry)
} else {
Box::<posthog::mock::Client>::default()
Arc::<posthog::mock::Client>::default()
};
Client {
client: Arc::new(client),
}
Client { client }
}
pub async fn send(&self, user: &User, event: &Event) {
if let Err(error) = self
.client
.capture(&[event.clone().into_posthog_event(user)])
.await
{
tracing::warn!(?error, "failed to send analytics");
}
/// Send `event` to analytics and associate it with `user` without blocking.
pub fn send_non_anonymous_event_nonblocking(&self, user: &User, event: &Event) {
let client = self.client.clone();
let event = event.clone().into_posthog_event(user);
tokio::spawn(async move {
if let Err(error) = client.capture(&[event]).await {
tracing::warn!(?error, "failed to send analytics");
}
});
}
}
impl Default for Client {
fn default() -> Self {
Self {
client: Arc::new(Box::<posthog::mock::Client>::default()),
client: Arc::new(posthog::mock::Client),
}
}
}

View File

@ -14,14 +14,12 @@ use gitbutler_core::{
};
use crate::error::Error;
use crate::watcher;
#[derive(Clone)]
pub struct App {
local_data_dir: path::PathBuf,
projects: projects::Controller,
users: users::Controller,
watchers: watcher::Watchers,
sessions_database: sessions::Database,
}
@ -30,40 +28,16 @@ impl App {
local_data_dir: path::PathBuf,
projects: projects::Controller,
users: users::Controller,
watchers: watcher::Watchers,
sessions_database: sessions::Database,
) -> Self {
Self {
local_data_dir,
projects,
users,
watchers,
sessions_database,
}
}
pub fn init_project(&self, project: &projects::Project) -> Result<()> {
self.watchers.watch(project).context(format!(
"failed to start watcher for project {}",
&project.id
))?;
Ok(())
}
pub fn init(&self) -> Result<()> {
for project in self
.projects
.list()
.with_context(|| "failed to list projects")?
{
if let Err(error) = self.init_project(&project) {
tracing::error!(%project.id, ?error, "failed to init project");
}
}
Ok(())
}
pub fn list_session_files(
&self,
project_id: &ProjectId,

View File

@ -6,40 +6,7 @@ use gitbutler_core::{
sessions::{self, SessionId},
virtual_branches,
};
use tauri::{AppHandle, Manager};
#[derive(Clone)]
pub struct Sender {
app_handle: tauri::AppHandle,
}
impl TryFrom<&AppHandle> for Sender {
type Error = anyhow::Error;
fn try_from(value: &AppHandle) -> Result<Self, Self::Error> {
if let Some(sender) = value.try_state::<Sender>() {
Ok(sender.inner().clone())
} else {
let sender = Sender::new(value.clone());
value.manage(sender.clone());
Ok(sender)
}
}
}
impl Sender {
fn new(app_handle: AppHandle) -> Sender {
Sender { app_handle }
}
pub fn send(&self, event: &Event) -> Result<()> {
self.app_handle
.emit_all(&event.name, Some(&event.payload))
.context("emit event")?;
tracing::debug!(event_name = event.name, "sent event");
Ok(())
}
}
use tauri::Manager;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Event {
@ -49,49 +16,57 @@ pub struct Event {
}
impl Event {
pub fn send(&self, app_handle: &tauri::AppHandle) -> Result<()> {
app_handle
.emit_all(&self.name, Some(&self.payload))
.context("emit event")?;
tracing::trace!(event_name = self.name);
Ok(())
}
pub fn name(&self) -> &str {
&self.name
}
pub fn project_id(&self) -> &ProjectId {
&self.project_id
pub fn project_id(&self) -> ProjectId {
self.project_id
}
pub fn git_index(project_id: &ProjectId) -> Self {
pub fn git_index(project_id: ProjectId) -> Self {
Event {
name: format!("project://{}/git/index", project_id),
payload: serde_json::json!({}),
project_id: *project_id,
project_id,
}
}
pub fn git_fetch(project_id: &ProjectId) -> Self {
pub fn git_fetch(project_id: ProjectId) -> Self {
Event {
name: format!("project://{}/git/fetch", project_id),
payload: serde_json::json!({}),
project_id: *project_id,
project_id,
}
}
pub fn git_head(project_id: &ProjectId, head: &str) -> Self {
pub fn git_head(project_id: ProjectId, head: &str) -> Self {
Event {
name: format!("project://{}/git/head", project_id),
payload: serde_json::json!({ "head": head }),
project_id: *project_id,
project_id,
}
}
pub fn git_activity(project_id: &ProjectId) -> Self {
pub fn git_activity(project_id: ProjectId) -> Self {
Event {
name: format!("project://{}/git/activity", project_id),
payload: serde_json::json!({}),
project_id: *project_id,
project_id,
}
}
pub fn file(
project_id: &ProjectId,
session_id: &SessionId,
project_id: ProjectId,
session_id: SessionId,
file_path: &str,
contents: Option<&reader::Content>,
) -> Self {
@ -101,22 +76,22 @@ impl Event {
"filePath": file_path,
"contents": contents,
}),
project_id: *project_id,
project_id,
}
}
pub fn session(project_id: &ProjectId, session: &sessions::Session) -> Self {
pub fn session(project_id: ProjectId, session: &sessions::Session) -> Self {
Event {
name: format!("project://{}/sessions", project_id),
payload: serde_json::to_value(session).unwrap(),
project_id: *project_id,
project_id,
}
}
pub fn deltas(
project_id: &ProjectId,
session_id: &SessionId,
deltas: &Vec<deltas::Delta>,
project_id: ProjectId,
session_id: SessionId,
deltas: &[deltas::Delta],
relative_file_path: &std::path::Path,
) -> Self {
Event {
@ -125,18 +100,18 @@ impl Event {
"deltas": deltas,
"filePath": relative_file_path,
}),
project_id: *project_id,
project_id,
}
}
pub fn virtual_branches(
project_id: &ProjectId,
project_id: ProjectId,
virtual_branches: &virtual_branches::VirtualBranches,
) -> Self {
Event {
name: format!("project://{}/virtual-branches", project_id),
payload: serde_json::json!(virtual_branches),
project_id: *project_id,
project_id,
}
}
}

View File

@ -16,7 +16,10 @@
pub mod analytics;
pub mod app;
pub mod commands;
pub mod events;
mod events;
pub use events::Event;
pub mod logs;
pub mod menu;
pub mod watcher;

View File

@ -195,12 +195,9 @@ fn main() {
app_data_dir,
projects_controller,
users_controller,
watcher_controller,
sessions_database_controller,
);
app.init().context("failed to init app")?;
app_handle.manage(app);
Ok(())
@ -233,6 +230,7 @@ fn main() {
projects::commands::update_project,
projects::commands::delete_project,
projects::commands::list_projects,
projects::commands::set_project_active,
projects::commands::git_get_local_config,
projects::commands::git_set_local_config,
sessions::commands::list_sessions,

View File

@ -2,13 +2,13 @@ pub mod commands {
use anyhow::Context;
use std::path;
use gitbutler_core::error;
use gitbutler_core::error::Code;
use gitbutler_core::projects::{self, controller::Controller};
use gitbutler_core::projects::{self, controller::Controller, ProjectId};
use tauri::Manager;
use tracing::instrument;
use crate::error::Error;
use crate::watcher::Watchers;
#[tauri::command(async)]
#[instrument(skip(handle), err(Debug))]
@ -39,12 +39,8 @@ pub mod commands {
#[instrument(skip(handle), err(Debug))]
pub async fn get_project(
handle: tauri::AppHandle,
id: &str,
id: ProjectId,
) -> Result<projects::Project, Error> {
let id = id.parse().context(error::Context::new_static(
Code::Validation,
"Malformed project id",
))?;
handle
.state::<Controller>()
.get(&id)
@ -57,13 +53,22 @@ pub mod commands {
handle.state::<Controller>().list().map_err(Into::into)
}
/// This trigger is the GUI telling us that the project with `id` is now displayed.
///
/// We use it to start watching for filesystem events.
#[tauri::command(async)]
#[instrument(skip(handle), err(Debug))]
pub async fn delete_project(handle: tauri::AppHandle, id: &str) -> Result<(), Error> {
let id = id.parse().context(error::Context::new_static(
Code::Validation,
"Malformed project id",
))?;
pub async fn set_project_active(handle: tauri::AppHandle, id: ProjectId) -> Result<(), Error> {
let project = handle
.state::<Controller>()
.get(&id)
.context("project not found")?;
Ok(handle.state::<Watchers>().watch(&project)?)
}
#[tauri::command(async)]
#[instrument(skip(handle), err(Debug))]
pub async fn delete_project(handle: tauri::AppHandle, id: ProjectId) -> Result<(), Error> {
handle
.state::<Controller>()
.delete(&id)
@ -75,13 +80,9 @@ pub mod commands {
#[instrument(skip(handle), err(Debug))]
pub async fn git_get_local_config(
handle: tauri::AppHandle,
id: &str,
id: ProjectId,
key: &str,
) -> Result<Option<String>, Error> {
let id = id.parse().context(error::Context::new_static(
Code::Validation,
"Malformed project id",
))?;
Ok(handle
.state::<Controller>()
.get_local_config(&id, key)
@ -92,14 +93,10 @@ pub mod commands {
#[instrument(skip(handle), err(Debug))]
pub async fn git_set_local_config(
handle: tauri::AppHandle,
id: &str,
id: ProjectId,
key: &str,
value: &str,
) -> Result<(), Error> {
let id = id.parse().context(error::Context::new_static(
Code::Validation,
"Malformed project id",
))?;
Ok(handle
.state::<Controller>()
.set_local_config(&id, key, value)

View File

@ -1,80 +1,75 @@
mod dispatchers;
mod events;
pub mod handlers;
pub use events::Event;
use events::InternalEvent;
use std::{collections::HashMap, path, sync::Arc, time};
mod file_monitor;
mod handler;
pub use handler::Handler;
use std::path::Path;
use std::{sync::Arc, time};
use anyhow::{Context, Result};
pub use events::Event;
use futures::executor::block_on;
use gitbutler_core::projects::{self, Project, ProjectId};
use tauri::AppHandle;
use tokio::{
sync::{
mpsc::{unbounded_channel, UnboundedSender},
Mutex,
},
sync::mpsc::{unbounded_channel, UnboundedSender},
task,
};
use tokio_util::sync::CancellationToken;
use tracing::instrument;
/// Note that this type is managed in Tauri and thus needs to be send and sync.
#[derive(Clone)]
pub struct Watchers {
/// NOTE: This handle is required for this type to be self-contained as it's used by `core` through a trait.
app_handle: AppHandle,
watchers: Arc<Mutex<HashMap<ProjectId, Watcher>>>,
/// The watcher of the currently active project.
/// NOTE: This is a `tokio` mutex as this needs to lock the inner option from within async.
watcher: Arc<tokio::sync::Mutex<Option<WatcherHandle>>>,
}
impl Watchers {
pub fn new(app_handle: AppHandle) -> Self {
Self {
app_handle,
watchers: Arc::new(Mutex::new(HashMap::new())),
watcher: Default::default(),
}
}
#[instrument(skip(self, project), err(Debug))]
pub fn watch(&self, project: &projects::Project) -> Result<()> {
let watcher = Watcher::try_from(&self.app_handle)?;
let handler = handler::Handler::from_app(&self.app_handle)?;
let project_id = project.id;
let project_path = project.path.clone();
task::spawn({
let watchers = Arc::clone(&self.watchers);
let watcher = watcher.clone();
async move {
watchers.lock().await.insert(project_id, watcher.clone());
match watcher.run(&project_path, &project_id).await {
Ok(()) => {
tracing::debug!(%project_id, "watcher stopped");
}
Err(RunError::PathNotFound(path)) => {
tracing::warn!(%project_id, path = %path.display(), "watcher stopped: project path not found");
watchers.lock().await.remove(&project_id);
}
Err(error) => {
tracing::error!(?error, %project_id, "watcher error");
watchers.lock().await.remove(&project_id);
}
}
}
});
let handle = watch_in_background(handler, project_path, project_id)?;
block_on(self.watcher.lock()).replace(handle);
Ok(())
}
pub async fn post(&self, event: Event) -> Result<()> {
let watchers = self.watchers.lock().await;
if let Some(watcher) = watchers.get(event.project_id()) {
watcher.post(event).await.context("failed to post event")
let watcher = self.watcher.lock().await;
if let Some(handle) = watcher
.as_ref()
.filter(|watcher| watcher.project_id == event.project_id())
{
handle.post(event).await.context("failed to post event")
} else {
Err(anyhow::anyhow!("watcher not found",))
}
}
pub async fn stop(&self, project_id: &ProjectId) -> Result<()> {
if let Some((_, watcher)) = self.watchers.lock().await.remove_entry(project_id) {
watcher.stop();
};
Ok(())
pub async fn stop(&self, project_id: ProjectId) {
let mut handle = self.watcher.lock().await;
if handle
.as_ref()
.map_or(false, |handle| handle.project_id == project_id)
{
handle.take();
}
}
}
@ -84,169 +79,91 @@ impl gitbutler_core::projects::Watchers for Watchers {
Watchers::watch(self, project)
}
async fn stop(&self, id: ProjectId) -> Result<()> {
Watchers::stop(self, &id).await
async fn stop(&self, id: ProjectId) {
Watchers::stop(self, id).await
}
async fn fetch(&self, id: ProjectId) -> Result<()> {
async fn fetch_gb_data(&self, id: ProjectId) -> Result<()> {
self.post(Event::FetchGitbutlerData(id)).await
}
async fn push(&self, id: ProjectId) -> Result<()> {
async fn push_gb_data(&self, id: ProjectId) -> Result<()> {
self.post(Event::PushGitbutlerData(id)).await
}
}
#[derive(Clone)]
struct Watcher {
inner: Arc<WatcherInner>,
}
impl TryFrom<&AppHandle> for Watcher {
type Error = anyhow::Error;
fn try_from(value: &AppHandle) -> std::result::Result<Self, Self::Error> {
Ok(Self {
inner: Arc::new(WatcherInner::try_from(value)?),
})
}
}
#[derive(Debug, thiserror::Error)]
pub enum RunError {
#[error("{0} not found")]
PathNotFound(path::PathBuf),
#[error(transparent)]
Other(#[from] anyhow::Error),
}
impl Watcher {
pub fn stop(&self) {
self.inner.stop();
}
pub async fn post(&self, event: Event) -> Result<()> {
self.inner.post(event).await
}
pub async fn run<P: AsRef<path::Path>>(
&self,
path: P,
project_id: &ProjectId,
) -> Result<(), RunError> {
self.inner.run(path, project_id).await
}
}
struct WatcherInner {
handler: handlers::Handler,
dispatcher: dispatchers::Dispatcher,
/// An abstraction over a link to the spawned watcher, which runs in the background.
struct WatcherHandle {
/// A way to post events and interact with the actual handler in the background.
tx: UnboundedSender<InternalEvent>,
/// The id of the project we are watching.
project_id: ProjectId,
/// A way to tell the background process to stop handling events.
cancellation_token: CancellationToken,
proxy_tx: Arc<tokio::sync::Mutex<Option<UnboundedSender<Event>>>>,
}
impl TryFrom<&AppHandle> for WatcherInner {
type Error = anyhow::Error;
fn try_from(value: &AppHandle) -> std::result::Result<Self, Self::Error> {
Ok(Self {
handler: handlers::Handler::try_from(value)?,
dispatcher: dispatchers::Dispatcher::new(),
cancellation_token: CancellationToken::new(),
proxy_tx: Arc::new(tokio::sync::Mutex::new(None)),
})
}
}
impl WatcherInner {
pub fn stop(&self) {
impl Drop for WatcherHandle {
fn drop(&mut self) {
self.cancellation_token.cancel();
}
}
impl WatcherHandle {
pub async fn post(&self, event: Event) -> Result<()> {
let tx = self.proxy_tx.lock().await;
if tx.is_some() {
tx.as_ref()
.unwrap()
.send(event)
.context("failed to send event")?;
Ok(())
} else {
Err(anyhow::anyhow!("watcher is not started"))
}
self.tx.send(event.into()).context("failed to send event")?;
Ok(())
}
}
pub async fn run<P: AsRef<path::Path>>(
&self,
path: P,
project_id: &ProjectId,
) -> Result<(), RunError> {
let (proxy_tx, mut proxy_rx) = unbounded_channel();
self.proxy_tx.lock().await.replace(proxy_tx.clone());
/// Run our file watcher processing loop in the background and let `handler` deal with them.
/// Return a handle to the watcher to allow interactions while it's running in the background.
/// Drop the handle to stop the watcher.
///
/// ### Important
///
/// It runs in such a way that each filesystem event is processed concurrently with others, which is why
/// spamming massive amounts of events should be avoided!
fn watch_in_background(
handler: handler::Handler,
path: impl AsRef<Path>,
project_id: ProjectId,
) -> Result<WatcherHandle, anyhow::Error> {
let (events_out, mut events_in) = unbounded_channel();
let dispatcher = self.dispatcher.clone();
let mut dispatcher_rx = match dispatcher.run(project_id, path.as_ref()) {
Ok(dispatcher_rx) => Ok(dispatcher_rx),
Err(dispatchers::RunError::PathNotFound(path)) => Err(RunError::PathNotFound(path)),
Err(error) => Err(error).context("failed to run dispatcher")?,
}?;
file_monitor::spawn(project_id, path.as_ref(), events_out.clone())?;
handler.reindex(project_id)?;
proxy_tx
.send(Event::IndexAll(*project_id))
.context("failed to send event")?;
let handle_event = |event: &Event| -> Result<()> {
task::spawn_blocking({
let project_id = project_id.to_string();
let handler = self.handler.clone();
let tx = proxy_tx.clone();
let event = event.clone();
move || {
futures::executor::block_on(async move {
match handler.handle(&event, time::SystemTime::now()).await {
Err(error) => tracing::error!(
project_id,
%event,
?error,
"failed to handle event",
),
Ok(events) => {
for e in events {
if let Err(error) = tx.send(e.clone()) {
tracing::error!(
project_id,
%event,
?error,
"failed to post event",
);
} else {
tracing::debug!(
project_id,
%event,
"sent response event",
);
}
}
}
}
});
}
let cancellation_token = CancellationToken::new();
let handle = WatcherHandle {
tx: events_out,
project_id,
cancellation_token: cancellation_token.clone(),
};
let handle_event = move |event: InternalEvent| -> Result<()> {
let handler = handler.clone();
// NOTE: Traditional parallelization (blocking) is required as `tokio::spawn()` on
// the `handler.handle()` future isn't `Send` as it keeps non-Send things
// across await points. Further, there is a fair share of `sync` IO happening
// as well, so nothing can really be done here.
task::spawn_blocking(move || {
futures::executor::block_on(async move {
handler.handle(event, time::SystemTime::now()).await.ok();
});
Ok(())
};
});
Ok(())
};
tokio::spawn(async move {
loop {
tokio::select! {
Some(event) = dispatcher_rx.recv() => handle_event(&event)?,
Some(event) = proxy_rx.recv() => handle_event(&event)?,
() = self.cancellation_token.cancelled() => {
self.dispatcher.stop();
Some(event) = events_in.recv() => handle_event(event)?,
() = cancellation_token.cancelled() => {
break;
}
}
}
Ok::<_, anyhow::Error>(())
});
Ok(())
}
Ok(handle)
}

View File

@ -1,75 +0,0 @@
mod file_change;
use std::path;
use anyhow::{Context, Result};
use gitbutler_core::projects::ProjectId;
use tokio::{
select,
sync::mpsc::{channel, Receiver},
task,
};
use tokio_util::sync::CancellationToken;
use super::events;
/// Forwards file-change notifications for a single project to a consumer.
///
/// NOTE(review): `Clone` appears to exist so a running dispatcher can be
/// stopped from another handle — confirm against callers.
#[derive(Clone)]
pub struct Dispatcher {
// The underlying filesystem watcher producing the raw change events.
file_change_dispatcher: file_change::Dispatcher,
// Cooperative shutdown signal awaited by the forwarding task in `run()`.
cancellation_token: CancellationToken,
}
/// Failure modes of [`Dispatcher::run`].
#[derive(Debug, thiserror::Error)]
pub enum RunError {
/// The path to watch does not exist.
#[error("{0} not found")]
PathNotFound(path::PathBuf),
/// Any other error, with context attached.
#[error(transparent)]
Other(#[from] anyhow::Error),
}
impl Dispatcher {
/// Create a new, not-yet-running dispatcher.
pub fn new() -> Self {
Self {
file_change_dispatcher: file_change::Dispatcher::new(),
cancellation_token: CancellationToken::new(),
}
}
/// Stop the inner file-change watcher, ending its stream of events.
pub fn stop(&self) {
self.file_change_dispatcher.stop();
}
/// Start watching `path` for project `project_id` and return a receiver
/// of the resulting events.
///
/// Consumes `self`; forwarding continues in a background task until the
/// cancellation token fires.
pub fn run<P: AsRef<path::Path>>(
self,
project_id: &ProjectId,
path: P,
) -> Result<Receiver<events::Event>, RunError> {
let path = path.as_ref();
// Surface a missing path as the dedicated `PathNotFound` variant so
// callers can react to it specifically; wrap everything else.
let mut file_change_rx = match self.file_change_dispatcher.run(project_id, path) {
Ok(file_change_rx) => Ok(file_change_rx),
Err(file_change::RunError::PathNotFound(path)) => Err(RunError::PathNotFound(path)),
Err(error) => Err(error).context("failed to run file change dispatcher")?,
}?;
// Bounded channel of size 1: applies backpressure to the file watcher
// when the consumer is slow.
let (tx, rx) = channel(1);
let project_id = *project_id;
// Forward events until cancelled; send failures are logged, not fatal.
// NOTE(review): nothing in this file cancels `cancellation_token` —
// confirm an owner elsewhere cancels it, otherwise the loop only ends
// with the task.
task::spawn(async move {
loop {
select! {
() = self.cancellation_token.cancelled() => {
break;
}
Some(event) = file_change_rx.recv() => {
if let Err(error) = tx.send(event).await {
tracing::error!(%project_id, ?error,"failed to send file change");
}
}
}
}
tracing::debug!(%project_id, "dispatcher stopped");
});
Ok(rx)
}
}

View File

@ -1,185 +0,0 @@
use std::{
path,
sync::{Arc, Mutex},
time::Duration,
};
use anyhow::{Context, Result};
use futures::executor::block_on;
use gitbutler_core::{git, projects::ProjectId};
use notify::{RecommendedWatcher, Watcher};
use notify_debouncer_full::{new_debouncer, Debouncer, FileIdMap};
use tokio::{
sync::mpsc::{channel, Receiver},
task,
};
use crate::watcher::events;
/// Watches a project directory for filesystem changes, debounced.
#[derive(Debug, Clone)]
pub struct Dispatcher {
// The debounced `notify` watcher; `None` until `run()` installs it and
// again after `stop()` drops it (dropping shuts the watch down).
watcher: Arc<Mutex<Option<Debouncer<RecommendedWatcher, FileIdMap>>>>,
}
/// The timeout for debouncing file change events.
/// This is used to prevent multiple events from being sent for a single file
/// change: bursts of filesystem events within this window are coalesced.
static DEBOUNCE_TIMEOUT: Duration = Duration::from_millis(100);
/// Failure modes of [`Dispatcher::run`].
#[derive(Debug, thiserror::Error)]
pub enum RunError {
/// The path to watch does not exist.
#[error("{0} not found")]
PathNotFound(path::PathBuf),
/// Any other error, with context attached.
#[error(transparent)]
Other(#[from] anyhow::Error),
}
impl Dispatcher {
/// Create a dispatcher with no active watcher.
pub fn new() -> Self {
Self {
watcher: Arc::new(Mutex::new(None)),
}
}
/// Stop watching by dropping the debouncer, which shuts the watch down.
pub fn stop(&self) {
self.watcher.lock().unwrap().take();
}
/// Watch `path` (the repository of `project_id`) recursively and return a
/// receiver of interesting file-change events.
///
/// Raw notifications are debounced by [`DEBOUNCE_TIMEOUT`], filtered by
/// [`is_interesting_kind`] and [`is_interesting_file`], then classified:
/// paths under `.git` become [`events::Event::GitFileChange`] (with the
/// `.git` prefix stripped), everything else becomes
/// [`events::Event::ProjectFileChange`].
pub fn run(
self,
project_id: &ProjectId,
path: &path::Path,
) -> Result<Receiver<events::Event>, RunError> {
let (notify_tx, notify_rx) = std::sync::mpsc::channel();
let mut debouncer = new_debouncer(DEBOUNCE_TIMEOUT, None, notify_tx)
.context("failed to create debouncer")?;
// Retry installing the watch for up to 30s with exponential backoff.
// A missing path is permanent and mapped to `PathNotFound`; I/O and
// config errors are permanent too, anything else is retried.
let policy = backoff::ExponentialBackoffBuilder::new()
.with_max_elapsed_time(Some(std::time::Duration::from_secs(30)))
.build();
backoff::retry(policy, || {
debouncer
.watcher()
.watch(path, notify::RecursiveMode::Recursive)
.map_err(|error| match error.kind {
notify::ErrorKind::PathNotFound => {
backoff::Error::permanent(RunError::PathNotFound(path.to_path_buf()))
}
notify::ErrorKind::Io(_) | notify::ErrorKind::InvalidConfig(_) => {
backoff::Error::permanent(RunError::Other(error.into()))
}
_ => backoff::Error::transient(RunError::Other(error.into())),
})
})
.context("failed to start watcher")?;
// The repository is needed below to check ignore rules.
let repo = git::Repository::open(path).context(format!(
"failed to open project repository: {}",
path.display()
))?;
// Keep the debouncer alive until `stop()` drops it.
self.watcher.lock().unwrap().replace(debouncer);
tracing::debug!(%project_id, "file watcher started");
// Bounded channel of size 1: applies backpressure to the watcher
// thread when the consumer is slow.
let (tx, rx) = channel(1);
// `notify_rx` is a blocking std receiver, so drain it on a blocking task.
task::spawn_blocking({
let path = path.to_path_buf();
let project_id = *project_id;
move || {
for result in notify_rx {
match result {
Err(errors) => {
tracing::error!(?errors, "file watcher error");
}
Ok(events) => {
// Reduce raw notifications to relevant file paths only.
let file_paths = events
.into_iter()
.filter(|event| is_interesting_kind(event.kind))
.flat_map(|event| event.paths.clone())
.filter(|file| is_interesting_file(&repo, file));
for file_path in file_paths {
match file_path.strip_prefix(&path) {
// An empty relative path is the watched root itself; ignore.
Ok(relative_file_path)
if relative_file_path.display().to_string().is_empty() =>
{ /* noop */ }
Ok(relative_file_path) => {
// Classify: `.git`-internal change vs. worktree change.
let event = if relative_file_path.starts_with(".git") {
tracing::info!(
%project_id,
file_path = %relative_file_path.display(),
"git file change",
);
events::Event::GitFileChange(
project_id,
relative_file_path
.strip_prefix(".git")
.unwrap()
.to_path_buf(),
)
} else {
tracing::info!(
%project_id,
file_path = %relative_file_path.display(),
"project file change",
);
events::Event::ProjectFileChange(
project_id,
relative_file_path.to_path_buf(),
)
};
// Blocking send from this non-async thread.
if let Err(error) = block_on(tx.send(event)) {
tracing::error!(
%project_id,
?error,
"failed to send file change event",
);
}
}
Err(error) => {
tracing::error!(%project_id, ?error, "failed to strip prefix");
}
}
}
}
}
}
tracing::debug!(%project_id, "file watcher stopped");
}
});
Ok(rx)
}
}
/// Tell whether a filesystem event `kind` is one we care about.
///
/// On unix we keep file creation and removal, plus data and name
/// modifications; everything else is filtered out.
#[cfg(target_family = "unix")]
fn is_interesting_kind(kind: notify::EventKind) -> bool {
    use notify::event::{CreateKind, ModifyKind, RemoveKind};
    match kind {
        notify::EventKind::Create(create) => matches!(create, CreateKind::File),
        notify::EventKind::Modify(modify) => {
            matches!(modify, ModifyKind::Data(_) | ModifyKind::Name(_))
        }
        notify::EventKind::Remove(remove) => matches!(remove, RemoveKind::File),
        _ => false,
    }
}
/// Tell whether a filesystem event `kind` is one we care about.
///
/// Windows reports less granular event kinds than unix, so any create,
/// modify or remove event is accepted.
#[cfg(target_os = "windows")]
fn is_interesting_kind(kind: notify::EventKind) -> bool {
    match kind {
        notify::EventKind::Create(_)
        | notify::EventKind::Modify(_)
        | notify::EventKind::Remove(_) => true,
        _ => false,
    }
}
/// Decide whether a changed `file_path` is relevant to us.
///
/// Paths inside the git directory are relevant only if they are one of a
/// small set of bookkeeping files; paths outside it are relevant unless the
/// repository ignores them.
fn is_interesting_file(git_repo: &git::Repository, file_path: &path::Path) -> bool {
    // `strip_prefix` succeeds exactly when the path lies inside the git dir,
    // so this replaces the starts_with-then-unwrap dance.
    match file_path.strip_prefix(git_repo.path()) {
        Ok(within_git_dir) => {
            if within_git_dir.ends_with("FETCH_HEAD") {
                return true;
            }
            ["logs/HEAD", "HEAD", "GB_FLUSH", "index"]
                .iter()
                .any(|name| within_git_dir == path::Path::new(name))
        }
        Err(_) => !git_repo.is_path_ignored(file_path).unwrap_or(false),
    }
}

View File

@ -1,102 +1,98 @@
use std::{fmt::Display, path};
use std::fmt::Display;
use std::path::PathBuf;
use gitbutler_core::{
deltas,
projects::ProjectId,
reader,
sessions::{self, SessionId},
};
use gitbutler_core::{projects::ProjectId, sessions};
use crate::{analytics, events};
/// An event for internal use, as merge between [super::file_monitor::Event] and [Event].
#[derive(Debug)]
pub(super) enum InternalEvent {
    // From public API
    /// Flush the given session of the given project.
    Flush(ProjectId, sessions::Session),
    /// Recompute and emit the virtual branches of the given project.
    CalculateVirtualBranches(ProjectId),
    /// Fetch gitbutler data for the given project.
    FetchGitbutlerData(ProjectId),
    /// Push gitbutler data for the given project.
    PushGitbutlerData(ProjectId),

    // From file monitor
    /// Paths (relative to the `.git` directory) of changed repository files.
    GitFilesChange(ProjectId, Vec<PathBuf>),
    /// Worktree-relative paths of changed project files.
    ProjectFilesChange(ProjectId, Vec<PathBuf>),
}
/// This type captures all operations that can be fed into a watcher that runs in the background.
// TODO(ST): This should not have to be implemented in the Watcher, figure out how this can be moved
// to application logic at least. However, it's called through a trait in `core`.
#[derive(Debug, PartialEq, Clone)]
pub enum Event {
Flush(ProjectId, sessions::Session),
CalculateVirtualBranches(ProjectId),
FetchGitbutlerData(ProjectId),
PushGitbutlerData(ProjectId),
PushProjectToGitbutler(ProjectId),
GitFileChange(ProjectId, path::PathBuf),
ProjectFileChange(ProjectId, path::PathBuf),
Session(ProjectId, sessions::Session),
SessionFile((ProjectId, SessionId, path::PathBuf, Option<reader::Content>)),
SessionDelta((ProjectId, SessionId, path::PathBuf, deltas::Delta)),
IndexAll(ProjectId),
Emit(events::Event),
Analytics(analytics::Event),
CalculateVirtualBranches(ProjectId),
CalculateDeltas(ProjectId, path::PathBuf),
FilterIgnoredFiles(ProjectId, path::PathBuf),
}
impl Event {
pub fn project_id(&self) -> &ProjectId {
pub fn project_id(&self) -> ProjectId {
match self {
Event::Analytics(event) => event.project_id(),
Event::Emit(event) => event.project_id(),
Event::IndexAll(project_id)
| Event::FetchGitbutlerData(project_id)
Event::FetchGitbutlerData(project_id)
| Event::Flush(project_id, _)
| Event::GitFileChange(project_id, _)
| Event::ProjectFileChange(project_id, _)
| Event::Session(project_id, _)
| Event::SessionFile((project_id, _, _, _))
| Event::SessionDelta((project_id, _, _, _))
| Event::CalculateVirtualBranches(project_id)
| Event::CalculateDeltas(project_id, _)
| Event::FilterIgnoredFiles(project_id, _)
| Event::PushGitbutlerData(project_id)
| Event::PushProjectToGitbutler(project_id) => project_id,
| Event::PushGitbutlerData(project_id) => *project_id,
}
}
}
impl Display for Event {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Event::Analytics(event) => write!(f, "Analytics({})", event),
Event::Emit(event) => write!(f, "Emit({})", event.name()),
Event::FetchGitbutlerData(pid) => {
write!(f, "FetchGitbutlerData({})", pid,)
}
Event::Flush(project_id, session) => write!(f, "Flush({}, {})", project_id, session.id),
Event::GitFileChange(project_id, path) => {
write!(f, "GitFileChange({}, {})", project_id, path.display())
}
Event::ProjectFileChange(project_id, path) => {
write!(f, "ProjectFileChange({}, {})", project_id, path.display())
}
Event::Session(pid, session) => write!(f, "Session({}, {})", pid, session.id),
Event::SessionFile((pid, session_id, path, _)) => {
write!(f, "File({}, {}, {})", pid, session_id, path.display())
}
Event::SessionDelta((pid, session_id, path, delta)) => {
write!(
f,
"Deltas({}, {}, {}, {})",
pid,
session_id,
path.display(),
delta.timestamp_ms
)
}
Event::CalculateVirtualBranches(pid) => write!(f, "VirtualBranch({})", pid),
Event::CalculateDeltas(project_id, path) => {
write!(f, "SessionProcessing({}, {})", project_id, path.display())
}
Event::FilterIgnoredFiles(project_id, path) => {
write!(f, "FilterIgnoredFiles({}, {})", project_id, path.display())
}
Event::PushGitbutlerData(pid) => write!(f, "PushGitbutlerData({})", pid),
Event::PushProjectToGitbutler(pid) => write!(f, "PushProjectToGitbutler({})", pid),
Event::IndexAll(pid) => write!(f, "IndexAll({})", pid),
impl From<Event> for InternalEvent {
fn from(value: Event) -> Self {
match value {
Event::Flush(a, b) => InternalEvent::Flush(a, b),
Event::CalculateVirtualBranches(v) => InternalEvent::CalculateVirtualBranches(v),
Event::FetchGitbutlerData(v) => InternalEvent::FetchGitbutlerData(v),
Event::PushGitbutlerData(v) => InternalEvent::PushGitbutlerData(v),
}
}
}
impl Display for InternalEvent {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
InternalEvent::FetchGitbutlerData(pid) => {
write!(f, "FetchGitbutlerData({})", pid,)
}
InternalEvent::Flush(project_id, session) => {
write!(f, "Flush({}, {})", project_id, session.id)
}
InternalEvent::GitFilesChange(project_id, paths) => {
write!(
f,
"GitFileChange({}, {})",
project_id,
comma_separated_paths(paths)
)
}
InternalEvent::ProjectFilesChange(project_id, paths) => {
write!(
f,
"ProjectFileChange({}, {})",
project_id,
comma_separated_paths(paths)
)
}
InternalEvent::CalculateVirtualBranches(pid) => write!(f, "VirtualBranch({})", pid),
InternalEvent::PushGitbutlerData(pid) => write!(f, "PushGitbutlerData({})", pid),
}
}
}
/// Render up to five of `paths` as a comma-separated list, appending a
/// `[…N more]` marker for however many were omitted. Paths that are not
/// valid UTF-8 are dropped from the listing (but still counted in the total).
fn comma_separated_paths(paths: &[PathBuf]) -> String {
    const MAX_LISTING: usize = 5;
    let mut shown = Vec::with_capacity(MAX_LISTING.min(paths.len()));
    for path in paths.iter().take(MAX_LISTING) {
        if let Some(utf8) = path.to_str() {
            shown.push(utf8);
        }
    }
    let listing = shown.join(", ");
    match paths.len().saturating_sub(MAX_LISTING) {
        0 => listing,
        hidden => format!("{listing} […{hidden} more]"),
    }
}

View File

@ -0,0 +1,226 @@
use std::collections::HashSet;
use std::path::Path;
use std::time::Duration;
use crate::watcher::events::InternalEvent;
use anyhow::{anyhow, Context, Result};
use gitbutler_core::{git, projects::ProjectId};
use notify::Watcher;
use notify_debouncer_full::new_debouncer;
use tokio::task;
use tracing::Level;
/// The timeout for debouncing file change events.
/// This is used to prevent multiple events from being sent for a single file change.
const DEBOUNCE_TIMEOUT: Duration = Duration::from_millis(100);
/// This error is required only because `anyhow::Error` isn't implementing `std::error::Error`, and [`spawn()`]
/// needs to wrap it into a `backoff::Error` which also has to implement the `Error` trait.
#[derive(Debug, thiserror::Error)]
#[error(transparent)]
struct RunError {
    // The wrapped error; `#[error(transparent)]` forwards `Display`/`source` to it.
    #[from]
    source: anyhow::Error,
}
/// Listen to interesting filesystem events of files in `path` that are not `.gitignore`d,
/// turn them into [`Events`](Event) which classifies it, and associates it with `project_id`.
/// These are sent through the passed `out` channel, to indicate either **Git** repository changes
/// or **ProjectWorktree** changes
///
/// ### Why is this not an iterator?
///
/// The internal `notify_rx` could be an iterator, which performs all transformations and returns them as item.
/// However, due to closures being continuously created each time events come in, nested closures need to own
/// their resources which means they are `Clone` or `Copy`. This isn't the case for `git::Repository`.
/// Even though `gix::Repository` is `Clone`, an efficient implementation of `is_path_ignored()` requires more state
/// that ideally is kept between invocations. For that reason, the current channel-based 'worker' architecture
/// is chosen to allow all this state to live on the stack.
///
/// Additionally, a channel plays better with how events are handled downstream.
pub fn spawn(
    project_id: ProjectId,
    worktree_path: &std::path::Path,
    out: tokio::sync::mpsc::UnboundedSender<InternalEvent>,
) -> Result<()> {
    // Raw notify events are batched by the debouncer for DEBOUNCE_TIMEOUT
    // before they arrive on `notify_rx`.
    let (notify_tx, notify_rx) = std::sync::mpsc::channel();
    let mut debouncer =
        new_debouncer(DEBOUNCE_TIMEOUT, None, notify_tx).context("failed to create debouncer")?;

    // Give up installing the watch entirely after 30s of failed attempts.
    let policy = backoff::ExponentialBackoffBuilder::new()
        .with_max_elapsed_time(Some(std::time::Duration::from_secs(30)))
        .build();

    // Start the watcher, but retry if there are transient errors.
    backoff::retry(policy, || {
        debouncer
            .watcher()
            .watch(worktree_path, notify::RecursiveMode::Recursive)
            .map_err(|err| match err.kind {
                // A missing worktree won't appear by retrying - fail right away.
                notify::ErrorKind::PathNotFound => backoff::Error::permanent(RunError::from(
                    anyhow!("{} not found", worktree_path.display()),
                )),
                notify::ErrorKind::Io(_) | notify::ErrorKind::InvalidConfig(_) => {
                    backoff::Error::permanent(RunError::from(anyhow::Error::from(err)))
                }
                _ => backoff::Error::transient(RunError::from(anyhow::Error::from(err))),
            })
    })
    .context("failed to start watcher")?;

    let worktree_path = worktree_path.to_owned();
    task::spawn_blocking(move || {
        tracing::debug!(%project_id, "file watcher started");
        // Keep the debouncer alive for as long as this task runs; dropping it
        // would tear down the underlying watcher.
        let _debouncer = debouncer;
        let _runtime = tracing::span!(Level::INFO, "file monitor", %project_id ).entered();
        'outer: for result in notify_rx {
            // Per-batch statistics, filled in as the batch is classified below.
            let stats = tracing::span!(
                Level::INFO,
                "handle debounced events",
                ignored = tracing::field::Empty,
                project = tracing::field::Empty,
                project_dedup = tracing::field::Empty,
                git = tracing::field::Empty,
                git_dedup = tracing::field::Empty,
                git_noop = tracing::field::Empty,
                fs_events = tracing::field::Empty,
            )
            .entered();
            let (mut ignored, mut git_noop) = (0, 0);
            match result {
                Err(err) => {
                    tracing::error!(?err, "ignored file watcher error");
                }
                Ok(events) => {
                    // Re-open the repository per batch; if it fails, every file is
                    // treated as a project file (see `classify_file` fallback).
                    let maybe_repo = git::Repository::open(&worktree_path).with_context(
                        || {
                            format!(
                                "failed to open project repository: {}",
                                worktree_path.display()
                            )
                        },
                    ).map(Some).unwrap_or_else(|err| {
                        tracing::error!(?err, "will consider changes to all files as repository couldn't be opened");
                        None
                    });
                    let num_events = events.len();
                    let classified_file_paths = events
                        .into_iter()
                        .filter(|event| is_interesting_kind(event.kind))
                        .flat_map(|event| event.event.paths)
                        .map(|file| {
                            let kind = maybe_repo
                                .as_ref()
                                .map_or(FileKind::Project, |repo| classify_file(repo, &file));
                            (file, kind)
                        });
                    // HashSets deduplicate paths that appear in several events of the batch.
                    let (mut stripped_git_paths, mut worktree_relative_paths) =
                        (HashSet::new(), HashSet::new());
                    for (file_path, kind) in classified_file_paths {
                        match kind {
                            FileKind::ProjectIgnored => ignored += 1,
                            FileKind::GitUninteresting => git_noop += 1,
                            FileKind::Project | FileKind::Git => {
                                match file_path.strip_prefix(&worktree_path) {
                                    Ok(relative_file_path) => {
                                        if relative_file_path.as_os_str().is_empty() {
                                            continue;
                                        }
                                        // Git paths are made relative to `.git`, project
                                        // paths relative to the worktree root.
                                        if let Ok(stripped) =
                                            relative_file_path.strip_prefix(".git")
                                        {
                                            stripped_git_paths.insert(stripped.to_owned());
                                        } else {
                                            worktree_relative_paths
                                                .insert(relative_file_path.to_owned());
                                        };
                                    }
                                    Err(err) => {
                                        tracing::error!(%project_id, ?err, "failed to strip prefix");
                                    }
                                }
                            }
                        }
                    }
                    stats.record("fs_events", num_events);
                    stats.record("ignored", ignored);
                    stats.record("git_noop", git_noop);
                    stats.record("git", stripped_git_paths.len());
                    stats.record("project", worktree_relative_paths.len());
                    if !stripped_git_paths.is_empty() {
                        let paths_dedup: Vec<_> = stripped_git_paths.into_iter().collect();
                        stats.record("git_dedup", paths_dedup.len());
                        let event = InternalEvent::GitFilesChange(project_id, paths_dedup);
                        // A closed receiver means the consumer is gone - stop monitoring.
                        if out.send(event).is_err() {
                            tracing::info!("channel closed - stopping file watcher");
                            break 'outer;
                        }
                    }
                    if !worktree_relative_paths.is_empty() {
                        let paths_dedup: Vec<_> = worktree_relative_paths.into_iter().collect();
                        stats.record("project_dedup", paths_dedup.len());
                        let event = InternalEvent::ProjectFilesChange(project_id, paths_dedup);
                        if out.send(event).is_err() {
                            tracing::info!("channel closed - stopping file watcher");
                            break 'outer;
                        }
                    }
                }
            }
        }
    });
    Ok(())
}
#[cfg(target_family = "unix")]
fn is_interesting_kind(kind: notify::EventKind) -> bool {
    // Only content-affecting events matter: file creation, data or name
    // modification, and file removal.
    use notify::event::{CreateKind, ModifyKind, RemoveKind};
    match kind {
        notify::EventKind::Create(CreateKind::File) => true,
        notify::EventKind::Modify(ModifyKind::Data(_) | ModifyKind::Name(_)) => true,
        notify::EventKind::Remove(RemoveKind::File) => true,
        _ => false,
    }
}
#[cfg(target_os = "windows")]
fn is_interesting_kind(kind: notify::EventKind) -> bool {
    // Windows reports coarser event kinds, so accept any create/modify/remove.
    use notify::EventKind;
    matches!(
        kind,
        EventKind::Create(_) | EventKind::Modify(_) | EventKind::Remove(_)
    )
}
/// A classification for a changed file.
enum FileKind {
    /// A file in the `.git` repository of the current project itself.
    Git,
    /// Like `Git`, but shouldn't have any effect.
    GitUninteresting,
    /// A file in the worktree of the current project.
    Project,
    /// A file that was ignored in the project, and thus shouldn't trigger a computation.
    ProjectIgnored,
}
/// Classify `file_path` relative to `git_repo`.
///
/// Inside the `.git` directory only a handful of well-known state files are
/// interesting; outside of it, the gitignore status decides.
fn classify_file(git_repo: &git::Repository, file_path: &Path) -> FileKind {
    match file_path.strip_prefix(git_repo.path()) {
        Ok(git_relative) => {
            let interesting = [
                Path::new("FETCH_HEAD"),
                Path::new("logs/HEAD"),
                Path::new("HEAD"),
                Path::new("GB_FLUSH"),
                Path::new("index"),
            ];
            if interesting.contains(&git_relative) {
                FileKind::Git
            } else {
                FileKind::GitUninteresting
            }
        }
        Err(_) => {
            // Ignore-check failures count as "not ignored".
            if git_repo.is_path_ignored(file_path).unwrap_or(false) {
                FileKind::ProjectIgnored
            } else {
                FileKind::Project
            }
        }
    }
}

View File

@ -0,0 +1,425 @@
mod calculate_deltas;
mod index;
mod push_project_to_gitbutler;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::{path, time};
use anyhow::{bail, Context, Result};
use gitbutler_core::projects::ProjectId;
use gitbutler_core::sessions::SessionId;
use gitbutler_core::virtual_branches::VirtualBranches;
use gitbutler_core::{
assets, deltas, gb_repository, git, project_repository, projects, reader, sessions, users,
virtual_branches,
};
use tauri::{AppHandle, Manager};
use tracing::instrument;
use super::events;
use crate::{analytics, events as app_events};
// NOTE: This is `Clone` as each incoming event is spawned onto a thread for processing.
#[derive(Clone)]
pub struct Handler {
    // The following fields are our currently required state as we are running in the background
    // and access it as filesystem events are processed. It's still to be decided how granular it
    // should be, and I can imagine having a top-level `app` handle that keeps the application state of
    // the tauri app, assuming that such application would not be `Send + Sync` everywhere and thus would
    // need extra protection.
    users: users::Controller,
    analytics: analytics::Client,
    // Base directory for the local gitbutler data repositories.
    local_data_dir: path::PathBuf,
    projects: projects::Controller,
    vbranch_controller: virtual_branches::Controller,
    assets_proxy: assets::Proxy,
    sessions_db: sessions::Database,
    deltas_db: deltas::Database,

    /// A function to send events - decoupled from app-handle for testing purposes.
    #[allow(clippy::type_complexity)]
    send_event: Arc<dyn Fn(&crate::events::Event) -> Result<()> + Send + Sync + 'static>,
}
impl Handler {
    /// Create a `Handler` from tauri-managed application state.
    ///
    /// Uses a default analytics client when none is registered, and wires
    /// `send_event` to forward events through the app handle.
    pub fn from_app(app: &AppHandle) -> Result<Self, anyhow::Error> {
        let app_data_dir = app
            .path_resolver()
            .app_data_dir()
            .context("failed to get app data dir")?;
        let analytics = app
            .try_state::<analytics::Client>()
            .map_or(analytics::Client::default(), |client| {
                client.inner().clone()
            });
        let users = app.state::<users::Controller>().inner().clone();
        let projects = app.state::<projects::Controller>().inner().clone();
        let vbranches = app.state::<virtual_branches::Controller>().inner().clone();
        let assets_proxy = app.state::<assets::Proxy>().inner().clone();
        let sessions_db = app.state::<sessions::Database>().inner().clone();
        let deltas_db = app.state::<deltas::Database>().inner().clone();
        Ok(Handler::new(
            app_data_dir.clone(),
            analytics,
            users,
            projects,
            vbranches,
            assets_proxy,
            sessions_db,
            deltas_db,
            {
                // Capture the app handle so events can be emitted to the frontend.
                let app = app.clone();
                move |event: &crate::events::Event| event.send(&app)
            },
        ))
    }
}
impl Handler {
    /// A constructor whose primary use is the test-suite.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        local_data_dir: PathBuf,
        analytics: analytics::Client,
        users: users::Controller,
        projects: projects::Controller,
        vbranch_controller: virtual_branches::Controller,
        assets_proxy: assets::Proxy,
        sessions_db: sessions::Database,
        deltas_db: deltas::Database,
        send_event: impl Fn(&crate::events::Event) -> Result<()> + Send + Sync + 'static,
    ) -> Self {
        Handler {
            local_data_dir,
            analytics,
            users,
            projects,
            vbranch_controller,
            assets_proxy,
            sessions_db,
            deltas_db,
            send_event: Arc::new(send_event),
        }
    }

    /// Handle the events that come in from the filesystem, or the public API.
    ///
    /// `now` is the timestamp recorded as fetch time by `fetch_gb_data`.
    #[instrument(skip(self, now), fields(event = %event), err(Debug))]
    pub(super) async fn handle(
        &self,
        event: events::InternalEvent,
        now: time::SystemTime,
    ) -> Result<()> {
        match event {
            events::InternalEvent::ProjectFilesChange(project_id, path) => {
                self.recalculate_everything(path, project_id).await
            }
            events::InternalEvent::GitFilesChange(project_id, paths) => self
                .git_files_change(paths, project_id)
                .await
                .context("failed to handle git file change event"),
            events::InternalEvent::PushGitbutlerData(project_id) => self
                .push_gb_data(project_id)
                .context("failed to push gitbutler data"),
            events::InternalEvent::FetchGitbutlerData(project_id) => self
                .fetch_gb_data(project_id, now)
                .await
                .context("failed to fetch gitbutler data"),
            events::InternalEvent::Flush(project_id, session) => self
                .flush_session(project_id, &session)
                .await
                .context("failed to handle flush session event"),
            events::InternalEvent::CalculateVirtualBranches(project_id) => self
                .calculate_virtual_branches(project_id)
                .await
                .context("failed to handle virtual branch event"),
        }
    }
}
impl Handler {
/// Deliver `event` to the frontend through the injected `send_event` callback.
fn emit_app_event(&self, event: &crate::events::Event) -> Result<()> {
    let sender = self.send_event.as_ref();
    sender(event).context("failed to send event")
}
/// Emit a `file` app event carrying the (optional) content of `file_path`
/// within the given session.
fn emit_session_file(
    &self,
    project_id: ProjectId,
    session_id: SessionId,
    file_path: &Path,
    contents: Option<&reader::Content>,
) -> Result<()> {
    let path_string = file_path.display().to_string();
    let event = app_events::Event::file(project_id, session_id, &path_string, contents);
    self.emit_app_event(&event)
}
/// Send an analytics event for the current user without blocking;
/// does nothing when no user is available.
fn send_analytics_event_none_blocking(&self, event: &analytics::Event) -> Result<()> {
    let user = self.users.get_user().context("failed to get user")?;
    if let Some(user) = user.as_ref() {
        self.analytics
            .send_non_anonymous_event_nonblocking(user, event);
    }
    Ok(())
}
/// Flush `session` into the gitbutler repository for `project_id`, index the
/// flushed session, then push gitbutler data and the project concurrently.
async fn flush_session(
    &self,
    project_id: ProjectId,
    session: &sessions::Session,
) -> Result<()> {
    let project = self
        .projects
        .get(&project_id)
        .context("failed to get project")?;
    let user = self.users.get_user()?;
    let project_repository =
        project_repository::Repository::open(&project).context("failed to open repository")?;
    let gb_repo = gb_repository::Repository::open(
        &self.local_data_dir,
        &project_repository,
        user.as_ref(),
    )
    .context("failed to open repository")?;
    // Shadow `session` with the flushed version returned by the repository.
    let session = gb_repo
        .flush_session(&project_repository, session, user.as_ref())
        .context(format!("failed to flush session {}", session.id))?;
    self.index_session(project_id, &session)?;

    // `push_gb_data` is blocking (git2); run it on a blocking thread while the
    // project push proceeds here, then join both.
    let push_gb_data = tokio::task::spawn_blocking({
        let this = self.clone();
        move || this.push_gb_data(project_id)
    });
    self.push_project_to_gitbutler(project_id, 1000).await?;
    push_gb_data.await??;
    Ok(())
}
/// List the virtual branches of `project_id`, proxy their assets, and emit
/// them to the frontend.
#[instrument(skip(self, project_id))]
async fn calculate_virtual_branches(&self, project_id: ProjectId) -> Result<()> {
    match self
        .vbranch_controller
        .list_virtual_branches(&project_id)
        .await
    {
        Ok((branches, _, skipped_files)) => {
            let branches = self.assets_proxy.proxy_virtual_branches(branches).await;
            self.emit_app_event(&app_events::Event::virtual_branches(
                project_id,
                &VirtualBranches {
                    branches,
                    skipped_files,
                },
            ))
        }
        // NOTE(review): a VerifyError is deliberately swallowed here - presumably
        // there is simply nothing to show in that case; confirm with callers.
        Err(err) if err.is::<virtual_branches::errors::VerifyError>() => Ok(()),
        Err(err) => Err(err.context("failed to list virtual branches").into()),
    }
}
/// NOTE: this is an honest non-async function, and it should stay that way to avoid
/// dealing with git2 repositories across await points, which aren't `Send`.
/// Push the gitbutler data repository of `project_id` to the remote.
fn push_gb_data(&self, project_id: ProjectId) -> Result<()> {
    let user = self.users.get_user()?;
    let project = self.projects.get(&project_id)?;
    let repo =
        project_repository::Repository::open(&project).context("failed to open repository")?;
    let gb_repository =
        gb_repository::Repository::open(&self.local_data_dir, &repo, user.as_ref())
            .context("failed to open repository")?;
    gb_repository
        .push(user.as_ref())
        .context("failed to push gb repo")
}
/// Fetch gitbutler data for `project_id`, retrying transient failures with
/// exponential backoff, record the outcome (with timestamp `now`) on the
/// project, and index any sessions that appeared through the fetch.
pub async fn fetch_gb_data(&self, project_id: ProjectId, now: time::SystemTime) -> Result<()> {
    let user = self.users.get_user()?;
    let project = self
        .projects
        .get(&project_id)
        .context("failed to get project")?;

    if !project.api.as_ref().map(|api| api.sync).unwrap_or_default() {
        bail!("sync disabled");
    }

    let project_repository =
        project_repository::Repository::open(&project).context("failed to open repository")?;
    let gb_repo = gb_repository::Repository::open(
        &self.local_data_dir,
        &project_repository,
        user.as_ref(),
    )
    .context("failed to open repository")?;
    // Remember pre-fetch sessions so that only newly arrived ones are indexed below.
    let sessions_before_fetch = gb_repo
        .get_sessions_iterator()?
        .filter_map(Result::ok)
        .collect::<Vec<_>>();

    // Retry for up to 10 minutes before giving up.
    let policy = backoff::ExponentialBackoffBuilder::new()
        .with_max_elapsed_time(Some(time::Duration::from_secs(10 * 60)))
        .build();
    let fetch_result = backoff::retry(policy, || {
        gb_repo.fetch(user.as_ref()).map_err(|err| {
            match err {
                // Network errors stop the retry loop immediately.
                gb_repository::RemoteError::Network => backoff::Error::permanent(err),
                err @ gb_repository::RemoteError::Other(_) => {
                    tracing::warn!(%project_id, ?err, will_retry = true, "failed to fetch project data");
                    backoff::Error::transient(err)
                }
            }
        })
    });
    let fetch_result = match fetch_result {
        Ok(()) => projects::FetchResult::Fetched { timestamp: now },
        Err(backoff::Error::Permanent(gb_repository::RemoteError::Network)) => {
            projects::FetchResult::Error {
                timestamp: now,
                error: "network error".to_string(),
            }
        }
        Err(error) => {
            tracing::error!(%project_id, ?error, will_retry=false, "failed to fetch gitbutler data");
            projects::FetchResult::Error {
                timestamp: now,
                error: error.to_string(),
            }
        }
    };
    // Persist the fetch outcome even on failure so the UI can reflect it.
    self.projects
        .update(&projects::UpdateRequest {
            id: project_id,
            gitbutler_data_last_fetched: Some(fetch_result),
            ..Default::default()
        })
        .await
        .context("failed to update fetched result")?;

    let sessions_after_fetch = gb_repo.get_sessions_iterator()?.filter_map(Result::ok);
    let new_sessions = sessions_after_fetch.filter(|s| !sessions_before_fetch.contains(s));
    for session in new_sessions {
        self.index_session(project_id, &session)?;
    }
    Ok(())
}
/// Recompute session deltas and virtual branches for the changed `paths` of
/// `project_id`, running both concurrently.
#[instrument(skip(self, paths, project_id), fields(paths = paths.len()))]
async fn recalculate_everything(
    &self,
    paths: Vec<PathBuf>,
    project_id: ProjectId,
) -> Result<()> {
    // Delta calculation is blocking work - run it on a blocking thread while
    // the async virtual-branch computation proceeds here; then join both.
    let calc_deltas = tokio::task::spawn_blocking({
        let this = self.clone();
        move || this.calculate_deltas(paths, project_id)
    });
    self.calculate_virtual_branches(project_id).await?;
    calc_deltas.await??;
    Ok(())
}
/// Convenience wrapper around [`Self::git_files_change`] for a single path.
pub async fn git_file_change(
    &self,
    path: impl Into<PathBuf>,
    project_id: ProjectId,
) -> Result<()> {
    let paths = vec![path.into()];
    self.git_files_change(paths, project_id).await
}
/// React to changed files inside the `.git` directory (paths are relative to
/// `.git`): emit frontend events and trigger follow-up work depending on
/// which well-known file changed.
pub async fn git_files_change(&self, paths: Vec<PathBuf>, project_id: ProjectId) -> Result<()> {
    let project = self
        .projects
        .get(&project_id)
        .context("failed to get project")?;
    // Deferred so the repository is only opened for paths that need it.
    let open_projects_repository = || {
        project_repository::Repository::open(&project)
            .context("failed to open project repository for project")
    };

    for path in paths {
        // Non-UTF-8 paths can't match any of the names below - skip them.
        let Some(file_name) = path.to_str() else {
            continue;
        };
        match file_name {
            // A fetch completed - notify the frontend and refresh virtual branches.
            "FETCH_HEAD" => {
                self.emit_app_event(&app_events::Event::git_fetch(project_id))?;
                self.calculate_virtual_branches(project_id).await?;
            }
            "logs/HEAD" => {
                self.emit_app_event(&app_events::Event::git_activity(project.id))?;
            }
            // GB_FLUSH is a marker file requesting a session flush; consume it.
            "GB_FLUSH" => {
                let user = self.users.get_user()?;
                let project_repository = open_projects_repository()?;
                let gb_repo = gb_repository::Repository::open(
                    &self.local_data_dir,
                    &project_repository,
                    user.as_ref(),
                )
                .context("failed to open repository")?;

                let gb_flush_path = project.path.join(".git/GB_FLUSH");
                if gb_flush_path.exists() {
                    if let Err(err) = std::fs::remove_file(&gb_flush_path) {
                        tracing::error!(%project_id, path = %gb_flush_path.display(), "GB_FLUSH file delete error: {err}");
                    }

                    if let Some(current_session) = gb_repo
                        .get_current_session()
                        .context("failed to get current session")?
                    {
                        self.flush_session(project.id, &current_session).await?;
                    }
                }
            }
            "HEAD" => {
                let project_repository = open_projects_repository()?;
                let head_ref = project_repository
                    .get_head()
                    .context("failed to get head")?;
                let head_ref_name = head_ref.name().context("failed to get head name")?;
                // HEAD moved off the integration branch - drop the integration ref.
                // NOTE(review): `find_reference` failure propagates via `?` - confirm
                // the reference is guaranteed to exist here.
                if head_ref_name.to_string() != "refs/heads/gitbutler/integration" {
                    let mut integration_reference = project_repository
                        .git_repository
                        .find_reference(&git::Refname::from(git::LocalRefname::new(
                            "gitbutler/integration",
                            None,
                        )))?;
                    integration_reference.delete()?;
                }
                if let Some(head) = head_ref.name() {
                    self.send_analytics_event_none_blocking(&analytics::Event::HeadChange {
                        project_id,
                        reference_name: head_ref_name.to_string(),
                    })?;
                    self.emit_app_event(&app_events::Event::git_head(
                        project_id,
                        &head.to_string(),
                    ))?;
                }
            }
            "index" => {
                self.emit_app_event(&app_events::Event::git_index(project.id))?;
            }
            _ => {}
        }
    }
    Ok(())
}
}

View File

@ -0,0 +1,178 @@
use anyhow::{Context, Result};
use gitbutler_core::{
deltas, gb_repository, project_repository, projects::ProjectId, reader, sessions,
};
use std::num::NonZeroUsize;
use std::path::{Path, PathBuf};
use tracing::instrument;
impl super::Handler {
/// Calculate and persist deltas for every file in `paths` within the current
/// session of `project_id`, fanning the work out over a small thread pool when
/// more than one path is given, then index the session.
#[instrument(skip(self, paths, project_id))]
pub fn calculate_deltas(&self, paths: Vec<PathBuf>, project_id: ProjectId) -> Result<()> {
    // Builds a per-thread `process` closure plus the session it operates on;
    // each worker gets its own repositories as they are not shareable.
    let make_processor = || -> Result<_> {
        let project = self
            .projects
            .get(&project_id)
            .context("failed to get project")?;
        let project_repository = project_repository::Repository::open(&project)
            .with_context(|| "failed to open project repository for project")?;
        let user = self.users.get_user().context("failed to get user")?;
        let gb_repository = gb_repository::Repository::open(
            &self.local_data_dir,
            &project_repository,
            user.as_ref(),
        )
        .context("failed to open gb repository")?;

        // If current session's branch is not the same as the project's head, flush it first.
        if let Some(session) = gb_repository
            .get_current_session()
            .context("failed to get current session")?
        {
            let project_head = project_repository
                .get_head()
                .context("failed to get head")?;
            if session.meta.branch != project_head.name().map(|n| n.to_string()) {
                gb_repository
                    .flush_session(&project_repository, &session, user.as_ref())
                    .context(format!("failed to flush session {}", session.id))?;
            }
        }

        let current_session = gb_repository
            .get_or_create_current_session()
            .context("failed to get or create current session")?;
        let session = current_session.clone();

        // Returns Ok(false) when the file produced no new delta, Ok(true) otherwise.
        let process = move |path: &Path| -> Result<bool> {
            let _span = tracing::span!(tracing::Level::TRACE, "processing", ?path).entered();
            let current_session_reader =
                sessions::Reader::open(&gb_repository, &current_session)
                    .context("failed to get session reader")?;
            let deltas_reader = deltas::Reader::new(&current_session_reader);
            let writer =
                deltas::Writer::new(&gb_repository).context("failed to open deltas writer")?;
            // `NotFound` means the file is absent - represented as `None` below.
            let current_wd_file_content = match Self::file_content(&project_repository, path) {
                Ok(content) => Some(content),
                Err(reader::Error::NotFound) => None,
                Err(err) => Err(err).context("failed to get file content")?,
            };
            let latest_file_content = match current_session_reader.file(path) {
                Ok(content) => Some(content),
                Err(reader::Error::NotFound) => None,
                Err(err) => Err(err).context("failed to get file content")?,
            };
            let current_deltas = deltas_reader
                .read_file(path)
                .context("failed to get file deltas")?;
            let mut text_doc = deltas::Document::new(
                latest_file_content.as_ref(),
                current_deltas.unwrap_or_default(),
            )?;
            let new_delta = text_doc
                .update(current_wd_file_content.as_ref())
                .context("failed to calculate new deltas")?;

            // No change detected - nothing to persist or announce.
            let Some(new_delta) = new_delta else {
                return Ok(false);
            };

            let deltas = text_doc.get_deltas();
            writer
                .write(path, &deltas)
                .context("failed to write deltas")?;

            // Mirror the working-directory state: UTF-8 content verbatim,
            // non-UTF-8 as empty, missing files removed.
            match &current_wd_file_content {
                Some(reader::Content::UTF8(text)) => writer.write_wd_file(path, text),
                Some(_) => writer.write_wd_file(path, ""),
                None => writer.remove_wd_file(path),
            }?;

            let session_id = current_session.id;
            self.emit_session_file(project_id, session_id, path, latest_file_content.as_ref())?;
            self.index_deltas(
                project_id,
                session_id,
                path,
                std::slice::from_ref(&new_delta),
            )
            .context("failed to index deltas")?;
            self.emit_app_event(&crate::events::Event::deltas(
                project_id,
                session_id,
                std::slice::from_ref(&new_delta),
                path,
            ))?;
            Ok(true)
        };
        Ok((process, session))
    };

    let num_paths = paths.len();
    let num_no_delta = std::thread::scope(|scope| -> Result<usize> {
        // One thread per core, but never more threads than paths.
        let num_threads = std::thread::available_parallelism()
            .unwrap_or(NonZeroUsize::new(1).unwrap())
            .get()
            .min(paths.len());
        let mut num_no_delta = 0;
        let current_session = if num_threads < 2 {
            // Single-threaded fast path: process inline without channels.
            let (process, session) = make_processor()?;
            for path in paths {
                if !process(path.as_path())? {
                    num_no_delta += 1;
                }
            }
            session
        } else {
            let (threads, tx) = {
                let (tx, rx) = crossbeam_channel::bounded::<PathBuf>(num_threads);
                let threads: Vec<_> = (0..num_threads)
                    .map(|id| {
                        std::thread::Builder::new()
                            .name(format!("gitbutler_delta_thread_{id}"))
                            .stack_size(512 * 1024)
                            .spawn_scoped(scope, {
                                let rx = rx.clone();
                                || -> Result<usize> {
                                    let mut num_no_delta = 0;
                                    let (process, _) = make_processor()?;
                                    for path in rx {
                                        if !process(path.as_path())? {
                                            num_no_delta += 1;
                                        }
                                    }
                                    Ok(num_no_delta)
                                }
                            })
                            .expect("worker thread can be created")
                    })
                    .collect();
                (threads, tx)
            };
            for path in paths {
                tx.send(path).expect("many receivers");
            }
            // Close the channel so the worker loops terminate, then collect counts.
            drop(tx);
            for thread in threads {
                num_no_delta += thread.join().unwrap()?;
            }
            // Re-run the setup to obtain the now-current session for indexing
            // (workers kept their own copies). NOTE(review): confirm this is
            // only needed for the session, not its flush side-effects.
            let (_, session) = make_processor()?;
            session
        };
        self.index_session(project_id, &current_session)?;
        Ok(num_no_delta)
    })?;
    tracing::debug!(%project_id, paths_without_deltas = num_no_delta, paths_with_delta = num_paths - num_no_delta);
    Ok(())
}
fn file_content(
project_repository: &project_repository::Repository,
path: &Path,
) -> Result<reader::Content, reader::Error> {
let full_path = project_repository.project().path.join(path);
if !full_path.exists() {
return Err(reader::Error::NotFound);
}
Ok(reader::Content::read_from_file(&full_path)?)
}
}

View File

@ -0,0 +1,92 @@
use std::path::Path;
use anyhow::{Context, Result};
use gitbutler_core::{
deltas, gb_repository, project_repository,
projects::ProjectId,
sessions::{self, SessionId},
};
use crate::events as app_events;
impl super::Handler {
/// Persist `deltas` for `file_path` of the given session into the deltas database.
pub(super) fn index_deltas(
    &self,
    project_id: ProjectId,
    session_id: SessionId,
    file_path: &Path,
    deltas: &[deltas::Delta],
) -> Result<()> {
    let insertion = self
        .deltas_db
        .insert(&project_id, &session_id, file_path, deltas);
    insertion.context("failed to insert deltas into database")
}
/// Re-run indexing for every session of `project_id`.
pub(in crate::watcher) fn reindex(&self, project_id: ProjectId) -> Result<()> {
    let user = self.users.get_user()?;
    let project = self.projects.get(&project_id)?;
    let repo =
        project_repository::Repository::open(&project).context("failed to open repository")?;
    let gb_repository =
        gb_repository::Repository::open(&self.local_data_dir, &repo, user.as_ref())
            .context("failed to open repository")?;

    for session in gb_repository.get_sessions_iterator()? {
        let session = session?;
        self.process_session(&gb_repository, &session)?;
    }
    Ok(())
}
/// Index a single `session` of `project_id` into the database.
pub(super) fn index_session(
    &self,
    project_id: ProjectId,
    session: &sessions::Session,
) -> Result<()> {
    let project = self.projects.get(&project_id)?;
    let repo =
        project_repository::Repository::open(&project).context("failed to open repository")?;
    let user = self.users.get_user()?;
    let gb_repository =
        gb_repository::Repository::open(&self.local_data_dir, &repo, user.as_ref())
            .context("failed to open repository")?;
    self.process_session(&gb_repository, session)
}
/// Write `session` and its deltas into the database (skipping sessions that
/// are already stored unchanged), then emit a `session` app event.
fn process_session(
    &self,
    gb_repository: &gb_repository::Repository,
    session: &sessions::Session,
) -> Result<()> {
    let project_id = gb_repository.get_project_id();

    // now, index session if it has changed to the database.
    let from_db = self.sessions_db.get_by_id(&session.id)?;
    if from_db.map_or(false, |from_db| from_db == *session) {
        return Ok(());
    }

    self.sessions_db
        .insert(project_id, &[session])
        .context("failed to insert session into database")?;

    let session_reader = sessions::Reader::open(gb_repository, session)?;
    let deltas_reader = deltas::Reader::new(&session_reader);
    // Index every file's deltas belonging to this session.
    for (file_path, deltas) in deltas_reader
        .read(None)
        .context("could not list deltas for session")?
    {
        self.index_deltas(*project_id, session.id, &file_path, &deltas)?;
    }
    (self.send_event)(&app_events::Event::session(*project_id, session))?;
    Ok(())
}
}

View File

@ -1,6 +1,7 @@
use std::{path, sync::Arc, time};
use std::time;
use anyhow::{Context, Result};
use gitbutler_core::id::Id;
use gitbutler_core::{
gb_repository,
git::{self, Oid, Repository},
@ -9,81 +10,32 @@ use gitbutler_core::{
users,
};
use itertools::Itertools;
use tauri::{AppHandle, Manager};
use tokio::sync::Mutex;
use super::events;
#[derive(Clone)]
pub struct Handler {
inner: Arc<Mutex<State>>,
}
impl TryFrom<&AppHandle> for Handler {
type Error = anyhow::Error;
fn try_from(value: &AppHandle) -> std::result::Result<Self, Self::Error> {
if let Some(handler) = value.try_state::<Handler>() {
Ok(handler.inner().clone())
} else if let Some(app_data_dir) = value.path_resolver().app_data_dir() {
let projects = value.state::<projects::Controller>().inner().clone();
let users = value.state::<users::Controller>().inner().clone();
let handler = Handler::new(app_data_dir, projects, users, 1000);
value.manage(handler.clone());
Ok(handler)
} else {
Err(anyhow::anyhow!("failed to get app data dir"))
}
}
}
impl Handler {
pub fn new(
local_data_dir: path::PathBuf,
project_store: projects::Controller,
users: users::Controller,
impl super::Handler {
pub async fn push_project_to_gitbutler(
&self,
project_id: ProjectId,
batch_size: usize,
) -> Self {
Self {
inner: Arc::new(Mutex::new(State {
local_data_dir,
project_store,
users,
batch_size,
})),
}
}
pub async fn handle(&self, project_id: &ProjectId) -> Result<Vec<events::Event>> {
if let Ok(state) = self.inner.try_lock() {
Self::handle_inner(&state, project_id).await
} else {
Ok(vec![])
}
}
async fn handle_inner(state: &State, project_id: &ProjectId) -> Result<Vec<events::Event>> {
let project = state
.project_store
.get(project_id)
) -> Result<()> {
let project = self
.projects
.get(&project_id)
.context("failed to get project")?;
if !project.is_sync_enabled() || !project.has_code_url() {
return Ok(vec![]);
return Ok(());
}
let user = state.users.get_user()?;
let user = self.users.get_user()?;
let project_repository =
project_repository::Repository::open(&project).context("failed to open repository")?;
let gb_code_last_commit = project
.gitbutler_code_push_state
.as_ref()
.map(|state| &state.id)
.copied();
let gb_repository = gb_repository::Repository::open(
&state.local_data_dir,
&self.local_data_dir,
&project_repository,
user.as_ref(),
)?;
@ -92,50 +44,73 @@ impl Handler {
.context("failed to open gb repo")?
.context("failed to get default target")?;
let target_changed = !gb_code_last_commit
.map(|id| id == default_target.sha)
.unwrap_or_default();
let target_changed = gb_code_last_commit.map_or(true, |id| id != default_target.sha);
if target_changed {
match Self::push_target(
state,
&project_repository,
&default_target,
gb_code_last_commit,
project_id,
&user,
)
.await
match self
.push_target(
&project_repository,
&default_target,
gb_code_last_commit,
project_id,
user.as_ref(),
batch_size,
)
.await
{
Ok(()) => {}
Err(project_repository::RemoteError::Network) => return Ok(vec![]),
Err(project_repository::RemoteError::Network) => return Ok(()),
Err(err) => return Err(err).context("failed to push"),
};
}
match push_all_refs(&project_repository, &user, project_id) {
Ok(()) => {}
Err(project_repository::RemoteError::Network) => return Ok(vec![]),
Err(err) => return Err(err).context("failed to push"),
};
tokio::task::spawn_blocking(move || -> Result<()> {
match push_all_refs(&project_repository, user.as_ref(), project_id) {
Ok(()) => Ok(()),
Err(project_repository::RemoteError::Network) => Ok(()),
Err(err) => Err(err).context("failed to push"),
}
})
.await??;
// make sure last push time is updated
Self::update_project(state, project_id, &default_target.sha).await?;
self.update_project(project_id, default_target.sha).await?;
Ok(())
}
}
Ok(vec![])
/// Currently required to make functionality testable without requiring a `Handler` with all of its state.
impl super::Handler {
async fn update_project(
&self,
project_id: Id<projects::Project>,
id: Oid,
) -> Result<(), project_repository::RemoteError> {
self.projects
.update(&projects::UpdateRequest {
id: project_id,
gitbutler_code_push_state: Some(CodePushState {
id,
timestamp: time::SystemTime::now(),
}),
..Default::default()
})
.await
.context("failed to update last push")?;
Ok(())
}
async fn push_target(
state: &State,
&self,
project_repository: &project_repository::Repository,
default_target: &gitbutler_core::virtual_branches::target::Target,
gb_code_last_commit: Option<Oid>,
project_id: &gitbutler_core::id::Id<projects::Project>,
user: &Option<users::User>,
project_id: Id<projects::Project>,
user: Option<&users::User>,
batch_size: usize,
) -> Result<(), project_repository::RemoteError> {
let ids = batch_rev_walk(
&project_repository.git_repository,
state.batch_size,
batch_size,
default_target.sha,
gb_code_last_commit,
)?;
@ -146,14 +121,12 @@ impl Handler {
"batches left to push",
);
let id_count = &ids.len();
let id_count = ids.len();
for (idx, id) in ids.iter().enumerate().rev() {
let refspec = format!("+{}:refs/push-tmp/{}", id, project_id);
project_repository.push_to_gitbutler_server(user.as_ref(), &[&refspec])?;
Self::update_project(state, project_id, id).await?;
project_repository.push_to_gitbutler_server(user, &[&refspec])?;
self.update_project(project_id, *id).await?;
tracing::info!(
%project_id,
@ -164,57 +137,26 @@ impl Handler {
}
project_repository.push_to_gitbutler_server(
user.as_ref(),
user,
&[&format!("+{}:refs/{}", default_target.sha, project_id)],
)?;
//TODO: remove push-tmp ref
tracing::info!(
%project_id,
"project target ref fully pushed",
);
Ok(())
}
async fn update_project(
state: &State,
project_id: &gitbutler_core::id::Id<projects::Project>,
id: &Oid,
) -> Result<(), project_repository::RemoteError> {
state
.project_store
.update(&projects::UpdateRequest {
id: *project_id,
gitbutler_code_push_state: Some(CodePushState {
id: *id,
timestamp: time::SystemTime::now(),
}),
..Default::default()
})
.await
.context("failed to update last push")?;
Ok(())
}
}
struct State {
local_data_dir: path::PathBuf,
project_store: projects::Controller,
users: users::Controller,
batch_size: usize,
}
fn push_all_refs(
project_repository: &project_repository::Repository,
user: &Option<users::User>,
project_id: &gitbutler_core::id::Id<projects::Project>,
user: Option<&users::User>,
project_id: Id<projects::Project>,
) -> Result<(), project_repository::RemoteError> {
let gb_references = collect_refs(project_repository)?;
let all_refs = gb_references
let all_refs: Vec<_> = gb_references
.iter()
.filter(|r| {
matches!(
@ -223,20 +165,16 @@ fn push_all_refs(
)
})
.map(|r| format!("+{}:{}", r, r))
.collect::<Vec<_>>();
let all_refs = all_refs.iter().map(String::as_str).collect::<Vec<_>>();
let anything_pushed =
project_repository.push_to_gitbutler_server(user.as_ref(), all_refs.as_slice())?;
.collect();
let all_refs: Vec<_> = all_refs.iter().map(String::as_str).collect();
let anything_pushed = project_repository.push_to_gitbutler_server(user, &all_refs)?;
if anything_pushed {
tracing::info!(
%project_id,
"refs pushed",
);
}
Ok(())
}
@ -261,7 +199,6 @@ fn batch_rev_walk(
revwalk
.push(from.into())
.context(format!("failed to push {}", from))?;
if let Some(oid) = until {
revwalk
.hide(oid.into())
@ -269,12 +206,13 @@ fn batch_rev_walk(
}
let mut oids = Vec::new();
oids.push(from);
let from = from.into();
for batch in &revwalk.chunks(batch_size) {
if let Some(oid) = batch.last() {
let oid = oid.context("failed to get oid")?;
if oid != from.into() {
oids.push(oid.into());
}
let Some(oid) = batch.last() else { continue };
let oid = oid.context("failed to get oid")?;
if oid != from {
oids.push(oid.into());
}
}
Ok(oids)

View File

@ -1,196 +0,0 @@
mod analytics_handler;
pub mod calculate_deltas_handler;
mod caltulate_virtual_branches_handler;
pub mod fetch_gitbutler_data;
mod filter_ignored_files;
mod flush_session;
pub mod git_file_change;
mod index_handler;
mod push_gitbutler_data;
pub mod push_project_to_gitbutler;
use std::time;
use anyhow::{Context, Result};
use tauri::{AppHandle, Manager};
use tracing::instrument;
use super::events;
use crate::events as app_events;
/// Aggregates all watcher event sub-handlers; `handle` routes each
/// `events::Event` variant to the specialized handler responsible for it.
#[derive(Clone)]
pub struct Handler {
    git_file_change_handler: git_file_change::Handler,
    flush_session_handler: flush_session::Handler,
    fetch_gitbutler_handler: fetch_gitbutler_data::Handler,
    push_gitbutler_handler: push_gitbutler_data::Handler,
    analytics_handler: analytics_handler::Handler,
    index_handler: index_handler::Handler,
    push_project_to_gitbutler: push_project_to_gitbutler::Handler,
    calculate_vbranches_handler: caltulate_virtual_branches_handler::Handler,
    calculate_deltas_handler: calculate_deltas_handler::Handler,
    filter_ignored_files_handler: filter_ignored_files::Handler,
    // Outbound sink for `app_events::Event`s (used by the `Emit` arm of `handle`).
    events_sender: app_events::Sender,
}
impl TryFrom<&AppHandle> for Handler {
type Error = anyhow::Error;
fn try_from(value: &AppHandle) -> Result<Self, Self::Error> {
if let Some(handler) = value.try_state::<Handler>() {
Ok(handler.inner().clone())
} else {
let handler = Handler::new(
git_file_change::Handler::try_from(value)?,
flush_session::Handler::try_from(value)?,
fetch_gitbutler_data::Handler::try_from(value)?,
push_gitbutler_data::Handler::try_from(value)?,
analytics_handler::Handler::try_from(value)?,
index_handler::Handler::try_from(value)?,
push_project_to_gitbutler::Handler::try_from(value)?,
caltulate_virtual_branches_handler::Handler::try_from(value)?,
calculate_deltas_handler::Handler::try_from(value)?,
filter_ignored_files::Handler::try_from(value)?,
app_events::Sender::try_from(value)?,
);
value.manage(handler.clone());
Ok(handler)
}
}
}
/// Composite event handler: routes each incoming `events::Event` to the
/// matching sub-handler and returns any follow-up events to enqueue.
impl Handler {
    /// Wire up all sub-handlers. Private: construction happens once through
    /// `TryFrom<&AppHandle>`, which also registers the instance with Tauri.
    #[allow(clippy::too_many_arguments)]
    fn new(
        git_file_change_handler: git_file_change::Handler,
        flush_session_handler: flush_session::Handler,
        fetch_gitbutler_handler: fetch_gitbutler_data::Handler,
        push_gitbutler_handler: push_gitbutler_data::Handler,
        analytics_handler: analytics_handler::Handler,
        index_handler: index_handler::Handler,
        push_project_to_gitbutler: push_project_to_gitbutler::Handler,
        calculate_vbranches_handler: caltulate_virtual_branches_handler::Handler,
        calculate_deltas_handler: calculate_deltas_handler::Handler,
        filter_ignored_files_handler: filter_ignored_files::Handler,
        events_sender: app_events::Sender,
    ) -> Self {
        Self {
            git_file_change_handler,
            flush_session_handler,
            fetch_gitbutler_handler,
            push_gitbutler_handler,
            analytics_handler,
            index_handler,
            push_project_to_gitbutler,
            calculate_vbranches_handler,
            calculate_deltas_handler,
            filter_ignored_files_handler,
            events_sender,
        }
    }

    /// Dispatch `event` and return the follow-up events it produced.
    /// `now` is the observation time of the event; only the fetch handler
    /// consumes it.
    #[instrument(skip(self), fields(event = %event), level = "debug")]
    pub async fn handle(
        &self,
        event: &events::Event,
        now: time::SystemTime,
    ) -> Result<Vec<events::Event>> {
        match event {
            // Work-tree changes are first re-queued through the ignore filter.
            events::Event::ProjectFileChange(project_id, path) => {
                Ok(vec![events::Event::FilterIgnoredFiles(
                    *project_id,
                    path.clone(),
                )])
            }
            events::Event::FilterIgnoredFiles(project_id, path) => self
                .filter_ignored_files_handler
                .handle(path, project_id)
                .context("failed to handle filter ignored files event"),
            events::Event::GitFileChange(project_id, path) => self
                .git_file_change_handler
                .handle(path, project_id)
                .context("failed to handle git file change event"),
            events::Event::PushGitbutlerData(project_id) => self
                .push_gitbutler_handler
                .handle(project_id)
                .context("failed to push gitbutler data"),
            events::Event::PushProjectToGitbutler(project_id) => self
                .push_project_to_gitbutler
                .handle(project_id)
                .await
                .context("failed to push project to gitbutler"),
            events::Event::FetchGitbutlerData(project_id) => self
                .fetch_gitbutler_handler
                .handle(project_id, &now)
                .await
                .context("failed to fetch gitbutler data"),
            events::Event::Flush(project_id, session) => self
                .flush_session_handler
                .handle(project_id, session)
                .context("failed to handle flush session event"),
            // Forward a session file snapshot straight to the frontend.
            events::Event::SessionFile((project_id, session_id, file_path, contents)) => {
                Ok(vec![events::Event::Emit(app_events::Event::file(
                    project_id,
                    session_id,
                    &file_path.display().to_string(),
                    contents.as_ref(),
                ))])
            }
            // Index the single delta, then also emit it to the frontend.
            events::Event::SessionDelta((project_id, session_id, path, delta)) => {
                self.index_handler
                    .index_deltas(project_id, session_id, path, &vec![delta.clone()])
                    .context("failed to index deltas")?;
                Ok(vec![events::Event::Emit(app_events::Event::deltas(
                    project_id,
                    session_id,
                    &vec![delta.clone()],
                    path,
                ))])
            }
            events::Event::CalculateVirtualBranches(project_id) => self
                .calculate_vbranches_handler
                .handle(project_id)
                .await
                .context("failed to handle virtual branch event"),
            events::Event::CalculateDeltas(project_id, path) => self
                .calculate_deltas_handler
                .handle(path, project_id)
                .context(format!(
                    "failed to handle session processing event: {:?}",
                    path.display()
                )),
            // Terminal arm: push the event out to the application, no follow-ups.
            events::Event::Emit(event) => {
                self.events_sender
                    .send(event)
                    .context("failed to send event")?;
                Ok(vec![])
            }
            events::Event::Analytics(event) => self
                .analytics_handler
                .handle(event)
                .await
                .context("failed to handle analytics event"),
            events::Event::Session(project_id, session) => self
                .index_handler
                .index_session(project_id, session)
                .context("failed to index session"),
            events::Event::IndexAll(project_id) => self.index_handler.reindex(project_id),
        }
    }
}

View File

@ -1,45 +0,0 @@
use anyhow::{Context, Result};
use gitbutler_core::users;
use tauri::{AppHandle, Manager};
use super::events;
use crate::analytics;
/// Sends analytics events on behalf of the currently logged-in user, if any.
#[derive(Clone)]
pub struct Handler {
    // Used to look up the logged-in user; logged-out means events are dropped.
    users: users::Controller,
    client: analytics::Client,
}
impl TryFrom<&AppHandle> for Handler {
type Error = anyhow::Error;
fn try_from(value: &AppHandle) -> Result<Self, Self::Error> {
if let Some(handler) = value.try_state::<Handler>() {
Ok(handler.inner().clone())
} else {
let client = value
.try_state::<analytics::Client>()
.map_or(analytics::Client::default(), |client| {
client.inner().clone()
});
let users = value.state::<users::Controller>().inner().clone();
let handler = Handler::new(users, client);
value.manage(handler.clone());
Ok(handler)
}
}
}
impl Handler {
    /// Bundle the user store and analytics client into a handler.
    fn new(users: users::Controller, client: analytics::Client) -> Handler {
        Handler { users, client }
    }

    /// Send `event` for the logged-in user; a logged-out state is a no-op.
    /// Never produces follow-up events.
    pub async fn handle(&self, event: &analytics::Event) -> Result<Vec<events::Event>> {
        let user = self.users.get_user().context("failed to get user")?;
        match user {
            Some(user) => self.client.send(&user, event).await,
            None => {}
        }
        Ok(vec![])
    }
}

View File

@ -1,182 +0,0 @@
use std::{path, vec};
use anyhow::{Context, Result};
use gitbutler_core::{
deltas, gb_repository, project_repository,
projects::{self, ProjectId},
reader, sessions, users,
};
use tauri::{AppHandle, Manager};
use super::events;
/// Computes text deltas for a changed work-tree file and records them in the
/// project's current GitButler session.
#[derive(Clone)]
pub struct Handler {
    // Passed to `gb_repository::Repository::open` as the data directory.
    local_data_dir: path::PathBuf,
    projects: projects::Controller,
    users: users::Controller,
}
impl TryFrom<&AppHandle> for Handler {
type Error = anyhow::Error;
fn try_from(value: &AppHandle) -> Result<Self, Self::Error> {
if let Some(handler) = value.try_state::<Handler>() {
Ok(handler.inner().clone())
} else if let Some(app_data_dir) = value.path_resolver().app_data_dir() {
let handler = Self::new(
app_data_dir,
value.state::<projects::Controller>().inner().clone(),
value.state::<users::Controller>().inner().clone(),
);
value.manage(handler.clone());
Ok(handler)
} else {
Err(anyhow::anyhow!("failed to get app data dir"))
}
}
}
impl Handler {
    /// Bundle the data directory with the project and user stores.
    fn new(
        local_data_dir: path::PathBuf,
        projects: projects::Controller,
        users: users::Controller,
    ) -> Self {
        Self {
            local_data_dir,
            projects,
            users,
        }
    }

    /// Build a handler whose controllers all operate below `path`.
    pub fn from_path<P: AsRef<std::path::Path>>(path: P) -> Self {
        Self::new(
            path.as_ref().to_path_buf(),
            projects::Controller::from_path(&path),
            users::Controller::from_path(path),
        )
    }

    // Read the current work-tree content at `path`; `Err(NotFound)` when the
    // file is git-ignored or does not exist on disk.
    fn get_current_file(
        project_repository: &project_repository::Repository,
        path: &std::path::Path,
    ) -> Result<reader::Content, reader::Error> {
        // Ignored paths are treated exactly like missing files.
        if project_repository.is_path_ignored(path).unwrap_or(false) {
            return Err(reader::Error::NotFound);
        }
        let full_path = project_repository.project().path.join(path);
        if !full_path.exists() {
            return Err(reader::Error::NotFound);
        }
        Ok(reader::Content::read_from_file(&full_path)?)
    }

    /// React to a change of the work-tree file at `path`: diff it against the
    /// last content recorded in the current session and, if the document
    /// changed, persist the new delta and emit session/delta events.
    pub fn handle<P: AsRef<std::path::Path>>(
        &self,
        path: P,
        project_id: &ProjectId,
    ) -> Result<Vec<events::Event>> {
        let project = self
            .projects
            .get(project_id)
            .context("failed to get project")?;
        let project_repository = project_repository::Repository::open(&project)
            .with_context(|| "failed to open project repository for project")?;
        let user = self.users.get_user().context("failed to get user")?;
        let gb_repository = gb_repository::Repository::open(
            &self.local_data_dir,
            &project_repository,
            user.as_ref(),
        )
        .context("failed to open gb repository")?;

        // If current session's branch is not the same as the project's head, flush it first.
        if let Some(session) = gb_repository
            .get_current_session()
            .context("failed to get current session")?
        {
            let project_head = project_repository
                .get_head()
                .context("failed to get head")?;
            if session.meta.branch != project_head.name().map(|n| n.to_string()) {
                gb_repository
                    .flush_session(&project_repository, &session, user.as_ref())
                    .context(format!("failed to flush session {}", session.id))?;
            }
        }

        let path = path.as_ref();

        // `None` means the file is ignored or gone from the work tree.
        let current_wd_file_content = match Self::get_current_file(&project_repository, path) {
            Ok(content) => Some(content),
            Err(reader::Error::NotFound) => None,
            Err(err) => Err(err).context("failed to get file content")?,
        };

        let current_session = gb_repository
            .get_or_create_current_session()
            .context("failed to get or create current session")?;
        let current_session_reader = sessions::Reader::open(&gb_repository, &current_session)
            .context("failed to get session reader")?;

        // Last content recorded for this file in the session, if any.
        let latest_file_content = match current_session_reader.file(path) {
            Ok(content) => Some(content),
            Err(reader::Error::NotFound) => None,
            Err(err) => Err(err).context("failed to get file content")?,
        };

        let deltas_reader = deltas::Reader::new(&current_session_reader);
        let current_deltas = deltas_reader
            .read_file(path)
            .context("failed to get file deltas")?;

        // Reconstruct the document from recorded content + deltas, then apply
        // the current work-tree content; `None` delta means nothing changed.
        let mut text_doc = deltas::Document::new(
            latest_file_content.as_ref(),
            current_deltas.unwrap_or_default(),
        )?;
        let new_delta = text_doc
            .update(current_wd_file_content.as_ref())
            .context("failed to calculate new deltas")?;

        if let Some(new_delta) = new_delta {
            let deltas = text_doc.get_deltas();
            let writer =
                deltas::Writer::new(&gb_repository).context("failed to open deltas writer")?;
            writer
                .write(path, &deltas)
                .context("failed to write deltas")?;
            // Mirror the work-tree state: non-UTF8 content is stored empty,
            // a deleted file removes the session's wd copy.
            match &current_wd_file_content {
                Some(reader::Content::UTF8(text)) => writer.write_wd_file(path, text),
                Some(_) => writer.write_wd_file(path, ""),
                None => writer.remove_wd_file(path),
            }?;

            Ok(vec![
                events::Event::SessionFile((
                    *project_id,
                    current_session.id,
                    path.to_path_buf(),
                    latest_file_content,
                )),
                events::Event::Session(*project_id, current_session.clone()),
                events::Event::SessionDelta((
                    *project_id,
                    current_session.id,
                    path.to_path_buf(),
                    new_delta.clone(),
                )),
            ])
        } else {
            tracing::debug!(%project_id, path = %path.display(), "no new deltas, ignoring");
            Ok(vec![])
        }
    }
}

View File

@ -1,106 +0,0 @@
use std::{sync::Arc, time::Duration};
use anyhow::Result;
use gitbutler_core::{
assets,
projects::ProjectId,
virtual_branches::{self, VirtualBranches},
};
use governor::{
clock::QuantaClock,
state::{InMemoryState, NotKeyed},
Quota, RateLimiter,
};
use tauri::{AppHandle, Manager};
use tokio::sync::Mutex;
use super::events;
use crate::events as app_events;
/// Rate-limited recomputation of virtual branches; emits the refreshed list
/// to the frontend.
#[derive(Clone)]
pub struct Handler {
    // try-locked in `handle`: only one recomputation runs at a time.
    inner: Arc<Mutex<InnerHandler>>,
    // At most one run per 100ms window (see `new`).
    limit: Arc<RateLimiter<NotKeyed, InMemoryState, QuantaClock>>,
}
impl TryFrom<&AppHandle> for Handler {
type Error = anyhow::Error;
fn try_from(value: &AppHandle) -> std::result::Result<Self, Self::Error> {
if let Some(handler) = value.try_state::<Handler>() {
Ok(handler.inner().clone())
} else {
let vbranches = value
.state::<virtual_branches::Controller>()
.inner()
.clone();
let proxy = value.state::<assets::Proxy>().inner().clone();
let inner = InnerHandler::new(vbranches, proxy);
let handler = Handler::new(inner);
value.manage(handler.clone());
Ok(handler)
}
}
}
impl Handler {
    /// Wrap `inner` behind a mutex and a limiter of one run per 100ms.
    fn new(inner: InnerHandler) -> Self {
        Self {
            inner: Arc::new(Mutex::new(inner)),
            limit: Arc::new(RateLimiter::direct(
                Quota::with_period(Duration::from_millis(100)).expect("valid quota"),
            )),
        }
    }

    /// Recompute virtual branches unless rate-limited or a run is already in
    /// flight; both of those cases yield no follow-up events.
    pub async fn handle(&self, project_id: &ProjectId) -> Result<Vec<events::Event>> {
        if self.limit.check().is_err() {
            return Ok(vec![]);
        }
        match self.inner.try_lock() {
            Ok(handler) => handler.handle(project_id).await,
            Err(_) => Ok(vec![]),
        }
    }
}
/// The lock-protected part of `Handler`: performs the listing and emit.
struct InnerHandler {
    vbranch_controller: virtual_branches::Controller,
    // Rewrites branch assets before the result is shown to the user.
    assets_proxy: assets::Proxy,
}
impl InnerHandler {
fn new(vbranch_controller: virtual_branches::Controller, assets_proxy: assets::Proxy) -> Self {
Self {
vbranch_controller,
assets_proxy,
}
}
pub async fn handle(&self, project_id: &ProjectId) -> Result<Vec<events::Event>> {
match self
.vbranch_controller
.list_virtual_branches(project_id)
.await
{
Ok((branches, _, skipped_files)) => {
let branches = self.assets_proxy.proxy_virtual_branches(branches).await;
Ok(vec![events::Event::Emit(
app_events::Event::virtual_branches(
project_id,
&VirtualBranches {
branches,
skipped_files,
},
),
)])
}
Err(error) => {
if error.is::<virtual_branches::errors::VerifyError>() {
Ok(vec![])
} else {
Err(error.context("failed to list virtual branches").into())
}
}
}
}
}

View File

@ -1,155 +0,0 @@
use std::{path, sync::Arc, time};
use anyhow::{Context, Result};
use gitbutler_core::{gb_repository, project_repository, projects, projects::ProjectId, users};
use tauri::{AppHandle, Manager};
use tokio::sync::Mutex;
use super::events;
/// Fetches the project's GitButler data and reports sessions that appeared
/// as a result of the fetch.
#[derive(Clone)]
pub struct Handler {
    // try-locked in `handle`: requests arriving mid-fetch are dropped.
    state: Arc<Mutex<State>>,
}
impl TryFrom<&AppHandle> for Handler {
type Error = anyhow::Error;
fn try_from(value: &AppHandle) -> std::result::Result<Self, Self::Error> {
if let Some(handler) = value.try_state::<Handler>() {
Ok(handler.inner().clone())
} else if let Some(app_data_dir) = value.path_resolver().app_data_dir() {
let projects = value.state::<projects::Controller>().inner().clone();
let users = value.state::<users::Controller>().inner().clone();
let handler = Handler::new(app_data_dir, projects, users);
value.manage(handler.clone());
Ok(handler)
} else {
Err(anyhow::anyhow!("failed to get app data dir"))
}
}
}
impl Handler {
    /// Wrap the data directory and stores behind a mutex so at most one
    /// fetch runs at a time.
    pub fn new(
        local_data_dir: path::PathBuf,
        projects: projects::Controller,
        users: users::Controller,
    ) -> Self {
        Self {
            state: Arc::new(Mutex::new(State {
                local_data_dir,
                projects,
                users,
            })),
        }
    }

    /// Fetch GitButler data for `project_id`; `now` is recorded as the fetch
    /// timestamp. A fetch already in flight makes this a no-op.
    pub async fn handle(
        &self,
        project_id: &ProjectId,
        now: &time::SystemTime,
    ) -> Result<Vec<events::Event>> {
        if let Ok(state) = self.state.try_lock() {
            Self::handle_inner(&state, project_id, now).await
        } else {
            Ok(vec![])
        }
    }

    /// Fetch with retries, persist the outcome on the project, and return a
    /// `Session` event for every session the fetch brought in.
    async fn handle_inner(
        state: &State,
        project_id: &ProjectId,
        now: &time::SystemTime,
    ) -> Result<Vec<events::Event>> {
        let user = state.users.get_user()?;
        let project = state
            .projects
            .get(project_id)
            .context("failed to get project")?;

        // Respect the per-project opt-out.
        if !project.api.as_ref().map(|api| api.sync).unwrap_or_default() {
            anyhow::bail!("sync disabled");
        }

        let project_repository =
            project_repository::Repository::open(&project).context("failed to open repository")?;
        let gb_repo = gb_repository::Repository::open(
            &state.local_data_dir,
            &project_repository,
            user.as_ref(),
        )
        .context("failed to open repository")?;

        // Snapshot sessions before the fetch so new ones can be detected after.
        let sessions_before_fetch = gb_repo
            .get_sessions_iterator()?
            .filter_map(Result::ok)
            .collect::<Vec<_>>();

        // Retry transient errors for up to 10 minutes with exponential backoff;
        // network errors are treated as permanent and abort immediately.
        let policy = backoff::ExponentialBackoffBuilder::new()
            .with_max_elapsed_time(Some(time::Duration::from_secs(10 * 60)))
            .build();
        let fetch_result = match backoff::retry(policy, || {
            gb_repo.fetch(user.as_ref()).map_err(|err| {
                match err {
                    gb_repository::RemoteError::Network => backoff::Error::permanent(err),
                    err @ gb_repository::RemoteError::Other(_) => {
                        tracing::warn!(%project_id, ?err, will_retry = true, "failed to fetch project data");
                        backoff::Error::transient(err)
                    }
                }
            })
        }) {
            Ok(()) => projects::FetchResult::Fetched { timestamp: *now },
            Err(backoff::Error::Permanent(gb_repository::RemoteError::Network)) => {
                projects::FetchResult::Error {
                    timestamp: *now,
                    error: "network error".to_string(),
                }
            }
            Err(error) => {
                tracing::error!(%project_id, ?error, will_retry=false, "failed to fetch gitbutler data");
                projects::FetchResult::Error {
                    timestamp: *now,
                    error: error.to_string(),
                }
            }
        };

        // Persist the outcome (success or failure) on the project record.
        state
            .projects
            .update(&projects::UpdateRequest {
                id: *project_id,
                gitbutler_data_last_fetched: Some(fetch_result),
                ..Default::default()
            })
            .await
            .context("failed to update fetched result")?;

        // Any session present now but not before the fetch is new.
        let sessions_after_fetch = gb_repo
            .get_sessions_iterator()?
            .filter_map(Result::ok)
            .collect::<Vec<_>>();
        let new_sessions = sessions_after_fetch
            .iter()
            .filter(|s| !sessions_before_fetch.contains(s))
            .collect::<Vec<_>>();
        let events = new_sessions
            .into_iter()
            .cloned()
            .map(|session| events::Event::Session(*project_id, session))
            .collect::<Vec<_>>();
        Ok(events)
    }
}
/// State shared behind `Handler`'s mutex.
struct State {
    local_data_dir: path::PathBuf,
    projects: projects::Controller,
    users: users::Controller,
}

View File

@ -1,96 +0,0 @@
use std::{sync::Arc, time::Duration, vec};
use anyhow::{Context, Result};
use gitbutler_core::{
project_repository,
projects::{self, ProjectId},
};
use governor::{
clock::QuantaClock,
state::{InMemoryState, NotKeyed},
Quota, RateLimiter,
};
use tauri::{AppHandle, Manager};
use tokio::sync::Mutex;
use super::events;
/// First stop for work-tree file changes: drops events for git-ignored paths
/// before any further processing is scheduled.
#[derive(Clone)]
pub struct Handler {
    // try-locked in `handle`: concurrent requests are dropped.
    inner: Arc<Mutex<InnerHandler>>,
    // At most one event per 5ms window passes through (see `new`).
    limit: Arc<RateLimiter<NotKeyed, InMemoryState, QuantaClock>>,
}
impl TryFrom<&AppHandle> for Handler {
type Error = anyhow::Error;
fn try_from(value: &AppHandle) -> std::result::Result<Self, Self::Error> {
if let Some(handler) = value.try_state::<Handler>() {
Ok(handler.inner().clone())
} else {
let projects = value.state::<projects::Controller>().inner().clone();
let inner = InnerHandler::new(projects);
let handler = Handler::new(inner);
value.manage(handler.clone());
Ok(handler)
}
}
}
impl Handler {
    /// Wrap `inner` behind a rate limiter. The period is short (5ms) because
    /// an application (e.g. an IDE) may be writing constantly, so the
    /// threshold can't be too high.
    fn new(inner: InnerHandler) -> Self {
        Self {
            inner: Arc::new(Mutex::new(inner)),
            limit: Arc::new(RateLimiter::direct(
                Quota::with_period(Duration::from_millis(5)).expect("valid quota"),
            )),
        }
    }

    /// Classify the change at `path` unless rate-limited or already busy;
    /// both of those cases produce no follow-up events.
    pub fn handle<P: AsRef<std::path::Path>>(
        &self,
        path: P,
        project_id: &ProjectId,
    ) -> Result<Vec<events::Event>> {
        if self.limit.check().is_err() {
            return Ok(vec![]);
        }
        match self.inner.try_lock() {
            Ok(handler) => handler.handle(path, project_id),
            Err(_) => Ok(vec![]),
        }
    }
}
/// The lock-protected part of `Handler`: performs the ignore check.
struct InnerHandler {
    projects: projects::Controller,
}
impl InnerHandler {
    fn new(projects: projects::Controller) -> Self {
        Self { projects }
    }

    /// Drop changes to git-ignored paths; otherwise schedule delta and
    /// virtual-branch recalculation for the file.
    pub fn handle<P: AsRef<std::path::Path>>(
        &self,
        path: P,
        project_id: &ProjectId,
    ) -> Result<Vec<events::Event>> {
        let project = self
            .projects
            .get(project_id)
            .context("failed to get project")?;
        let repo = project_repository::Repository::open(&project)
            .with_context(|| "failed to open project repository for project")?;
        // An indeterminate ignore status counts as "not ignored".
        let ignored = repo.is_path_ignored(path.as_ref()).unwrap_or(false);
        if ignored {
            return Ok(vec![]);
        }
        Ok(vec![
            events::Event::CalculateDeltas(*project_id, path.as_ref().to_path_buf()),
            events::Event::CalculateVirtualBranches(*project_id),
        ])
    }
}

View File

@ -1,106 +0,0 @@
use std::{path, sync::Arc};
use anyhow::{Context, Result};
use gitbutler_core::{
gb_repository, project_repository, projects, projects::ProjectId, sessions, users,
};
use tauri::{AppHandle, Manager};
use tokio::sync::Mutex;
use super::events;
/// Flushes a project's current session into the GitButler data repository.
#[derive(Clone)]
pub struct Handler {
    // try-locked in `handle`: concurrent flush requests are dropped.
    inner: Arc<Mutex<HandlerInner>>,
}
impl TryFrom<&AppHandle> for Handler {
type Error = anyhow::Error;
fn try_from(value: &AppHandle) -> std::result::Result<Self, Self::Error> {
if let Some(handler) = value.try_state::<Handler>() {
Ok(handler.inner().clone())
} else if let Some(app_data_dir) = value.path_resolver().app_data_dir() {
let projects = value.state::<projects::Controller>().inner().clone();
let users = value.state::<users::Controller>().inner().clone();
let inner = HandlerInner::new(app_data_dir, projects, users);
let handler = Handler::new(inner);
value.manage(handler.clone());
Ok(handler)
} else {
Err(anyhow::anyhow!("failed to get app data dir"))
}
}
}
impl Handler {
    fn new(inner: HandlerInner) -> Handler {
        Handler {
            inner: Arc::new(Mutex::new(inner)),
        }
    }

    /// Flush `session` unless another flush currently holds the lock, in
    /// which case nothing happens and no follow-up events are produced.
    pub fn handle(
        &self,
        project_id: &ProjectId,
        session: &sessions::Session,
    ) -> Result<Vec<events::Event>> {
        match self.inner.try_lock() {
            Ok(inner) => inner.handle(project_id, session),
            Err(_) => Ok(vec![]),
        }
    }
}
/// The lock-protected part of `Handler`: performs the actual flush.
struct HandlerInner {
    local_data_dir: path::PathBuf,
    project_store: projects::Controller,
    users: users::Controller,
}
impl HandlerInner {
    fn new(
        local_data_dir: path::PathBuf,
        project_store: projects::Controller,
        users: users::Controller,
    ) -> HandlerInner {
        HandlerInner {
            local_data_dir,
            project_store,
            users,
        }
    }

    /// Flush `session` into the GitButler repository, then schedule indexing
    /// of the flushed session plus both push jobs for the project.
    pub fn handle(
        &self,
        project_id: &ProjectId,
        session: &sessions::Session,
    ) -> Result<Vec<events::Event>> {
        let project = self
            .project_store
            .get(project_id)
            .context("failed to get project")?;
        let user = self.users.get_user()?;
        let repo =
            project_repository::Repository::open(&project).context("failed to open repository")?;
        let gb_repo =
            gb_repository::Repository::open(&self.local_data_dir, &repo, user.as_ref())
                .context("failed to open repository")?;
        // The flushed session replaces the in-progress one we were handed.
        let session = gb_repo
            .flush_session(&repo, session, user.as_ref())
            .context(format!("failed to flush session {}", session.id))?;
        Ok(vec![
            events::Event::Session(*project_id, session),
            events::Event::PushGitbutlerData(*project_id),
            events::Event::PushProjectToGitbutler(*project_id),
        ])
    }
}

View File

@ -1,134 +0,0 @@
use std::path;
use anyhow::{Context, Result};
use gitbutler_core::{
gb_repository, git, project_repository,
projects::{self, ProjectId},
users,
};
use tauri::{AppHandle, Manager};
use super::events;
use crate::{analytics, events as app_events};
/// Reacts to changes of files inside `.git` (FETCH_HEAD, logs/HEAD, GB_FLUSH,
/// HEAD, index) and translates them into watcher/application events.
#[derive(Clone)]
pub struct Handler {
    local_data_dir: path::PathBuf,
    projects: projects::Controller,
    users: users::Controller,
}
impl TryFrom<&AppHandle> for Handler {
type Error = anyhow::Error;
fn try_from(value: &AppHandle) -> Result<Self, Self::Error> {
if let Some(handler) = value.try_state::<Handler>() {
Ok(handler.inner().clone())
} else if let Some(app_data_dir) = value.path_resolver().app_data_dir() {
let projects = value.state::<projects::Controller>().inner().clone();
let users = value.state::<users::Controller>().inner().clone();
let handler = Handler::new(app_data_dir, projects, users);
value.manage(handler.clone());
Ok(handler)
} else {
Err(anyhow::anyhow!("failed to get app data dir"))
}
}
}
impl Handler {
    pub fn new(
        local_data_dir: path::PathBuf,
        projects: projects::Controller,
        users: users::Controller,
    ) -> Self {
        Self {
            local_data_dir,
            projects,
            users,
        }
    }

    /// `path` is the changed file, relative to the project's `.git` directory.
    /// Returns follow-up events depending on which git file changed.
    pub fn handle<P: AsRef<std::path::Path>>(
        &self,
        path: P,
        project_id: &ProjectId,
    ) -> Result<Vec<events::Event>> {
        let project = self
            .projects
            .get(project_id)
            .context("failed to get project")?;
        let project_repository = project_repository::Repository::open(&project)
            .context("failed to open project repository for project")?;
        // NOTE(review): a non-UTF-8 path would panic here — confirm paths are
        // guaranteed UTF-8 upstream.
        match path.as_ref().to_str().unwrap() {
            // A remote fetch completed: tell the frontend and recompute branches.
            "FETCH_HEAD" => Ok(vec![
                events::Event::Emit(app_events::Event::git_fetch(&project.id)),
                events::Event::CalculateVirtualBranches(*project_id),
            ]),
            "logs/HEAD" => Ok(vec![events::Event::Emit(app_events::Event::git_activity(
                &project.id,
            ))]),
            // GB_FLUSH is a marker file requesting a session flush: consume it
            // and, if a session is open, schedule the flush.
            "GB_FLUSH" => {
                let user = self.users.get_user()?;
                let gb_repo = gb_repository::Repository::open(
                    &self.local_data_dir,
                    &project_repository,
                    user.as_ref(),
                )
                .context("failed to open repository")?;
                let file_path = project.path.join(".git/GB_FLUSH");
                if file_path.exists() {
                    // Best-effort removal: failure is logged, not fatal.
                    if let Err(e) = std::fs::remove_file(&file_path) {
                        tracing::error!(%project_id, path = %file_path.display(), "GB_FLUSH file delete error: {}", e);
                    }
                    if let Some(current_session) = gb_repo
                        .get_current_session()
                        .context("failed to get current session")?
                    {
                        return Ok(vec![events::Event::Flush(project.id, current_session)]);
                    }
                }
                Ok(vec![])
            }
            // HEAD moved: if we left the gitbutler integration branch, remove
            // the integration reference, then report the head change.
            "HEAD" => {
                let head_ref = project_repository
                    .get_head()
                    .context("failed to get head")?;
                let head_ref_name = head_ref.name().context("failed to get head name")?;
                if head_ref_name.to_string() != "refs/heads/gitbutler/integration" {
                    let mut integration_reference = project_repository
                        .git_repository
                        .find_reference(&git::Refname::from(git::LocalRefname::new(
                            "gitbutler/integration",
                            None,
                        )))?;
                    integration_reference.delete()?;
                }
                if let Some(head) = head_ref.name() {
                    Ok(vec![
                        events::Event::Analytics(analytics::Event::HeadChange {
                            project_id: project.id,
                            reference_name: head_ref_name.to_string(),
                        }),
                        events::Event::Emit(app_events::Event::git_head(
                            &project.id,
                            &head.to_string(),
                        )),
                    ])
                } else {
                    Ok(vec![])
                }
            }
            "index" => Ok(vec![events::Event::Emit(app_events::Event::git_index(
                &project.id,
            ))]),
            // Any other file inside .git is of no interest.
            _ => Ok(vec![]),
        }
    }
}

View File

@ -1,149 +0,0 @@
use std::path;
use anyhow::{Context, Result};
use gitbutler_core::{
deltas, gb_repository, project_repository,
projects::{self, ProjectId},
sessions::{self, SessionId},
users,
};
use tauri::{AppHandle, Manager};
use super::events;
use crate::events as app_events;
#[derive(Clone)]
pub struct Handler {
local_data_dir: path::PathBuf,
projects: projects::Controller,
users: users::Controller,
sessions_database: sessions::Database,
deltas_database: deltas::Database,
}
impl TryFrom<&AppHandle> for Handler {
type Error = anyhow::Error;
fn try_from(value: &AppHandle) -> Result<Self, Self::Error> {
if let Some(handler) = value.try_state::<Handler>() {
Ok(handler.inner().clone())
} else if let Some(app_data_dir) = value.path_resolver().app_data_dir() {
let projects = value.state::<projects::Controller>().inner().clone();
let users = value.state::<users::Controller>().inner().clone();
let sessions_database = value.state::<sessions::Database>().inner().clone();
let deltas_database = value.state::<deltas::Database>().inner().clone();
let handler = Handler::new(
app_data_dir,
projects,
users,
sessions_database,
deltas_database,
);
value.manage(handler.clone());
Ok(handler)
} else {
Err(anyhow::anyhow!("failed to get app data dir"))
}
}
}
impl Handler {
fn new(
local_data_dir: path::PathBuf,
projects: projects::Controller,
users: users::Controller,
sessions_database: sessions::Database,
deltas_database: deltas::Database,
) -> Handler {
Handler {
local_data_dir,
projects,
users,
sessions_database,
deltas_database,
}
}
pub fn index_deltas(
&self,
project_id: &ProjectId,
session_id: &SessionId,
file_path: &path::Path,
deltas: &Vec<deltas::Delta>,
) -> Result<()> {
self.deltas_database
.insert(project_id, session_id, file_path, deltas)
.context("failed to insert deltas into database")?;
Ok(())
}
pub fn reindex(&self, project_id: &ProjectId) -> Result<Vec<events::Event>> {
let user = self.users.get_user()?;
let project = self.projects.get(project_id)?;
let project_repository =
project_repository::Repository::open(&project).context("failed to open repository")?;
let gb_repository = gb_repository::Repository::open(
&self.local_data_dir,
&project_repository,
user.as_ref(),
)
.context("failed to open repository")?;
let sessions_iter = gb_repository.get_sessions_iterator()?;
let mut events = vec![];
for session in sessions_iter {
events.extend(self.process_session(&gb_repository, &session?)?);
}
Ok(events)
}
pub fn index_session(
&self,
project_id: &ProjectId,
session: &sessions::Session,
) -> Result<Vec<events::Event>> {
let user = self.users.get_user()?;
let project = self.projects.get(project_id)?;
let project_repository =
project_repository::Repository::open(&project).context("failed to open repository")?;
let gb_repository = gb_repository::Repository::open(
&self.local_data_dir,
&project_repository,
user.as_ref(),
)
.context("failed to open repository")?;
self.process_session(&gb_repository, session)
}
fn process_session(
&self,
gb_repository: &gb_repository::Repository,
session: &sessions::Session,
) -> Result<Vec<events::Event>> {
let project_id = gb_repository.get_project_id();
// now, index session if it has changed to the database.
let from_db = self.sessions_database.get_by_id(&session.id)?;
if from_db.is_some() && from_db.unwrap() == *session {
return Ok(vec![]);
}
self.sessions_database
.insert(project_id, &[session])
.context("failed to insert session into database")?;
let session_reader = sessions::Reader::open(gb_repository, session)?;
let deltas_reader = deltas::Reader::new(&session_reader);
for (file_path, deltas) in deltas_reader
.read(None)
.context("could not list deltas for session")?
{
self.index_deltas(project_id, &session.id, &file_path, &deltas)?;
}
Ok(vec![events::Event::Emit(app_events::Event::session(
project_id, session,
))])
}
}

View File

@ -1,91 +0,0 @@
use std::{
path,
sync::{Arc, Mutex, TryLockError},
};
use anyhow::{Context, Result};
use gitbutler_core::{
gb_repository, gb_repository::RemoteError, project_repository, projects, projects::ProjectId,
users,
};
use tauri::{AppHandle, Manager};
use super::events;
#[derive(Clone)]
pub struct Handler {
inner: Arc<Mutex<HandlerInner>>,
}
impl TryFrom<&AppHandle> for Handler {
type Error = anyhow::Error;
fn try_from(value: &AppHandle) -> std::result::Result<Self, Self::Error> {
if let Some(handler) = value.try_state::<Handler>() {
Ok(handler.inner().clone())
} else if let Some(app_data_dir) = value.path_resolver().app_data_dir() {
let projects = value.state::<projects::Controller>().inner().clone();
let users = value.state::<users::Controller>().inner().clone();
let inner = HandlerInner::new(app_data_dir, projects, users);
let handler = Handler::new(inner);
value.manage(handler.clone());
Ok(handler)
} else {
Err(anyhow::anyhow!("failed to get app data dir"))
}
}
}
impl Handler {
fn new(inner: HandlerInner) -> Self {
Self {
inner: Arc::new(Mutex::new(inner)),
}
}
pub fn handle(&self, project_id: &ProjectId) -> Result<Vec<events::Event>> {
match self.inner.try_lock() {
Ok(inner) => inner.handle(project_id),
Err(TryLockError::Poisoned(_)) => Err(anyhow::anyhow!("mutex poisoned")),
Err(TryLockError::WouldBlock) => Ok(vec![]),
}
}
}
struct HandlerInner {
local_data_dir: path::PathBuf,
projects: projects::Controller,
users: users::Controller,
}
impl HandlerInner {
fn new(
local_data_dir: path::PathBuf,
projects: projects::Controller,
users: users::Controller,
) -> Self {
Self {
local_data_dir,
projects,
users,
}
}
pub fn handle(&self, project_id: &ProjectId) -> Result<Vec<events::Event>> {
let user = self.users.get_user()?;
let project = self.projects.get(project_id)?;
let project_repository =
project_repository::Repository::open(&project).context("failed to open repository")?;
let gb_repo = gb_repository::Repository::open(
&self.local_data_dir,
&project_repository,
user.as_ref(),
)
.context("failed to open repository")?;
match gb_repo.push(user.as_ref()) {
Ok(()) | Err(RemoteError::Network) => Ok(vec![]),
Err(err) => Err(err).context("failed to push"),
}
}
}

View File

@ -5,19 +5,43 @@ use std::{
};
use anyhow::Result;
use gitbutler_core::projects::ProjectId;
use gitbutler_core::{
deltas::{self, operations::Operation},
reader, sessions,
virtual_branches::{self, branch, VirtualBranchesHandle},
};
use gitbutler_tauri::watcher::handlers::calculate_deltas_handler::Handler;
use gitbutler_tauri::watcher;
use once_cell::sync::Lazy;
use self::branch::BranchId;
use gitbutler_testsupport::{commit_all, Case, Suite};
use crate::watcher::handler::support::Fixture;
use gitbutler_testsupport::{commit_all, Case};
static TEST_TARGET_INDEX: Lazy<AtomicUsize> = Lazy::new(|| AtomicUsize::new(0));
#[derive(Clone)]
pub struct State {
inner: watcher::Handler,
}
impl State {
pub(super) fn from_fixture(fixture: &mut Fixture) -> Self {
Self {
inner: fixture.new_handler(),
}
}
pub(super) fn calculate_delta(
&self,
path: impl Into<PathBuf>,
project_id: ProjectId,
) -> Result<()> {
self.inner.calculate_deltas(vec![path.into()], project_id)?;
Ok(())
}
}
fn new_test_target() -> virtual_branches::target::Target {
virtual_branches::target::Target {
branch: format!(
@ -78,16 +102,16 @@ fn new_test_branch() -> branch::Branch {
#[test]
fn register_existing_commited_file() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &suite.new_case_with_files(HashMap::from([(PathBuf::from("test.txt"), "test")]));
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case_with_files(HashMap::from([(PathBuf::from("test.txt"), "test")]));
std::fs::write(project.path.join("test.txt"), "test2")?;
listener.handle("test.txt", &project.id)?;
listener.calculate_delta("test.txt", project.id)?;
let session = gb_repository.get_current_session()?.unwrap();
let session_reader = sessions::Reader::open(gb_repository, &session)?;
@ -109,16 +133,16 @@ fn register_existing_commited_file() -> Result<()> {
#[test]
fn register_must_init_current_session() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &suite.new_case();
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case();
std::fs::write(project.path.join("test.txt"), "test")?;
listener.handle("test.txt", &project.id)?;
listener.calculate_delta("test.txt", project.id)?;
assert!(gb_repository.get_current_session()?.is_some());
@ -127,20 +151,20 @@ fn register_must_init_current_session() -> Result<()> {
#[test]
fn register_must_not_override_current_session() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &suite.new_case();
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case();
std::fs::write(project.path.join("test.txt"), "test")?;
listener.handle("test.txt", &project.id)?;
listener.calculate_delta("test.txt", project.id)?;
let session1 = gb_repository.get_current_session()?.unwrap();
std::fs::write(project.path.join("test.txt"), "test2")?;
listener.handle("test.txt", &project.id)?;
listener.calculate_delta("test.txt", project.id)?;
let session2 = gb_repository.get_current_session()?.unwrap();
assert_eq!(session1.id, session2.id);
@ -150,20 +174,20 @@ fn register_must_not_override_current_session() -> Result<()> {
#[test]
fn register_binfile() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &suite.new_case();
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case();
std::fs::write(
project.path.join("test.bin"),
[0, 159, 146, 150, 159, 146, 150],
)?;
listener.handle("test.bin", &project.id)?;
listener.calculate_delta("test.bin", project.id)?;
let session = gb_repository.get_current_session()?.unwrap();
let session_reader = sessions::Reader::open(gb_repository, &session)?;
@ -182,17 +206,17 @@ fn register_binfile() -> Result<()> {
#[test]
fn register_empty_new_file() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &suite.new_case();
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case();
std::fs::write(project.path.join("test.txt"), "")?;
listener.handle("test.txt", &project.id)?;
listener.calculate_delta("test.txt", project.id)?;
let session = gb_repository.get_current_session()?.unwrap();
let session_reader = sessions::Reader::open(gb_repository, &session)?;
@ -210,17 +234,17 @@ fn register_empty_new_file() -> Result<()> {
#[test]
fn register_new_file() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &suite.new_case();
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case();
std::fs::write(project.path.join("test.txt"), "test")?;
listener.handle("test.txt", &project.id)?;
listener.calculate_delta("test.txt", project.id)?;
let session = gb_repository.get_current_session()?.unwrap();
let session_reader = sessions::Reader::open(gb_repository, &session)?;
@ -242,18 +266,18 @@ fn register_new_file() -> Result<()> {
#[test]
fn register_no_changes_saved_thgoughout_flushes() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project_repository,
project,
..
} = &suite.new_case();
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case();
// file change, wd and deltas are written
std::fs::write(project.path.join("test.txt"), "test")?;
listener.handle("test.txt", &project.id)?;
listener.calculate_delta("test.txt", project.id)?;
// make two more sessions.
gb_repository.flush(project_repository, None)?;
@ -271,16 +295,16 @@ fn register_no_changes_saved_thgoughout_flushes() -> Result<()> {
#[test]
fn register_new_file_twice() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &suite.new_case();
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case();
std::fs::write(project.path.join("test.txt"), "test")?;
listener.handle("test.txt", &project.id)?;
listener.calculate_delta("test.txt", project.id)?;
let session = gb_repository.get_current_session()?.unwrap();
let session_reader = sessions::Reader::open(gb_repository, &session)?;
@ -298,7 +322,7 @@ fn register_new_file_twice() -> Result<()> {
);
std::fs::write(project.path.join("test.txt"), "test2")?;
listener.handle("test.txt", &project.id)?;
listener.calculate_delta("test.txt", project.id)?;
let deltas = deltas_reader.read_file("test.txt")?.unwrap();
assert_eq!(deltas.len(), 2);
@ -322,19 +346,19 @@ fn register_new_file_twice() -> Result<()> {
#[test]
fn register_file_deleted() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project_repository,
project,
..
} = &suite.new_case();
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case();
{
// write file
std::fs::write(project.path.join("test.txt"), "test")?;
listener.handle("test.txt", &project.id)?;
listener.calculate_delta("test.txt", project.id)?;
}
{
@ -377,7 +401,7 @@ fn register_file_deleted() -> Result<()> {
// removing the file
std::fs::remove_file(project.path.join("test.txt"))?;
listener.handle("test.txt", &project.id)?;
listener.calculate_delta("test.txt", project.id)?;
// deltas are recorded
let deltas = deltas_reader.read_file("test.txt")?.unwrap();
@ -401,14 +425,14 @@ fn register_file_deleted() -> Result<()> {
#[test]
fn flow_with_commits() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
project_repository,
..
} = &suite.new_case();
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case();
let size = 10;
let relative_file_path = Path::new("one/two/test.txt");
@ -421,7 +445,7 @@ fn flow_with_commits() -> Result<()> {
)?;
commit_all(&project_repository.git_repository);
listener.handle(relative_file_path, &project.id)?;
listener.calculate_delta(relative_file_path, project.id)?;
assert!(gb_repository.flush(project_repository, None)?.is_some());
}
@ -488,14 +512,14 @@ fn flow_with_commits() -> Result<()> {
#[test]
fn flow_no_commits() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
project_repository,
..
} = &suite.new_case();
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case();
let size = 10;
let relative_file_path = Path::new("one/two/test.txt");
@ -507,7 +531,7 @@ fn flow_no_commits() -> Result<()> {
i.to_string(),
)?;
listener.handle(relative_file_path, &project.id)?;
listener.calculate_delta(relative_file_path, project.id)?;
assert!(gb_repository.flush(project_repository, None)?.is_some());
}
@ -574,13 +598,13 @@ fn flow_no_commits() -> Result<()> {
#[test]
fn flow_signle_session() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
..
} = &suite.new_case();
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case();
let size = 10_i32;
let relative_file_path = Path::new("one/two/test.txt");
@ -592,7 +616,7 @@ fn flow_signle_session() -> Result<()> {
i.to_string(),
)?;
listener.handle(relative_file_path, &project.id)?;
listener.calculate_delta(relative_file_path, project.id)?;
}
// collect all operations from sessions in the reverse order
@ -628,14 +652,14 @@ fn flow_signle_session() -> Result<()> {
#[test]
fn should_persist_branches_targets_state_between_sessions() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
project_repository,
..
} = &suite.new_case_with_files(HashMap::from([(PathBuf::from("test.txt"), "hello world")]));
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case_with_files(HashMap::from([(PathBuf::from("test.txt"), "hello world")]));
let branch_writer =
branch::Writer::new(gb_repository, VirtualBranchesHandle::new(&project.gb_dir()))?;
@ -653,7 +677,7 @@ fn should_persist_branches_targets_state_between_sessions() -> Result<()> {
target_writer.write(&vbranch1.id, &vbranch1_target)?;
std::fs::write(project.path.join("test.txt"), "hello world!").unwrap();
listener.handle("test.txt", &project.id)?;
listener.calculate_delta("test.txt", project.id)?;
let flushed_session = gb_repository.flush(project_repository, None).unwrap();
@ -693,14 +717,14 @@ fn should_persist_branches_targets_state_between_sessions() -> Result<()> {
#[test]
fn should_restore_branches_targets_state_from_head_session() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
project_repository,
..
} = &suite.new_case_with_files(HashMap::from([(PathBuf::from("test.txt"), "hello world")]));
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case_with_files(HashMap::from([(PathBuf::from("test.txt"), "hello world")]));
let branch_writer =
branch::Writer::new(gb_repository, VirtualBranchesHandle::new(&project.gb_dir()))?;
@ -718,7 +742,7 @@ fn should_restore_branches_targets_state_from_head_session() -> Result<()> {
target_writer.write(&vbranch1.id, &vbranch1_target)?;
std::fs::write(project.path.join("test.txt"), "hello world!").unwrap();
listener.handle("test.txt", &project.id).unwrap();
listener.calculate_delta("test.txt", project.id).unwrap();
let flushed_session = gb_repository.flush(project_repository, None).unwrap();
@ -764,18 +788,18 @@ mod flush_wd {
#[test]
fn should_add_new_files_to_session_wd() {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
project_repository,
..
} = &suite.new_case();
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case();
// write a file into session
std::fs::write(project.path.join("test.txt"), "hello world!").unwrap();
listener.handle("test.txt", &project.id).unwrap();
listener.calculate_delta("test.txt", project.id).unwrap();
let flushed_session = gb_repository
.flush(project_repository, None)
@ -803,7 +827,9 @@ mod flush_wd {
// write another file into session
std::fs::create_dir_all(project.path.join("one/two")).unwrap();
std::fs::write(project.path.join("one/two/test2.txt"), "hello world!").unwrap();
listener.handle("one/two/test2.txt", &project.id).unwrap();
listener
.calculate_delta("one/two/test2.txt", project.id)
.unwrap();
let flushed_session = gb_repository
.flush(project_repository, None)
@ -837,21 +863,23 @@ mod flush_wd {
#[test]
fn should_remove_deleted_files_from_session_wd() {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
project_repository,
..
} = &suite.new_case();
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case();
// write a file into session
std::fs::write(project.path.join("test.txt"), "hello world!").unwrap();
listener.handle("test.txt", &project.id).unwrap();
listener.calculate_delta("test.txt", project.id).unwrap();
std::fs::create_dir_all(project.path.join("one/two")).unwrap();
std::fs::write(project.path.join("one/two/test2.txt"), "hello world!").unwrap();
listener.handle("one/two/test2.txt", &project.id).unwrap();
listener
.calculate_delta("one/two/test2.txt", project.id)
.unwrap();
let flushed_session = gb_repository
.flush(project_repository, None)
@ -884,9 +912,11 @@ mod flush_wd {
// rm the files
std::fs::remove_file(project.path.join("test.txt")).unwrap();
listener.handle("test.txt", &project.id).unwrap();
listener.calculate_delta("test.txt", project.id).unwrap();
std::fs::remove_file(project.path.join("one/two/test2.txt")).unwrap();
listener.handle("one/two/test2.txt", &project.id).unwrap();
listener
.calculate_delta("one/two/test2.txt", project.id)
.unwrap();
let flushed_session = gb_repository
.flush(project_repository, None)
@ -910,21 +940,23 @@ mod flush_wd {
#[test]
fn should_update_updated_files_in_session_wd() {
let suite = Suite::default();
let mut fixture = Fixture::default();
let listener = State::from_fixture(&mut fixture);
let Case {
gb_repository,
project,
project_repository,
..
} = &suite.new_case();
let listener = Handler::from_path(suite.local_app_data());
} = &fixture.new_case();
// write a file into session
std::fs::write(project.path.join("test.txt"), "hello world!").unwrap();
listener.handle("test.txt", &project.id).unwrap();
listener.calculate_delta("test.txt", project.id).unwrap();
std::fs::create_dir_all(project.path.join("one/two")).unwrap();
std::fs::write(project.path.join("one/two/test2.txt"), "hello world!").unwrap();
listener.handle("one/two/test2.txt", &project.id).unwrap();
listener
.calculate_delta("one/two/test2.txt", project.id)
.unwrap();
let flushed_session = gb_repository
.flush(project_repository, None)
@ -957,10 +989,12 @@ mod flush_wd {
// update the file
std::fs::write(project.path.join("test.txt"), "hello world!2").unwrap();
listener.handle("test.txt", &project.id).unwrap();
listener.calculate_delta("test.txt", project.id).unwrap();
std::fs::write(project.path.join("one/two/test2.txt"), "hello world!2").unwrap();
listener.handle("one/two/test2.txt", &project.id).unwrap();
listener
.calculate_delta("one/two/test2.txt", project.id)
.unwrap();
let flushed_session = gb_repository
.flush(project_repository, None)

View File

@ -1,63 +1,57 @@
use std::time::SystemTime;
use gitbutler_core::projects;
use gitbutler_tauri::watcher::handlers::fetch_gitbutler_data::Handler;
use pretty_assertions::assert_eq;
use crate::watcher::handler::support::Fixture;
use crate::watcher::handler::test_remote_repository;
use gitbutler_testsupport::{Case, Suite};
use gitbutler_testsupport::Case;
#[tokio::test]
async fn fetch_success() -> anyhow::Result<()> {
let suite = Suite::default();
let Case { project, .. } = &suite.new_case();
let mut fixture = Fixture::default();
{
let handler = fixture.new_handler();
let Case { project, .. } = &fixture.new_case();
let (cloud, _tmp) = test_remote_repository()?;
let api_project = projects::ApiProject {
name: "test-sync".to_string(),
description: None,
repository_id: "123".to_string(),
git_url: cloud.path().to_str().unwrap().to_string(),
code_git_url: None,
created_at: 0_i32.to_string(),
updated_at: 0_i32.to_string(),
sync: true,
};
let (cloud, _tmp) = test_remote_repository()?;
let api_project = projects::ApiProject {
name: "test-sync".to_string(),
description: None,
repository_id: "123".to_string(),
git_url: cloud.path().to_str().unwrap().to_string(),
code_git_url: None,
created_at: 0_i32.to_string(),
updated_at: 0_i32.to_string(),
sync: true,
};
suite
.projects
.update(&projects::UpdateRequest {
id: project.id,
api: Some(api_project.clone()),
..Default::default()
})
.await?;
let listener = Handler::new(
suite.local_app_data().into(),
suite.projects.clone(),
suite.users.clone(),
);
listener
.handle(&project.id, &SystemTime::now())
.await
.unwrap();
fixture
.projects
.update(&projects::UpdateRequest {
id: project.id,
api: Some(api_project.clone()),
..Default::default()
})
.await?;
handler
.fetch_gb_data(project.id, SystemTime::now())
.await
.unwrap();
}
assert_eq!(fixture.events().len(), 0);
Ok(())
}
#[tokio::test]
async fn fetch_fail_no_sync() {
let suite = Suite::default();
let Case { project, .. } = &suite.new_case();
let mut fixture = Fixture::default();
{
let handler = fixture.new_handler();
let Case { project, .. } = &fixture.new_case();
let res = handler.fetch_gb_data(project.id, SystemTime::now()).await;
let listener = Handler::new(
suite.local_app_data().into(),
suite.projects.clone(),
suite.users.clone(),
);
let res = listener.handle(&project.id, &SystemTime::now()).await;
assert_eq!(&res.unwrap_err().to_string(), "sync disabled");
assert_eq!(&res.unwrap_err().to_string(), "sync disabled");
}
assert_eq!(fixture.events().len(), 0);
}

View File

@ -2,98 +2,93 @@ use std::fs;
use anyhow::Result;
use gitbutler_core::projects;
use gitbutler_tauri::watcher::{handlers, handlers::git_file_change::Handler, Event};
use gitbutler_tauri::watcher;
use pretty_assertions::assert_eq;
use gitbutler_testsupport::{Case, Suite};
use crate::watcher::handler::support::Fixture;
use gitbutler_testsupport::Case;
#[test]
fn flush_session() -> Result<()> {
let suite = Suite::default();
let Case {
project,
gb_repository,
..
} = &suite.new_case();
#[tokio::test]
async fn flush_session() -> Result<()> {
let mut fixture = Fixture::default();
{
let case = fixture.new_case();
let Case {
project,
gb_repository,
..
} = &case;
assert!(gb_repository.get_current_session()?.is_none());
create_new_session_via_new_file(project, &suite);
assert!(gb_repository.get_current_session()?.is_some());
assert!(gb_repository.get_current_session()?.is_none());
let handler = create_new_session_via_new_file(project, &mut fixture);
assert!(gb_repository.get_current_session()?.is_some());
let listener = Handler::new(
suite.local_app_data().into(),
suite.projects.clone(),
suite.users.clone(),
);
let flush_file_path = project.path.join(".git/GB_FLUSH");
fs::write(flush_file_path.as_path(), "")?;
let flush_file_path = project.path.join(".git/GB_FLUSH");
fs::write(flush_file_path.as_path(), "")?;
let result = listener.handle("GB_FLUSH", &project.id)?;
assert_eq!(result.len(), 1);
assert!(matches!(result[0], Event::Flush(_, _)));
assert!(!flush_file_path.exists(), "flush file deleted");
handler.git_file_change("GB_FLUSH", project.id).await?;
assert!(!flush_file_path.exists(), "flush file deleted");
}
let events = fixture.events();
assert_eq!(events.len(), 4);
assert!(events[0].name().ends_with("/files"));
assert!(events[1].name().ends_with("/deltas"));
assert!(events[2].name().ends_with("/sessions"));
assert!(events[3].name().ends_with("/sessions"));
Ok(())
}
#[test]
fn do_not_flush_session_if_file_is_missing() -> Result<()> {
let suite = Suite::default();
let Case {
project,
gb_repository,
..
} = &suite.new_case();
#[tokio::test]
async fn do_not_flush_session_if_file_is_missing() -> Result<()> {
let mut fixture = Fixture::default();
{
let Case {
project,
gb_repository,
..
} = &fixture.new_case();
assert!(gb_repository.get_current_session()?.is_none());
create_new_session_via_new_file(project, &suite);
assert!(gb_repository.get_current_session()?.is_some());
let listener = Handler::new(
suite.local_app_data().into(),
suite.projects.clone(),
suite.users.clone(),
);
let result = listener.handle("GB_FLUSH", &project.id)?;
assert_eq!(result.len(), 0);
assert!(gb_repository.get_current_session()?.is_none());
let handler = create_new_session_via_new_file(project, &mut fixture);
assert!(gb_repository.get_current_session()?.is_some());
handler.git_file_change("GB_FLUSH", project.id).await?;
}
let events = fixture.events();
assert_eq!(events.len(), 3);
assert!(events[0].name().ends_with("/files"));
assert!(events[1].name().ends_with("/deltas"));
assert!(events[2].name().ends_with("/sessions"));
Ok(())
}
fn create_new_session_via_new_file(project: &projects::Project, suite: &Suite) {
#[tokio::test]
async fn flush_deletes_flush_file_without_session_to_flush() -> Result<()> {
let mut fixture = Fixture::default();
{
let handler = fixture.new_handler();
let Case { project, .. } = &fixture.new_case();
let flush_file_path = project.path.join(".git/GB_FLUSH");
fs::write(flush_file_path.as_path(), "")?;
handler.git_file_change("GB_FLUSH", project.id).await?;
assert!(!flush_file_path.exists(), "flush file deleted");
}
assert_eq!(fixture.events().len(), 0);
Ok(())
}
fn create_new_session_via_new_file(
project: &projects::Project,
fixture: &mut Fixture,
) -> watcher::Handler {
fs::write(project.path.join("test.txt"), "test").unwrap();
let file_change_listener =
handlers::calculate_deltas_handler::Handler::from_path(suite.local_app_data());
file_change_listener
.handle("test.txt", &project.id)
let handler = fixture.new_handler();
handler
.calculate_deltas(vec!["test.txt".into()], project.id)
.unwrap();
}
#[test]
fn flush_deletes_flush_file_without_session_to_flush() -> Result<()> {
let suite = Suite::default();
let Case { project, .. } = &suite.new_case();
let listener = Handler::new(
suite.local_app_data().into(),
suite.projects.clone(),
suite.users.clone(),
);
let flush_file_path = project.path.join(".git/GB_FLUSH");
fs::write(flush_file_path.as_path(), "")?;
let result = listener.handle("GB_FLUSH", &project.id)?;
assert_eq!(result.len(), 0);
assert!(!flush_file_path.exists(), "flush file deleted");
Ok(())
handler
}

View File

@ -1,5 +1,95 @@
use tempfile::TempDir;
mod support {
use gitbutler_core::{assets, deltas, git, sessions, virtual_branches};
use gitbutler_tauri::{analytics, watcher};
use tempfile::TempDir;
/// Like [`gitbutler_testsupport::Suite`], but with all the instances needed to build a handler
pub struct Fixture {
inner: gitbutler_testsupport::Suite,
pub sessions_db: sessions::Database,
pub deltas_db: deltas::Database,
pub vbranch_controller: virtual_branches::Controller,
pub assets_proxy: assets::Proxy,
/// Keeps events emitted from the last created handler.
events: Option<std::sync::mpsc::Receiver<gitbutler_tauri::Event>>,
/// Storage for the databases, to be dropped last.
_tmp: TempDir,
}
impl std::ops::Deref for Fixture {
type Target = gitbutler_testsupport::Suite;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl Default for Fixture {
fn default() -> Self {
let (db, tmp) = gitbutler_testsupport::test_database();
let inner = gitbutler_testsupport::Suite::default();
let sessions_db = sessions::Database::new(db.clone());
let deltas_db = deltas::Database::new(db);
let git_credentials_helper =
git::credentials::Helper::new(inner.keys.clone(), inner.users.clone(), None);
let vbranch_controller = virtual_branches::Controller::new(
inner.local_app_data().to_owned(),
inner.projects.clone(),
inner.users.clone(),
inner.keys.clone(),
git_credentials_helper,
);
let assets_proxy = assets::Proxy::new(tmp.path().to_owned());
Fixture {
inner,
sessions_db,
deltas_db,
vbranch_controller,
assets_proxy,
events: None,
_tmp: tmp,
}
}
}
impl Fixture {
    /// Create a watcher handler wired to this fixture's databases and controllers.
    ///
    /// Must be mut as handler events are collected into the fixture automatically;
    /// note that this only works for the most recently created handler, as each
    /// call replaces the stored receiver.
    pub fn new_handler(&mut self) -> watcher::Handler {
        let (sender, receiver) = std::sync::mpsc::channel();
        self.events = Some(receiver);
        watcher::Handler::new(
            self.local_app_data().to_owned(),
            analytics::Client::default(),
            self.users.clone(),
            self.projects.clone(),
            self.vbranch_controller.clone(),
            self.assets_proxy.clone(),
            self.sessions_db.clone(),
            self.deltas_db.clone(),
            move |event| sender.send(event.clone()).map_err(Into::into),
        )
    }

    /// Returns the events that were emitted to the tauri app so far.
    ///
    /// Consumption is non-blocking (`try_iter`), so this is safe to call even
    /// while the handler is still alive and may emit further events.
    pub fn events(&mut self) -> Vec<gitbutler_tauri::Event> {
        match self.events.as_ref() {
            Some(receiver) => receiver.try_iter().collect(),
            None => Vec::new(),
        }
    }
}
}
use gitbutler_testsupport::init_opts_bare;
fn test_remote_repository() -> anyhow::Result<(git2::Repository, TempDir)> {
@ -8,7 +98,7 @@ fn test_remote_repository() -> anyhow::Result<(git2::Repository, TempDir)> {
Ok((repo_a, tmp))
}
mod calculate_delta_handler;
mod calculate_delta;
mod fetch_gitbutler_data;
mod git_file_change;
mod push_project_to_gitbutler;

View File

@ -2,10 +2,10 @@ use std::{collections::HashMap, path::PathBuf};
use anyhow::Result;
use gitbutler_core::{git, project_repository::LogUntil, projects};
use gitbutler_tauri::watcher::handlers::push_project_to_gitbutler::Handler;
use crate::watcher::handler::support::Fixture;
use crate::watcher::handler::test_remote_repository;
use gitbutler_testsupport::{virtual_branches::set_test_target, Case, Suite};
use gitbutler_testsupport::{virtual_branches::set_test_target, Case};
fn log_walk(repo: &git2::Repository, head: git::Oid) -> Vec<git::Oid> {
let mut walker = repo.revwalk().unwrap();
@ -15,8 +15,9 @@ fn log_walk(repo: &git2::Repository, head: git::Oid) -> Vec<git::Oid> {
#[tokio::test]
async fn push_error() -> Result<()> {
let suite = Suite::default();
let Case { project, .. } = &suite.new_case();
let mut fixture = Fixture::default();
let handler = fixture.new_handler();
let Case { project, .. } = &fixture.new_case();
let api_project = projects::ApiProject {
name: "test-sync".to_string(),
@ -29,7 +30,7 @@ async fn push_error() -> Result<()> {
sync: true,
};
suite
fixture
.projects
.update(&projects::UpdateRequest {
id: project.id,
@ -38,39 +39,30 @@ async fn push_error() -> Result<()> {
})
.await?;
let listener = Handler::new(
suite.local_app_data().into(),
suite.projects.clone(),
suite.users.clone(),
100,
);
let res = listener.handle(&project.id).await;
res.unwrap_err();
let res = handler.push_project_to_gitbutler(project.id, 100).await;
let err = res.unwrap_err();
assert_eq!(err.to_string(), "failed to get default target");
Ok(())
}
#[tokio::test]
async fn push_simple() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let handler = fixture.new_handler();
let Case {
project,
gb_repository,
project_repository,
..
} = &suite.new_case_with_files(HashMap::from([(PathBuf::from("test.txt"), "test")]));
suite.sign_in();
} = &fixture.new_case_with_files(HashMap::from([(PathBuf::from("test.txt"), "test")]));
fixture.sign_in();
set_test_target(gb_repository, project_repository).unwrap();
let target_id = gb_repository.default_target().unwrap().unwrap().sha;
let reference = project_repository.l(target_id, LogUntil::End).unwrap();
let (cloud_code, _tmp) = test_remote_repository()?;
let api_project = projects::ApiProject {
name: "test-sync".to_string(),
description: None,
@ -82,7 +74,7 @@ async fn push_simple() -> Result<()> {
sync: true,
};
suite
fixture
.projects
.update(&projects::UpdateRequest {
id: project.id,
@ -94,14 +86,10 @@ async fn push_simple() -> Result<()> {
cloud_code.find_commit(target_id.into()).unwrap_err();
{
let listener = Handler::new(
suite.local_app_data().into(),
suite.projects.clone(),
suite.users.clone(),
10,
);
let res = listener.handle(&project.id).await.unwrap();
assert!(res.is_empty());
handler
.push_project_to_gitbutler(project.id, 10)
.await
.unwrap();
}
cloud_code.find_commit(target_id.into()).unwrap();
@ -111,7 +99,7 @@ async fn push_simple() -> Result<()> {
assert_eq!(reference, pushed);
assert_eq!(
suite
fixture
.projects
.get(&project.id)
.unwrap()
@ -126,16 +114,16 @@ async fn push_simple() -> Result<()> {
#[tokio::test]
async fn push_remote_ref() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let handler = fixture.new_handler();
let Case {
project,
gb_repository,
project_repository,
..
} = &suite.new_case();
suite.sign_in();
} = &fixture.new_case();
fixture.sign_in();
set_test_target(gb_repository, project_repository).unwrap();
let (cloud_code, _tmp) = test_remote_repository()?;
@ -180,7 +168,7 @@ async fn push_remote_ref() -> Result<()> {
sync: true,
};
suite
fixture
.projects
.update(&projects::UpdateRequest {
id: project.id,
@ -190,17 +178,13 @@ async fn push_remote_ref() -> Result<()> {
.await?;
{
let listener = Handler::new(
suite.local_app_data().into(),
suite.projects.clone(),
suite.users.clone(),
10,
);
listener.handle(&project.id).await.unwrap();
handler
.push_project_to_gitbutler(project.id, 10)
.await
.unwrap();
}
cloud_code.find_commit(last_commit).unwrap();
Ok(())
}
@ -251,15 +235,16 @@ fn create_test_commits(repo: &git::Repository, commits: usize) -> git::Oid {
#[tokio::test]
async fn push_batches() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let handler = fixture.new_handler();
let Case {
project,
gb_repository,
project_repository,
..
} = &suite.new_case();
} = &fixture.new_case();
suite.sign_in();
fixture.sign_in();
{
let head: git::Oid = project_repository
@ -281,9 +266,7 @@ async fn push_batches() -> Result<()> {
set_test_target(gb_repository, project_repository).unwrap();
let target_id = gb_repository.default_target().unwrap().unwrap().sha;
let reference = project_repository.l(target_id, LogUntil::End).unwrap();
let (cloud_code, _tmp) = test_remote_repository()?;
let api_project = projects::ApiProject {
@ -297,7 +280,7 @@ async fn push_batches() -> Result<()> {
sync: true,
};
suite
fixture
.projects
.update(&projects::UpdateRequest {
id: project.id,
@ -307,13 +290,10 @@ async fn push_batches() -> Result<()> {
.await?;
{
let listener = Handler::new(
suite.local_app_data().into(),
suite.projects.clone(),
suite.users.clone(),
2,
);
listener.handle(&project.id).await.unwrap();
handler
.push_project_to_gitbutler(project.id, 2)
.await
.unwrap();
}
cloud_code.find_commit(target_id.into()).unwrap();
@ -323,7 +303,7 @@ async fn push_batches() -> Result<()> {
assert_eq!(reference, pushed);
assert_eq!(
suite
fixture
.projects
.get(&project.id)
.unwrap()
@ -338,22 +318,20 @@ async fn push_batches() -> Result<()> {
#[tokio::test]
async fn push_again_no_change() -> Result<()> {
let suite = Suite::default();
let mut fixture = Fixture::default();
let handler = fixture.new_handler();
let Case {
project,
gb_repository,
project_repository,
..
} = &suite.new_case_with_files(HashMap::from([(PathBuf::from("test.txt"), "test")]));
} = &fixture.new_case_with_files(HashMap::from([(PathBuf::from("test.txt"), "test")]));
suite.sign_in();
fixture.sign_in();
set_test_target(gb_repository, project_repository).unwrap();
let target_id = gb_repository.default_target().unwrap().unwrap().sha;
let reference = project_repository.l(target_id, LogUntil::End).unwrap();
let (cloud_code, _tmp) = test_remote_repository()?;
let api_project = projects::ApiProject {
@ -367,7 +345,7 @@ async fn push_again_no_change() -> Result<()> {
sync: true,
};
suite
fixture
.projects
.update(&projects::UpdateRequest {
id: project.id,
@ -379,14 +357,10 @@ async fn push_again_no_change() -> Result<()> {
cloud_code.find_commit(target_id.into()).unwrap_err();
{
let listener = Handler::new(
suite.local_app_data().into(),
suite.projects.clone(),
suite.users.clone(),
10,
);
let res = listener.handle(&project.id).await.unwrap();
assert!(res.is_empty());
handler
.push_project_to_gitbutler(project.id, 10)
.await
.unwrap();
}
cloud_code.find_commit(target_id.into()).unwrap();
@ -396,7 +370,7 @@ async fn push_again_no_change() -> Result<()> {
assert_eq!(reference, pushed);
assert_eq!(
suite
fixture
.projects
.get(&project.id)
.unwrap()

View File

@ -1,3 +1,4 @@
#![forbid(rust_2018_idioms)]
pub const VAR_NO_CLEANUP: &str = "GITBUTLER_TESTS_NO_CLEANUP";
mod test_project;

View File

@ -89,8 +89,7 @@ impl Suite {
}
}
pub struct Case<'a> {
suite: &'a Suite,
pub struct Case {
pub project: gitbutler_core::projects::Project,
pub project_repository: gitbutler_core::project_repository::Repository,
pub gb_repository: gitbutler_core::gb_repository::Repository,
@ -99,7 +98,7 @@ pub struct Case<'a> {
project_tmp: Option<TempDir>,
}
impl Drop for Case<'_> {
impl Drop for Case {
fn drop(&mut self) {
if let Some(tmp) = self
.project_tmp
@ -111,12 +110,12 @@ impl Drop for Case<'_> {
}
}
impl<'a> Case<'a> {
impl Case {
fn new(
suite: &'a Suite,
suite: &Suite,
project: gitbutler_core::projects::Project,
project_tmp: TempDir,
) -> Case<'a> {
) -> Case {
let project_repository = gitbutler_core::project_repository::Repository::open(&project)
.expect("failed to create project repository");
let gb_repository = gitbutler_core::gb_repository::Repository::open(
@ -128,7 +127,6 @@ impl<'a> Case<'a> {
let credentials =
gitbutler_core::git::credentials::Helper::from_path(suite.local_app_data());
Case {
suite,
project,
gb_repository,
project_repository,
@ -137,21 +135,19 @@ impl<'a> Case<'a> {
}
}
pub fn refresh(mut self) -> Self {
let project = self
.suite
pub fn refresh(mut self, suite: &Suite) -> Self {
let project = suite
.projects
.get(&self.project.id)
.expect("failed to get project");
let project_repository = gitbutler_core::project_repository::Repository::open(&project)
.expect("failed to create project repository");
let user = self.suite.users.get_user().expect("failed to get user");
let user = suite.users.get_user().expect("failed to get user");
let credentials =
gitbutler_core::git::credentials::Helper::from_path(self.suite.local_app_data());
gitbutler_core::git::credentials::Helper::from_path(suite.local_app_data());
Self {
suite: self.suite,
gb_repository: gitbutler_core::gb_repository::Repository::open(
self.suite.local_app_data(),
suite.local_app_data(),
&project_repository,
user.as_ref(),
)

View File

@ -241,7 +241,7 @@ impl TestProject {
.unwrap();
}
pub fn find_commit(&self, oid: git::Oid) -> Result<git::Commit, git::Error> {
pub fn find_commit(&self, oid: git::Oid) -> Result<git::Commit<'_>, git::Error> {
self.local_repository.find_commit(oid)
}
@ -316,7 +316,7 @@ impl TestProject {
.expect("failed to commit")
}
pub fn references(&self) -> Vec<git::Reference> {
pub fn references(&self) -> Vec<git::Reference<'_>> {
self.local_repository
.references()
.expect("failed to get references")