Merge pull request #3207 from gitbutlerapp/refactor-gitbutler-git-to-be-cli-only

refactor gitbutler-git to be cli-only and allow prompt handlers
Josh Junon 2024-03-19 08:38:25 +01:00 committed by GitHub
commit d88ff925d9
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
24 changed files with 465 additions and 2038 deletions

View File

@@ -14,6 +14,7 @@ jobs:
rust: ${{ steps.filter.outputs.rust }}
gitbutler-app: ${{ steps.filter.outputs.gitbutler-app }}
gitbutler-changeset: ${{ steps.filter.outputs.gitbutler-changeset }}
gitbutler-git: ${{ steps.filter.outputs.gitbutler-git }}
steps:
- uses: actions/checkout@v4
- uses: dorny/paths-filter@v3
@@ -36,6 +37,9 @@ jobs:
gitbutler-changeset:
- *rust
- 'gitbutler-changeset/**'
gitbutler-git:
- *rust
- 'gitbutler-git/**'
lint-node:
needs: changes
@@ -87,7 +91,7 @@ jobs:
- uses: ./.github/actions/init-env-rust
# TODO(qix-): we have to exclude the app here for now because for some
# TODO(qix-): reason it doesn't build with the docs feature enabled.
- run: cargo doc --no-deps --all-features --document-private-items -p gitbutler-changeset
- run: cargo doc --no-deps --all-features --document-private-items -p gitbutler-changeset -p gitbutler-git
env:
RUSTDOCFLAGS: -Dwarnings
@@ -139,12 +143,37 @@ jobs:
features: ${{ toJson(matrix.features) }}
action: ${{ matrix.action }}
check-gitbutler-git:
needs: [changes, rust-init]
if: ${{ needs.changes.outputs.gitbutler-git == 'true' }}
runs-on: ubuntu-latest
container:
image: ghcr.io/gitbutlerapp/ci-base-image:latest
strategy:
matrix:
action:
- test
- check
features:
- ''
- '*'
- []
- [tokio]
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/check-crate
with:
crate: gitbutler-git
features: ${{ toJson(matrix.features) }}
action: ${{ matrix.action }}
check-rust:
if: always()
needs:
- changes
- check-gitbutler-app
- check-gitbutler-changeset
- check-gitbutler-git
runs-on: ubuntu-latest
steps:
- name: Decide whether the needed jobs succeeded or failed

Cargo.lock generated
View File

@@ -1996,6 +1996,19 @@ dependencies = [
"thiserror",
]
[[package]]
name = "gitbutler-git"
version = "0.0.0"
dependencies = [
"futures",
"nix 0.27.1",
"rand 0.8.5",
"serde",
"sysinfo",
"thiserror",
"tokio",
]
[[package]]
name = "glib"
version = "0.15.12"
@@ -2970,7 +2983,7 @@ dependencies = [
"combine",
"libc",
"mach2",
"nix",
"nix 0.26.4",
"sysctl",
"thiserror",
"widestring",
@@ -3042,6 +3055,18 @@ dependencies = [
"pin-utils",
]
[[package]]
name = "nix"
version = "0.27.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053"
dependencies = [
"bitflags 2.4.0",
"cfg-if",
"libc",
"memoffset 0.9.0",
]
[[package]]
name = "no-std-compat"
version = "0.4.1"
@@ -3103,6 +3128,15 @@ dependencies = [
"walkdir",
]
[[package]]
name = "ntapi"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4"
dependencies = [
"winapi",
]
[[package]]
name = "nu-ansi-term"
version = "0.46.0"
@@ -5230,6 +5264,21 @@ dependencies = [
"walkdir",
]
[[package]]
name = "sysinfo"
version = "0.30.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c385888ef380a852a16209afc8cfad22795dd8873d69c9a14d2e2088f118d18"
dependencies = [
"cfg-if",
"core-foundation-sys",
"libc",
"ntapi",
"once_cell",
"rayon",
"windows 0.52.0",
]
[[package]]
name = "system-configuration"
version = "0.5.1"
@@ -6496,6 +6545,16 @@ dependencies = [
"windows-targets 0.48.5",
]
[[package]]
name = "windows"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e48a53791691ab099e5e2ad123536d0fff50652600abaf43bbf952894110d0be"
dependencies = [
"windows-core",
"windows-targets 0.52.0",
]
[[package]]
name = "windows-bindgen"
version = "0.39.0"
@@ -6506,6 +6565,15 @@ dependencies = [
"windows-tokens",
]
[[package]]
name = "windows-core"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9"
dependencies = [
"windows-targets 0.52.0",
]
[[package]]
name = "windows-implement"
version = "0.39.0"
@@ -6931,7 +6999,7 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2769203cd13a0c6015d515be729c526d041e9cf2c0cc478d57faee85f40c6dcd"
dependencies = [
"nix",
"nix 0.26.4",
"winapi",
]
@@ -6965,7 +7033,7 @@ dependencies = [
"futures-sink",
"futures-util",
"hex",
"nix",
"nix 0.26.4",
"once_cell",
"ordered-stream",
"rand 0.8.5",

View File

@@ -2,6 +2,7 @@
members = [
"gitbutler-app",
"gitbutler-changeset",
"gitbutler-git",
]
resolver = "2"

View File

@@ -8,37 +8,24 @@ path = "src/lib.rs"
[[bin]]
name = "gitbutler-git-askpass"
path = "src/backend/cli/bin/askpass.rs"
required-features = ["cli"]
path = "src/cli/bin/askpass.rs"
[[bin]]
name = "gitbutler-git-setsid"
path = "src/backend/cli/bin/setsid.rs"
required-features = ["cli"]
path = "src/cli/bin/setsid.rs"
[features]
default = ["git2", "cli", "serde", "tokio"]
cli = ["dep:nix", "dep:rand", "dep:futures", "dep:sysinfo"]
git2 = ["dep:git2", "dep:dirs"]
default = ["serde", "tokio"]
serde = ["dep:serde"]
tokio = ["dep:tokio"]
[dependencies]
thiserror.workspace = true
git2 = { workspace = true, optional = true }
serde = { workspace = true, optional = true }
tokio = { workspace = true, optional = true, features = ["process", "rt", "time", "io-util", "net", "fs", "sync"] }
rand = { version = "0.8.5", optional = true }
futures = { version = "0.3.30", optional = true }
sysinfo = { version = "0.30.5", optional = true }
dirs = { version = "5.0.1", optional = true }
[dev-dependencies]
git2.workspace = true # Used for tests
async-trait = "0.1.77"
russh = { version = "0.41.0-beta.4", features = ["openssl"] }
russh-keys = "0.41.0-beta.3"
tokio = { workspace = true, features = ["rt-multi-thread"] }
rand = "0.8.5"
futures = "0.3.30"
sysinfo = "0.30.5"
[target."cfg(unix)".dependencies]
nix = { version = "0.27.1", optional = true, features = ["process", "socket", "user"] }
nix = { version = "0.27.1", features = ["process", "socket", "user"] }

View File

@@ -1,6 +1,2 @@
#[cfg(feature = "cli")]
pub mod cli;
// We use the libgit2 backend for tests as well.
#[cfg(any(test, feature = "git2"))]
pub mod git2;

View File

@@ -1,37 +0,0 @@
//! CLI-based (fork/exec) backend implementation,
//! executing the `git` command-line tool available
//! on `$PATH`.
mod executor;
mod repository;
#[cfg(unix)]
pub use self::executor::Uid;
pub use self::{
executor::{AskpassServer, FileStat, GitExecutor, Pid, Socket},
repository::Repository,
};
#[cfg(feature = "tokio")]
pub use self::executor::tokio;
#[cfg(test)]
mod tests {
use super::*;
async fn make_repo(test_name: String) -> impl crate::Repository {
let repo_path = std::env::temp_dir()
.join("gitbutler-tests")
.join("git")
.join("cli")
.join(test_name);
let _ = std::fs::remove_dir_all(&repo_path);
std::fs::create_dir_all(&repo_path).unwrap();
Repository::open_or_init(executor::tokio::TokioExecutor, repo_path.to_str().unwrap())
.await
.unwrap()
}
crate::gitbutler_git_integration_tests!(make_repo, enable_io);
}

View File

@@ -1,632 +0,0 @@
use super::executor::{AskpassServer, GitExecutor, Pid, Socket};
use crate::{Authorization, ConfigScope, RefSpec};
use futures::{select, FutureExt};
use rand::Rng;
use std::{collections::HashMap, time::Duration};
/// The number of characters in the secret used for checking
/// askpass invocations by ssh/git when connecting to our process.
const ASKPASS_SECRET_LENGTH: usize = 24;
/// Higher level errors that can occur when interacting with the CLI.
///
/// You probably don't want to use this type. Use [`Error`] instead.
#[derive(Debug, thiserror::Error)]
pub enum RepositoryError<
Eexec: std::error::Error + core::fmt::Debug + Send + Sync + 'static,
Easkpass: std::error::Error + core::fmt::Debug + Send + Sync + 'static,
Esocket: std::error::Error + core::fmt::Debug + Send + Sync + 'static,
> {
#[error("failed to execute git command: {0}")]
Exec(Eexec),
#[error("failed to create askpass server: {0}")]
AskpassServer(Easkpass),
#[error("i/o error communicating with askpass utility: {0}")]
AskpassIo(Esocket),
#[error(
"git command exited with non-zero exit code {status}: {args:?}\n\nSTDOUT:\n{stdout}\n\nSTDERR:\n{stderr}"
)]
Failed {
status: usize,
args: Vec<String>,
stdout: String,
stderr: String,
},
#[error("failed to determine path to this executable: {0}")]
NoSelfExe(std::io::Error),
#[error("askpass secret mismatch")]
AskpassSecretMismatch,
#[error("git requires authorization credentials but none were provided: prompt was {0:?}")]
NeedsAuthorization(String),
#[error("unable to determine PID of askpass peer: {0}")]
NoPid(Esocket),
#[cfg(unix)]
#[error("unable to determine UID of askpass peer: {0}")]
NoUid(Esocket),
#[error("failed to perform askpass security check; no such PID: {0}")]
NoSuchPid(Pid),
#[error("failed to perform askpass security check; device mismatch")]
AskpassDeviceMismatch,
#[error("failed to perform askpass security check; executable mismatch")]
AskpassExecutableMismatch,
}
/// Higher level errors that can occur when interacting with the CLI.
pub type Error<E> = RepositoryError<
<E as GitExecutor>::Error,
<<E as GitExecutor>::ServerHandle as AskpassServer>::Error,
<<<E as GitExecutor>::ServerHandle as AskpassServer>::SocketHandle as Socket>::Error,
>;
/// A [`crate::Repository`] implementation using the `git` CLI
/// and the given [`GitExecutor`] implementation.
pub struct Repository<E: GitExecutor> {
exec: E,
path: String,
}
impl<E: GitExecutor> Repository<E> {
/// Opens a repository using the given [`GitExecutor`].
///
/// Note that this **does not** check if the repository exists,
/// but assumes it does.
#[inline]
pub fn open_unchecked<P: AsRef<str>>(exec: E, path: P) -> Self {
Self {
exec,
path: path.as_ref().to_owned(),
}
}
/// (Re-)initializes a repository at the given path
/// using the given [`GitExecutor`].
#[cold]
pub async fn open_or_init<P: AsRef<str>>(exec: E, path: P) -> Result<Self, Error<E>> {
let path = path.as_ref().to_owned();
let args = vec!["init", "--quiet", &path];
let (exit_code, stdout, stderr) =
exec.execute(&args, None).await.map_err(Error::<E>::Exec)?;
if exit_code == 0 {
Ok(Self { exec, path })
} else {
Err(Error::<E>::Failed {
status: exit_code,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
})
}
}
/// (Re-)initializes a bare repository at the given path
/// using the given [`GitExecutor`].
#[cold]
pub async fn open_or_init_bare<P: AsRef<str>>(exec: E, path: P) -> Result<Self, Error<E>> {
let path = path.as_ref().to_owned();
let args = vec!["init", "--bare", "--quiet", &path];
let (exit_code, stdout, stderr) =
exec.execute(&args, None).await.map_err(Error::<E>::Exec)?;
if exit_code == 0 {
Ok(Self { exec, path })
} else {
Err(Error::<E>::Failed {
status: exit_code,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
})
}
}
#[cold]
async fn execute_with_auth_harness(
&self,
args: &[&str],
envs: Option<HashMap<String, String>>,
authorization: &Authorization,
) -> Result<(usize, String, String), Error<E>> {
let path = std::env::current_exe().map_err(Error::<E>::NoSelfExe)?;
// TODO(qix-): Get parent PID of connecting processes to make sure they're us.
//let our_pid = std::process::id();
// TODO(qix-): This is a bit of a hack. Under a test environment,
// TODO(qix-): Cargo is running a test runner with a quasi-random
// TODO(qix-): suffix. The actual executables live in the parent directory.
// TODO(qix-): Thus, we have to do this under test. It's not ideal, but
// TODO(qix-): it works for now.
#[cfg(test)]
let path = path.parent().unwrap();
let askpath_path = path
.with_file_name("gitbutler-git-askpass")
.to_string_lossy()
.into_owned();
#[cfg(not(target_os = "windows"))]
let setsid_path = path
.with_file_name("gitbutler-git-setsid")
.to_string_lossy()
.into_owned();
let askpath_stat = self
.exec
.stat(&askpath_path)
.await
.map_err(Error::<E>::Exec)?;
#[cfg(not(target_os = "windows"))]
let setsid_stat = self
.exec
.stat(&setsid_path)
.await
.map_err(Error::<E>::Exec)?;
#[allow(unsafe_code)]
let sock_server = unsafe { self.exec.create_askpass_server() }
.await
.map_err(Error::<E>::Exec)?;
// FIXME(qix-): This is probably not cryptographically secure, did this in a bit
// FIXME(qix-): of a hurry. We should probably use a proper CSPRNG here, but this
// FIXME(qix-): is probably fine for now (as this security mechanism is probably
// FIXME(qix-): overkill to begin with).
let secret = rand::thread_rng()
.sample_iter(&rand::distributions::Alphanumeric)
.take(ASKPASS_SECRET_LENGTH)
.map(char::from)
.collect::<String>();
let mut envs = envs.unwrap_or_default();
envs.insert("GITBUTLER_ASKPASS_PIPE".into(), sock_server.to_string());
envs.insert("GITBUTLER_ASKPASS_SECRET".into(), secret.clone());
envs.insert("SSH_ASKPASS".into(), askpath_path);
// DISPLAY is required by SSH to check SSH_ASKPASS.
// Please don't ask us why, it's unclear.
if !std::env::var("DISPLAY")
.map(|v| !v.is_empty())
.unwrap_or(false)
{
envs.insert("DISPLAY".into(), ":".into());
}
envs.insert(
"GIT_SSH_COMMAND".into(),
format!(
"{}{}{} -o StrictHostKeyChecking=accept-new -o KbdInteractiveAuthentication=no{}",
{
#[cfg(not(target_os = "windows"))]
{
format!("{setsid_path} ")
}
#[cfg(target_os = "windows")]
{
""
}
},
envs.get("GIT_SSH_COMMAND").unwrap_or(&"ssh".into()),
match authorization {
Authorization::Ssh { .. } => " -o PreferredAuthentications=publickey",
Authorization::Basic { .. } => " -o PreferredAuthentications=password",
_ => "",
},
{
// In test environments, we don't want to pollute the user's known hosts file.
// So, we just use /dev/null instead.
#[cfg(test)]
{
" -o UserKnownHostsFile=/dev/null"
}
#[cfg(not(test))]
{
""
}
}
),
);
if let Authorization::Ssh {
private_key: Some(private_key),
..
} = authorization
{
envs.insert("GIT_SSH_VARIANT".into(), "ssh".into());
envs.insert("GIT_SSH_KEY".into(), private_key.clone());
}
let mut child_process = core::pin::pin! {
async {
self.exec
.execute(args, Some(envs))
.await
.map_err(Error::<E>::Exec)
}.fuse()
};
loop {
select! {
res = child_process => {
return res;
},
res = sock_server.accept(Some(Duration::from_secs(60))).fuse() => {
let mut sock = res.map_err(Error::<E>::AskpassServer)?;
// get the PID of the peer
let peer_pid = sock.pid().map_err(Error::<E>::NoPid)?;
// get the full image path of the peer id; this is pretty expensive at the moment.
// TODO(qix-): see if dropping sysinfo for a more bespoke implementation is worth it.
let mut system = sysinfo::System::new();
system.refresh_processes();
let peer_path = system
.process(sysinfo::Pid::from_u32(peer_pid.try_into().map_err(|_| Error::<E>::NoSuchPid(peer_pid))?))
.and_then(|p| p.exe().map(|exe| exe.to_string_lossy().into_owned()))
.ok_or(Error::<E>::NoSuchPid(peer_pid))?;
// stat the askpass executable that is being invoked
let peer_stat = self.exec.stat(&peer_path).await.map_err(Error::<E>::Exec)?;
if peer_stat.ino == askpath_stat.ino {
if peer_stat.dev != askpath_stat.dev {
return Err(Error::<E>::AskpassDeviceMismatch)?;
}
} else if peer_stat.ino == setsid_stat.ino {
if peer_stat.dev != setsid_stat.dev {
return Err(Error::<E>::AskpassDeviceMismatch)?;
}
} else {
return Err(Error::<E>::AskpassExecutableMismatch)?;
}
// await for peer to send secret
let peer_secret = sock.read_line().await.map_err(Error::<E>::AskpassIo)?;
// check the secret
if peer_secret.trim() != secret {
return Err(Error::<E>::AskpassSecretMismatch)?;
}
// get the prompt
let prompt = sock.read_line().await.map_err(Error::<E>::AskpassIo)?;
// TODO(qix-): The prompt matching logic here is fragile as the remote
// TODO(qix-): can customize prompts. I need to investigate if there's
// TODO(qix-): a better way to do this.
match authorization {
Authorization::Auto => {
return Err(Error::<E>::NeedsAuthorization(prompt))?;
}
Authorization::Basic{username, password} => {
if prompt.to_lowercase().contains("username:") || prompt.to_lowercase().contains("username for") {
if let Some(username) = username {
sock.write_line(username).await.map_err(Error::<E>::AskpassIo)?;
} else {
return Err(Error::<E>::NeedsAuthorization(prompt))?;
}
} else if prompt.to_lowercase().contains("password:") || prompt.to_lowercase().contains("password for") {
if let Some(password) = password {
sock.write_line(password).await.map_err(Error::<E>::AskpassIo)?;
} else {
return Err(Error::<E>::NeedsAuthorization(prompt))?;
}
} else {
return Err(Error::<E>::NeedsAuthorization(prompt))?;
}
},
Authorization::Ssh { passphrase, .. } => {
if let Some(passphrase) = passphrase {
if prompt.contains("passphrase for key") {
sock.write_line(passphrase).await.map_err(Error::<E>::AskpassIo)?;
continue;
}
}
return Err(Error::<E>::NeedsAuthorization(prompt))?;
}
}
}
}
}
}
}
impl<E: GitExecutor + 'static> crate::Repository for Repository<E> {
type Error = Error<E>;
async fn config_get(
&self,
key: &str,
scope: ConfigScope,
) -> Result<Option<String>, crate::Error<Self::Error>> {
let mut args = vec!["-C", &self.path, "config", "--get"];
// NOTE(qix-): See source comments for ConfigScope to explain
// NOTE(qix-): the `#[cfg(not(test))]` attributes.
match scope {
#[cfg(not(test))]
ConfigScope::Auto => {}
ConfigScope::Local => args.push("--local"),
#[cfg(not(test))]
ConfigScope::System => args.push("--system"),
#[cfg(not(test))]
ConfigScope::Global => args.push("--global"),
}
args.push(key);
let (exit_code, stdout, stderr) = self
.exec
.execute(&args, None)
.await
.map_err(Error::<E>::Exec)?;
if exit_code == 0 {
Ok(Some(stdout))
} else if exit_code != 0 && stderr.is_empty() {
Ok(None)
} else {
Err(Error::<E>::Failed {
status: exit_code,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
})?
}
}
async fn config_set(
&self,
key: &str,
value: &str,
scope: ConfigScope,
) -> Result<(), crate::Error<Self::Error>> {
let mut args = vec!["-C", &self.path, "config", "--replace-all"];
// NOTE(qix-): See source comments for ConfigScope to explain
// NOTE(qix-): the `#[cfg(not(test))]` attributes.
match scope {
#[cfg(not(test))]
ConfigScope::Auto => {}
ConfigScope::Local => args.push("--local"),
#[cfg(not(test))]
ConfigScope::System => args.push("--system"),
#[cfg(not(test))]
ConfigScope::Global => args.push("--global"),
}
args.push(key);
args.push(value);
let (exit_code, stdout, stderr) = self
.exec
.execute(&args, None)
.await
.map_err(Error::<E>::Exec)?;
if exit_code == 0 {
Ok(())
} else {
Err(Error::<E>::Failed {
status: exit_code,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
})?
}
}
async fn fetch(
&self,
remote: &str,
refspec: RefSpec,
authorization: &Authorization,
) -> Result<(), crate::Error<Self::Error>> {
let mut args = vec![
"-C",
&self.path,
"fetch",
"--quiet",
"--no-write-fetch-head",
];
let refspec = refspec.to_string();
args.push(remote);
args.push(&refspec);
let (status, stdout, stderr) = self
.execute_with_auth_harness(&args, None, authorization)
.await?;
if status == 0 {
Ok(())
} else {
// Was the ref not found?
if let Some(refname) = stderr
.lines()
.find(|line| line.to_lowercase().contains("couldn't find remote ref"))
.map(|line| line.split_whitespace().last().unwrap_or_default())
{
Err(crate::Error::RefNotFound(refname.to_owned()))?
} else if stderr.to_lowercase().contains("permission denied") {
Err(crate::Error::AuthorizationFailed(Error::<E>::Failed {
status,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
}))?
} else {
Err(Error::<E>::Failed {
status,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
})?
}
}
}
async fn create_remote(
&self,
remote: &str,
uri: &str,
) -> Result<(), crate::Error<Self::Error>> {
let args = vec!["-C", &self.path, "remote", "add", remote, uri];
let (status, stdout, stderr) = self
.exec
.execute(&args, None)
.await
.map_err(Error::<E>::Exec)?;
if status != 0 {
Err(Error::<E>::Failed {
status,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
})?
} else {
Ok(())
}
}
async fn create_or_update_remote(
&self,
remote: &str,
uri: &str,
) -> Result<(), crate::Error<Self::Error>> {
let created = self
.create_remote(remote, uri)
.await
.map(|_| true)
.or_else(|e| match e {
crate::Error::RemoteExists(..) => Ok(false),
e => Err(e),
})?;
if created {
return Ok(());
}
let args = vec!["-C", &self.path, "remote", "set-url", remote, uri];
let (status, stdout, stderr) = self
.exec
.execute(&args, None)
.await
.map_err(Error::<E>::Exec)?;
if status == 0 {
Ok(())
} else if status != 0 && stderr.to_lowercase().contains("error: no such remote") {
self.create_remote(remote, uri).await
} else {
Err(Error::<E>::Failed {
status,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
})?
}
}
async fn remote(&self, remote: &str) -> Result<String, crate::Error<Self::Error>> {
let args = vec!["-C", &self.path, "remote", "get-url", remote];
let (status, stdout, stderr) = self
.exec
.execute(&args, None)
.await
.map_err(Error::<E>::Exec)?;
if status == 0 {
Ok(stdout)
} else if status != 0 && stderr.to_lowercase().contains("error: no such remote") {
Err(crate::Error::NoSuchRemote(
remote.to_owned(),
Error::<E>::Failed {
status,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
},
))?
} else {
Err(Error::<E>::Failed {
status,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
})?
}
}
async fn head(&self) -> Result<String, crate::Error<Self::Error>> {
let args = vec!["-C", &self.path, "rev-parse", "HEAD"];
let (status, stdout, stderr) = self
.exec
.execute(&args, None)
.await
.map_err(Error::<E>::Exec)?;
if status == 0 {
Ok(stdout.to_owned())
} else {
Err(Error::<E>::Failed {
status,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
})?
}
}
async fn symbolic_head(&self) -> Result<String, crate::Error<Self::Error>> {
let args = vec!["-C", &self.path, "symbolic-ref", "HEAD"];
let (status, stdout, stderr) = self
.exec
.execute(&args, None)
.await
.map_err(Error::<E>::Exec)?;
if status != 0 {
return Err(Error::<E>::Failed {
status,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
})?;
}
// now we try to rev-parse it because the Git CLI will always
// return the default branch as a ref/head/... even if there
// is nothing on that branch (no history).
let refname = stdout.to_owned();
let args = vec!["-C", &self.path, "rev-parse", "--verify", &refname];
let (status, stdout, stderr) = self
.exec
.execute(&args, None)
.await
.map_err(Error::<E>::Exec)?;
if status == 0 {
Ok(refname)
} else {
Err(Error::<E>::Failed {
status,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
})?
}
}
}

View File

@@ -1,36 +0,0 @@
//! [libgit2](https://libgit2.org/) implementation of
//! the core `gitbutler-git` library traits.
//!
//! The entry point for this module is the [`Repository`] struct.
mod repository;
mod thread_resource;
#[cfg(feature = "tokio")]
pub use self::thread_resource::tokio;
pub use self::{
repository::Repository,
thread_resource::{ThreadedResource, ThreadedResourceHandle},
};
#[cfg(test)]
mod tests {
use super::*;
async fn make_repo(test_name: String) -> impl crate::Repository {
let repo_path = std::env::temp_dir()
.join("gitbutler-tests")
.join("git")
.join("git2")
.join(test_name);
let _ = std::fs::remove_dir_all(&repo_path);
std::fs::create_dir_all(&repo_path).unwrap();
Repository::<tokio::TokioThreadedResource>::open_or_init(&repo_path)
.await
.unwrap()
}
crate::gitbutler_git_integration_tests!(make_repo, disable_io);
}

View File

@@ -1,347 +0,0 @@
use super::{ThreadedResource, ThreadedResourceHandle};
use crate::{Authorization, ConfigScope, RefSpec};
use std::path::{Path, PathBuf};
/// A [`crate::Repository`] implementation using the `git2` crate.
pub struct Repository<R: ThreadedResource> {
repo: R::Handle<git2::Repository>,
}
impl<R: ThreadedResource> Repository<R> {
/// Initializes a repository at the given path.
///
/// Errors if the repository is already initialized.
#[inline]
pub async fn init<P: AsRef<Path>>(path: P) -> Result<Self, git2::Error> {
let path = path.as_ref().to_path_buf();
Ok(Self {
repo: R::new(|| {
git2::Repository::init_opts(
path,
git2::RepositoryInitOptions::new().no_reinit(true),
)
})
.await?,
})
}
/// Opens a repository at the given path, or initializes it if it doesn't exist.
#[inline]
pub async fn open_or_init<P: AsRef<Path>>(path: P) -> Result<Self, git2::Error> {
let path = path.as_ref().to_path_buf();
Ok(Self {
repo: R::new(|| {
git2::Repository::init_opts(
path,
git2::RepositoryInitOptions::new().no_reinit(false),
)
})
.await?,
})
}
/// Initializes a bare repository at the given path.
///
/// Errors if the repository is already initialized.
#[inline]
pub async fn init_bare<P: AsRef<Path>>(path: P) -> Result<Self, git2::Error> {
let path = path.as_ref().to_path_buf();
Ok(Self {
repo: R::new(|| {
git2::Repository::init_opts(
path,
git2::RepositoryInitOptions::new()
.no_reinit(true)
.bare(true),
)
})
.await?,
})
}
/// Opens a repository at the given path, or initializes a new bare repository
/// if it doesn't exist.
#[inline]
pub async fn open_or_init_bare<P: AsRef<Path>>(path: P) -> Result<Self, git2::Error> {
let path = path.as_ref().to_path_buf();
Ok(Self {
repo: R::new(|| {
git2::Repository::init_opts(
path,
git2::RepositoryInitOptions::new()
.no_reinit(false)
.bare(true),
)
})
.await?,
})
}
/// Opens a repository at the given path.
/// Will error if there's no existing repository at the given path.
#[inline]
pub async fn open<P: AsRef<Path>>(path: P) -> Result<Self, git2::Error> {
let path = path.as_ref().to_path_buf();
Ok(Self {
repo: R::new(|| git2::Repository::open(path)).await?,
})
}
}
impl<R: ThreadedResource> crate::Repository for Repository<R> {
type Error = git2::Error;
async fn config_get(
&self,
key: &str,
#[cfg_attr(test, allow(unused_variables))] scope: ConfigScope,
) -> Result<Option<String>, crate::Error<Self::Error>> {
let key = key.to_owned();
self.repo
.with(move |repo| {
let config = repo.config()?;
#[cfg(test)]
let scope = ConfigScope::Local;
// NOTE(qix-): See source comments for ConfigScope to explain
// NOTE(qix-): the `#[cfg(not(test))]` attributes.
let res = match scope {
#[cfg(not(test))]
ConfigScope::Auto => config.get_string(&key),
ConfigScope::Local => config
.open_level(git2::ConfigLevel::Local)?
.get_string(&key),
#[cfg(not(test))]
ConfigScope::System => config
.open_level(git2::ConfigLevel::System)?
.get_string(&key),
#[cfg(not(test))]
ConfigScope::Global => config
.open_level(git2::ConfigLevel::Global)?
.get_string(&key),
};
Ok(res.map(Some).or_else(|e| {
if e.code() == git2::ErrorCode::NotFound {
Ok(None)
} else {
Err(e)
}
})?)
})
.await
.await
}
async fn config_set(
&self,
key: &str,
value: &str,
#[cfg_attr(test, allow(unused_variables))] scope: ConfigScope,
) -> Result<(), crate::Error<Self::Error>> {
let key = key.to_owned();
let value = value.to_owned();
self.repo
.with(move |repo| {
#[cfg_attr(test, allow(unused_mut))]
let mut config = repo.config()?;
#[cfg(test)]
let scope = ConfigScope::Local;
// NOTE(qix-): See source comments for ConfigScope to explain
// NOTE(qix-): the `#[cfg(not(test))]` attributes.
match scope {
#[cfg(not(test))]
ConfigScope::Auto => Ok(config.set_str(&key, &value)?),
ConfigScope::Local => Ok(config
.open_level(git2::ConfigLevel::Local)?
.set_str(&key, &value)?),
#[cfg(not(test))]
ConfigScope::System => Ok(config
.open_level(git2::ConfigLevel::System)?
.set_str(&key, &value)?),
#[cfg(not(test))]
ConfigScope::Global => Ok(config
.open_level(git2::ConfigLevel::Global)?
.set_str(&key, &value)?),
}
})
.await
.await
}
async fn fetch(
&self,
remote: &str,
refspec: RefSpec,
authorization: &Authorization,
) -> Result<(), crate::Error<Self::Error>> {
let remote = remote.to_owned();
let authorization = authorization.clone();
self.repo
.with(move |repo| {
let mut remote = repo.find_remote(&remote)?;
let mut callbacks = git2::RemoteCallbacks::new();
callbacks.credentials(|_url, username, _allowed| {
let auth = match &authorization {
Authorization::Auto => {
let cred = git2::Cred::default()?;
Ok(cred)
}
Authorization::Basic { username, password } => {
let username = username.as_deref().unwrap_or_default();
let password = password.as_deref().unwrap_or_default();
git2::Cred::userpass_plaintext(username, password)
}
Authorization::Ssh {
passphrase,
private_key,
} => {
let private_key =
private_key.as_ref().map(PathBuf::from).unwrap_or_else(|| {
let mut path = dirs::home_dir().unwrap();
path.push(".ssh");
path.push("id_rsa");
path
});
let username = username
.map(ToOwned::to_owned)
.unwrap_or_else(|| std::env::var("USER").unwrap_or_default());
git2::Cred::ssh_key(
&username,
None,
&private_key,
passphrase.clone().as_deref(),
)
}
};
auth
});
let mut fetch_options = git2::FetchOptions::new();
fetch_options.remote_callbacks(callbacks);
let refspec = refspec.to_string();
let r = remote.fetch(&[&refspec], Some(&mut fetch_options), None);
r.map_err(|e| {
if e.code() == git2::ErrorCode::NotFound {
crate::Error::RefNotFound(refspec)
} else {
e.into()
}
})
})
.await
.await
}
async fn create_remote(
&self,
remote: &str,
uri: &str,
) -> Result<(), crate::Error<Self::Error>> {
let remote = remote.to_owned();
let uri = uri.to_owned();
self.repo
.with(move |repo| {
repo.remote(&remote, &uri).map_err(|e| {
if e.code() == git2::ErrorCode::Exists {
crate::Error::RemoteExists(remote.to_owned(), e)
} else {
e.into()
}
})?;
Ok(())
})
.await
.await
}
async fn create_or_update_remote(
&self,
remote: &str,
uri: &str,
) -> Result<(), crate::Error<Self::Error>> {
let remote = remote.to_owned();
let uri = uri.to_owned();
self.repo
.with(move |repo| {
let r = repo
.find_remote(&remote)
.and_then(|_| repo.remote_set_url(&remote, &uri));
if let Err(e) = r {
if e.code() == git2::ErrorCode::NotFound {
repo.remote(&remote, &uri)?;
} else {
Err(e)?
}
}
Ok(())
})
.await
.await
}
async fn remote(&self, remote: &str) -> Result<String, crate::Error<Self::Error>> {
let remote = remote.to_owned();
self.repo
.with(move |repo| {
let r = repo.find_remote(&remote);
let r = match r {
Err(e) if e.code() == git2::ErrorCode::NotFound => {
return Err(crate::Error::NoSuchRemote(remote, e))?;
}
Err(e) => {
return Err(e)?;
}
Ok(r) => r,
};
let url = r.url().ok_or_else(|| {
crate::Error::NoSuchRemote(remote, git2::Error::from_str("remote has no URL"))
})?;
Ok(url.to_string())
})
.await
.await
}
async fn head(&self) -> Result<String, crate::Error<Self::Error>> {
self.repo
.with(|repo| {
// We can unwrap here because we assert that the target of the
// `.target()` call is a direct reference due to calling
// `.resolve()` immediately before it.
Ok(repo.head()?.resolve()?.target().unwrap().to_string())
})
.await
.await
}
async fn symbolic_head(&self) -> Result<String, crate::Error<Self::Error>> {
self.repo
.with(|repo| Ok(String::from_utf8_lossy(repo.head()?.name_bytes()).to_string()))
.await
.await
}
}

View File

@@ -1,56 +0,0 @@
#[cfg(any(test, feature = "tokio"))]
pub mod tokio;
/// A resource that is held on an owning thread, and that can be
/// asynchronously locked and interacted with via lambda functions.
///
/// This is used to interact with `git2` resources in a thread-safe
/// manner, since `git2` is not thread-safe nor asynchronous.
pub trait ThreadedResource {
/// The type of handle returned by [`Self::new`].
type Handle<T: Unpin + Sized + 'static>: ThreadedResourceHandle<T>;
/// Creates a new resource; the function passed in will be
/// executed on the owning thread, the result of which becomes
/// the owned value that is later interacted with.
async fn new<T, F, E>(f: F) -> Result<Self::Handle<T>, E>
where
F: FnOnce() -> Result<T, E> + Send + 'static,
T: Unpin + Sized + 'static,
E: Send + 'static;
}
/// A handle to a resource that is held on an owning thread.
/// This handle can be used to asynchronously lock the resource
/// and interact with it via lambda functions.
///
/// Returned by [`ThreadedResource::new`].
pub trait ThreadedResourceHandle<T: Unpin + Sized + 'static> {
/// The type of future returned by [`Self::with`].
type WithFuture<'a, R>: std::future::Future<Output = R> + Send
where
Self: 'a,
R: Send + Unpin + 'static;
/// Locks the resource, and passes the locked value to the given
/// function, which can then interact with it. The function is
/// executed on the owning thread, and the result is returned
/// to the calling thread asynchronously.
///
/// Note that this is an async-async function - meaning, it
/// must be awaited in order to receive the future that actually
/// executes the code, which itself must also be awaited.
//
// FIXME(qix-): I think I'm too stupid to understand pinning and phantom
// FIXME(qix-): data, regardless of how many times I deep-dive into it.
// FIXME(qix-): I'm now ~48 hours (nearly straight) into this problem,
// FIXME(qix-): and I've lost a great deal of sanity trying to figure out
// FIXME(qix-): how to make this work. For now, the async-async function
// FIXME(qix-): will have to do, but I'm not happy with it. If you know
// FIXME(qix-): how to make this work, please PLEASE please send a PR.
// FIXME(qix-): I'm losing sleep and hair over this.
async fn with<F, R>(&self, f: F) -> Self::WithFuture<'_, R>
where
F: FnOnce(&mut T) -> R + Send + Unpin + 'static,
R: Send + Unpin + 'static;
}

View File

@@ -1,177 +0,0 @@
//! A [Tokio](https://tokio.rs)-based implementation for [libgit2](https://libgit2.org/)
//! repository backends, allowing normally blocking libgit2 operations to be run on a
//! threadpool, asynchronously.
use futures::Future;
use std::{
pin::Pin,
sync::{atomic::AtomicBool, Arc, Barrier, Mutex as SyncMutex},
task::{Context, Poll, Waker},
thread::{JoinHandle, Thread},
};
use tokio::sync::Mutex as AsyncMutex;
/// A [`super::ThreadedResource`] implementation using Tokio.
pub struct TokioThreadedResource;
/// A [`super::ThreadedResourceHandle`] implementation using Tokio.
pub struct TokioThreadedResourceHandle<T: Unpin + Sized + 'static> {
terminate: Arc<AtomicBool>,
thread: JoinHandle<()>,
access_control_mutex: Arc<AsyncMutex<()>>,
#[allow(clippy::type_complexity)]
slot: Arc<SyncMutex<Option<(Waker, Box<dyn FnOnce(&mut T) + Send>)>>>,
}
impl super::ThreadedResource for TokioThreadedResource {
type Handle<T: Unpin + Sized + 'static> = TokioThreadedResourceHandle<T>;
async fn new<T, F, E>(f: F) -> Result<Self::Handle<T>, E>
where
F: FnOnce() -> Result<T, E> + Send + 'static,
T: Unpin + Sized + 'static,
E: Send + 'static,
{
#[allow(clippy::type_complexity)]
let slot: Arc<SyncMutex<Option<(Waker, Box<dyn FnOnce(&mut T) + Send>)>>> =
Arc::new(SyncMutex::new(None));
let maybe_error = Arc::new(SyncMutex::new(None));
let barrier = Arc::new(Barrier::new(2));
let terminate_signal = Arc::new(AtomicBool::new(false));
let thread = std::thread::spawn({
let slot = Arc::clone(&slot);
let barrier = Arc::clone(&barrier);
let maybe_error = Arc::clone(&maybe_error);
let terminate_signal = Arc::clone(&terminate_signal);
move || {
let mut v = match f() {
Ok(v) => v,
Err(e) => {
*maybe_error.lock().unwrap() = Some(e);
barrier.wait();
return;
}
};
barrier.wait();
loop {
if terminate_signal.load(std::sync::atomic::Ordering::SeqCst) {
break;
}
std::thread::park();
if terminate_signal.load(std::sync::atomic::Ordering::SeqCst) {
break;
}
if let Some((waker, fun)) = slot.lock().unwrap().take() {
fun(&mut v);
waker.wake();
} else {
break;
}
}
}
});
barrier.wait();
if let Some(e) = maybe_error.lock().unwrap().take() {
return Err(e);
}
Ok(TokioThreadedResourceHandle {
thread,
slot,
access_control_mutex: Arc::new(AsyncMutex::new(())),
terminate: terminate_signal,
})
}
}
impl<T> Drop for TokioThreadedResourceHandle<T>
where
T: Unpin + Sized + 'static,
{
fn drop(&mut self) {
self.terminate
.store(true, std::sync::atomic::Ordering::SeqCst);
self.thread.thread().unpark();
}
}
impl<T> super::ThreadedResourceHandle<T> for TokioThreadedResourceHandle<T>
where
T: Unpin + Sized + 'static,
{
type WithFuture<'a, R> = impl Future<Output = R> + Send
where
Self: 'a,
R: Send + Unpin + 'static;
async fn with<F, R>(&self, f: F) -> Self::WithFuture<'_, R>
where
F: FnOnce(&mut T) -> R + Send + Unpin + 'static,
R: Send + Unpin + 'static,
{
let guard = self.access_control_mutex.lock().await;
let result_slot = Arc::new(SyncMutex::new(Option::<R>::None));
let result_slot_clone = Arc::clone(&result_slot);
let slot = Arc::clone(&self.slot);
let boxed_f = Box::new(move |v: &mut T| {
*result_slot.lock().unwrap() = Some(f(v));
});
TokioThreadedResourceHandleFuture {
set_fun: Some(Box::new(move |waker| {
slot.lock().unwrap().replace((waker, boxed_f));
})),
result_slot: result_slot_clone,
handle: self.thread.thread(),
_access_guard: guard,
}
}
}
/// The future returned by [`TokioThreadedResourceHandle`]::with.
pub struct TokioThreadedResourceHandleFuture<'thread, R, Guard>
where
R: Send + Unpin + 'static,
Guard: Unpin,
{
set_fun: Option<Box<dyn FnOnce(Waker) + Send + Unpin + 'static>>,
result_slot: Arc<SyncMutex<Option<R>>>,
_access_guard: Guard,
handle: &'thread Thread,
}
impl<'thread, R, Guard> Future for TokioThreadedResourceHandleFuture<'thread, R, Guard>
where
R: Send + Unpin + 'static,
Guard: Unpin,
{
type Output = R;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<R> {
let this = self.as_mut().get_mut();
if let Some(set_fun) = this.set_fun.take() {
set_fun(cx.waker().clone());
this.handle.unpark();
return Poll::Pending;
}
if let Ok(mut result_slot) = this.result_slot.try_lock() {
if let Some(result) = result_slot.take() {
return Poll::Ready(result);
}
}
Poll::Pending
}
}

gitbutler-git/src/cli.rs Normal file
View File

@@ -0,0 +1,17 @@
//! CLI-based (fork/exec) backend implementation,
//! executing the `git` command-line tool available
//! on `$PATH`.
mod executor;
mod repository;
#[cfg(unix)]
pub use self::executor::Uid;
pub use self::{
executor::{AskpassServer, FileStat, GitExecutor, Pid, Socket},
repository::fetch,
};
#[cfg(feature = "tokio")]
pub use self::executor::tokio;

View File

@@ -1,4 +1,4 @@
use std::{collections::HashMap, time::Duration};
use std::{collections::HashMap, path::Path, time::Duration};
#[cfg(any(test, feature = "tokio"))]
pub mod tokio;
@@ -56,9 +56,10 @@ pub unsafe trait GitExecutor {
///
/// `Err` is returned if the command could not be executed,
/// **not** if the command returned a non-zero exit code.
async fn execute_raw(
async fn execute_raw<P: AsRef<Path>>(
&self,
args: &[&str],
cwd: P,
envs: Option<HashMap<String, String>>,
) -> Result<(usize, String, String), Self::Error>;
@@ -67,9 +68,10 @@ pub unsafe trait GitExecutor {
///
/// Implementers should use this method over [`Self::execute_raw`]
/// when possible.
async fn execute(
async fn execute<P: AsRef<Path>>(
&self,
args: &[&str],
cwd: P,
envs: Option<HashMap<String, String>>,
) -> Result<(usize, String, String), Self::Error> {
let mut args = args.as_ref().to_vec();
@@ -86,7 +88,7 @@ pub unsafe trait GitExecutor {
envs.insert("GIT_TERMINAL_PROMPT".into(), "0".into());
envs.insert("LC_ALL".into(), "C".into()); // Force English. We need this for parsing output.
self.execute_raw(&args, Some(envs)).await
self.execute_raw(&args, cwd, Some(envs)).await
}
/// Creates a named pipe server that is compatible with
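With this change, `execute`/`execute_raw` take an explicit working directory instead of relying on `-C` arguments. For reference, a minimal usage sketch (not part of this diff) of driving the executor directly, assuming the `tokio` feature and the re-exported `TokioExecutor`; the helper name is illustrative only:

```rust
use gitbutler_git::cli::{tokio::TokioExecutor, GitExecutor};

// Hypothetical helper: read HEAD using the executor directly. `execute` injects
// GIT_TERMINAL_PROMPT=0 and LC_ALL=C (see the default implementation above) and
// now runs `git` from `cwd`, so a `-C <path>` argument is no longer needed.
async fn current_head(repo_path: &str) -> std::io::Result<String> {
    let (status, stdout, stderr) = TokioExecutor
        .execute(&["rev-parse", "HEAD"], repo_path, None)
        .await?;
    if status == 0 {
        Ok(stdout.trim().to_owned())
    } else {
        // Non-zero exit codes are returned as Ok by the executor; map them here.
        Err(std::io::Error::new(std::io::ErrorKind::Other, stderr))
    }
}
```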

View File

@@ -2,7 +2,7 @@
#[cfg(unix)]
use std::os::unix::fs::{MetadataExt, PermissionsExt};
use std::{collections::HashMap, fs::Permissions, time::Duration};
use std::{collections::HashMap, fs::Permissions, path::Path, time::Duration};
use tokio::process::Command;
/// A [`super::GitExecutor`] implementation using the `git` command-line tool
@@ -14,9 +14,10 @@ unsafe impl super::GitExecutor for TokioExecutor {
type Error = std::io::Error;
type ServerHandle = TokioAskpassServer;
async fn execute_raw(
async fn execute_raw<P: AsRef<Path>>(
&self,
args: &[&str],
cwd: P,
envs: Option<HashMap<String, String>>,
) -> Result<(usize, String, String), Self::Error> {
let mut cmd = Command::new("git");
@@ -37,6 +38,7 @@ unsafe impl super::GitExecutor for TokioExecutor {
cmd.kill_on_drop(true);
cmd.args(args);
cmd.current_dir(cwd);
if let Some(envs) = envs {
cmd.envs(envs);

View File

@@ -0,0 +1,290 @@
use super::executor::{AskpassServer, GitExecutor, Pid, Socket};
use crate::RefSpec;
use futures::{select, FutureExt};
use rand::Rng;
use std::{collections::HashMap, path::Path, time::Duration};
/// The number of characters in the secret used for checking
/// askpass invocations by ssh/git when connecting to our process.
const ASKPASS_SECRET_LENGTH: usize = 24;
/// Higher level errors that can occur when interacting with the CLI.
///
/// You probably don't want to use this type. Use [`Error`] instead.
#[derive(Debug, thiserror::Error)]
pub enum RepositoryError<
Eexec: std::error::Error + core::fmt::Debug + Send + Sync + 'static,
Easkpass: std::error::Error + core::fmt::Debug + Send + Sync + 'static,
Esocket: std::error::Error + core::fmt::Debug + Send + Sync + 'static,
> {
#[error("failed to execute git command: {0}")]
Exec(Eexec),
#[error("failed to create askpass server: {0}")]
AskpassServer(Easkpass),
#[error("i/o error communicating with askpass utility: {0}")]
AskpassIo(Esocket),
#[error(
"git command exited with non-zero exit code {status}: {args:?}\n\nSTDOUT:\n{stdout}\n\nSTDERR:\n{stderr}"
)]
Failed {
status: usize,
args: Vec<String>,
stdout: String,
stderr: String,
},
#[error("failed to determine path to this executable: {0}")]
NoSelfExe(std::io::Error),
#[error("askpass secret mismatch")]
AskpassSecretMismatch,
#[error("git requires authorization credentials but none were provided: prompt was {0:?}")]
NeedsAuthorization(String),
#[error("unable to determine PID of askpass peer: {0}")]
NoPid(Esocket),
#[cfg(unix)]
#[error("unable to determine UID of askpass peer: {0}")]
NoUid(Esocket),
#[error("failed to perform askpass security check; no such PID: {0}")]
NoSuchPid(Pid),
#[error("failed to perform askpass security check; device mismatch")]
AskpassDeviceMismatch,
#[error("failed to perform askpass security check; executable mismatch")]
AskpassExecutableMismatch,
}
/// Higher level errors that can occur when interacting with the CLI.
pub type Error<E> = RepositoryError<
<E as GitExecutor>::Error,
<<E as GitExecutor>::ServerHandle as AskpassServer>::Error,
<<<E as GitExecutor>::ServerHandle as AskpassServer>::SocketHandle as Socket>::Error,
>;
#[cold]
async fn execute_with_auth_harness<P, F, Fut, E>(
repo_path: P,
executor: E,
args: &[&str],
envs: Option<HashMap<String, String>>,
on_prompt: F,
) -> Result<(usize, String, String), Error<E>>
where
P: AsRef<Path>,
E: GitExecutor,
F: Fn(&str) -> Fut,
Fut: std::future::Future<Output = Option<String>>,
{
let path = std::env::current_exe().map_err(Error::<E>::NoSelfExe)?;
// TODO(qix-): Get parent PID of connecting processes to make sure they're us.
//let our_pid = std::process::id();
// TODO(qix-): This is a bit of a hack. Under a test environment,
// TODO(qix-): Cargo is running a test runner with a quasi-random
// TODO(qix-): suffix. The actual executables live in the parent directory.
// TODO(qix-): Thus, we have to do this under test. It's not ideal, but
// TODO(qix-): it works for now.
#[cfg(test)]
let path = path.parent().unwrap();
let askpath_path = path
.with_file_name("gitbutler-git-askpass")
.to_string_lossy()
.into_owned();
#[cfg(not(target_os = "windows"))]
let setsid_path = path
.with_file_name("gitbutler-git-setsid")
.to_string_lossy()
.into_owned();
let askpath_stat = executor
.stat(&askpath_path)
.await
.map_err(Error::<E>::Exec)?;
#[cfg(not(target_os = "windows"))]
let setsid_stat = executor
.stat(&setsid_path)
.await
.map_err(Error::<E>::Exec)?;
#[allow(unsafe_code)]
let sock_server = unsafe { executor.create_askpass_server() }
.await
.map_err(Error::<E>::Exec)?;
// FIXME(qix-): This is probably not cryptographically secure, did this in a bit
// FIXME(qix-): of a hurry. We should probably use a proper CSPRNG here, but this
// FIXME(qix-): is probably fine for now (as this security mechanism is probably
// FIXME(qix-): overkill to begin with).
let secret = rand::thread_rng()
.sample_iter(&rand::distributions::Alphanumeric)
.take(ASKPASS_SECRET_LENGTH)
.map(char::from)
.collect::<String>();
let mut envs = envs.unwrap_or_default();
envs.insert("GITBUTLER_ASKPASS_PIPE".into(), sock_server.to_string());
envs.insert("GITBUTLER_ASKPASS_SECRET".into(), secret.clone());
envs.insert("SSH_ASKPASS".into(), askpath_path);
// DISPLAY is required by SSH to check SSH_ASKPASS.
// Please don't ask us why, it's unclear.
if !std::env::var("DISPLAY")
.map(|v| !v.is_empty())
.unwrap_or(false)
{
envs.insert("DISPLAY".into(), ":".into());
}
envs.insert(
"GIT_SSH_COMMAND".into(),
format!(
"{}{} -o StrictHostKeyChecking=accept-new -o KbdInteractiveAuthentication=no{}",
{
#[cfg(not(target_os = "windows"))]
{
format!("{setsid_path} ")
}
#[cfg(target_os = "windows")]
{
""
}
},
envs.get("GIT_SSH_COMMAND").unwrap_or(&"ssh".into()),
{
// In test environments, we don't want to pollute the user's known hosts file.
// So, we just use /dev/null instead.
#[cfg(test)]
{
" -o UserKnownHostsFile=/dev/null"
}
#[cfg(not(test))]
{
""
}
}
),
);
let mut child_process = core::pin::pin! {
async {
executor
.execute(args, repo_path, Some(envs))
.await
.map_err(Error::<E>::Exec)
}.fuse()
};
loop {
select! {
res = child_process => {
return res;
},
res = sock_server.accept(Some(Duration::from_secs(60))).fuse() => {
let mut sock = res.map_err(Error::<E>::AskpassServer)?;
// get the PID of the peer
let peer_pid = sock.pid().map_err(Error::<E>::NoPid)?;
// get the full image path of the peer id; this is pretty expensive at the moment.
// TODO(qix-): see if dropping sysinfo for a more bespoke implementation is worth it.
let mut system = sysinfo::System::new();
system.refresh_processes();
let peer_path = system
.process(sysinfo::Pid::from_u32(peer_pid.try_into().map_err(|_| Error::<E>::NoSuchPid(peer_pid))?))
.and_then(|p| p.exe().map(|exe| exe.to_string_lossy().into_owned()))
.ok_or(Error::<E>::NoSuchPid(peer_pid))?;
// stat the askpass executable that is being invoked
let peer_stat = executor.stat(&peer_path).await.map_err(Error::<E>::Exec)?;
if peer_stat.ino == askpath_stat.ino {
if peer_stat.dev != askpath_stat.dev {
return Err(Error::<E>::AskpassDeviceMismatch)?;
}
} else if peer_stat.ino == setsid_stat.ino {
if peer_stat.dev != setsid_stat.dev {
return Err(Error::<E>::AskpassDeviceMismatch)?;
}
} else {
return Err(Error::<E>::AskpassExecutableMismatch)?;
}
// await for peer to send secret
let peer_secret = sock.read_line().await.map_err(Error::<E>::AskpassIo)?;
// check the secret
if peer_secret.trim() != secret {
return Err(Error::<E>::AskpassSecretMismatch)?;
}
// get the prompt
let prompt = sock.read_line().await.map_err(Error::<E>::AskpassIo)?;
// call the prompt handler
let response = on_prompt(&prompt).await;
if let Some(response) = response {
sock.write_line(&response).await.map_err(Error::<E>::AskpassIo)?;
} else {
return Err(Error::<E>::NeedsAuthorization(prompt));
}
}
}
}
}
/// Fetches the given refspec from the given remote in the repository
/// at the given path. Any prompts for the user are passed to the asynchronous
/// callback `on_prompt`, which should return the user's response, or `None` if the
/// operation should be aborted, in which case an `Err` value is returned from this
/// function.
pub async fn fetch<P, F, Fut, E>(
repo_path: P,
executor: E,
remote: &str,
refspec: RefSpec,
on_prompt: F,
) -> Result<(), crate::Error<Error<E>>>
where
P: AsRef<Path>,
E: GitExecutor,
F: Fn(&str) -> Fut,
Fut: std::future::Future<Output = Option<String>>,
{
let mut args = vec!["fetch", "--quiet", "--no-write-fetch-head"];
let refspec = refspec.to_string();
args.push(remote);
args.push(&refspec);
let (status, stdout, stderr) =
execute_with_auth_harness(repo_path, executor, &args, None, on_prompt).await?;
if status == 0 {
Ok(())
} else {
// Was the ref not found?
if let Some(refname) = stderr
.lines()
.find(|line| line.to_lowercase().contains("couldn't find remote ref"))
.map(|line| line.split_whitespace().last().unwrap_or_default())
{
Err(crate::Error::RefNotFound(refname.to_owned()))?
} else if stderr.to_lowercase().contains("permission denied") {
Err(crate::Error::AuthorizationFailed(Error::<E>::Failed {
status,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
}))?
} else {
Err(Error::<E>::Failed {
status,
args: args.into_iter().map(Into::into).collect(),
stdout,
stderr,
})?
}
}
}
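The prompt-handler form of `fetch` above replaces the old `Authorization`-driven prompt matching: authorization is now resolved entirely by the caller's callback rather than matched against prompt strings inside the library. A minimal usage sketch (not part of this diff), assuming the default `tokio` feature and the re-exported `TokioExecutor`; the helper name and the hard-coded passphrase are illustrative only:

```rust
use gitbutler_git::{cli, RefSpec};

// Hypothetical helper: fetch `master` from `origin`, answering SSH passphrase
// prompts and aborting on anything else.
async fn fetch_master(repo_path: &str) -> Result<(), Box<dyn std::error::Error>> {
    cli::fetch(
        repo_path,
        cli::tokio::TokioExecutor,
        "origin",
        RefSpec {
            source: Some("refs/heads/master".to_owned()),
            destination: Some("refs/heads/master".to_owned()),
            ..Default::default()
        },
        |prompt: &str| {
            let prompt = prompt.to_owned();
            async move {
                if prompt.to_lowercase().contains("passphrase") {
                    Some("correct horse battery staple".to_owned()) // illustrative
                } else {
                    None // abort; surfaces as a NeedsAuthorization backend error
                }
            }
        },
    )
    .await?;
    Ok(())
}
```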

View File

@@ -0,0 +1,30 @@
/// A backend-agnostic operation error.
#[derive(Debug, thiserror::Error)]
pub enum Error<BE: std::error::Error + core::fmt::Debug + Send + Sync + 'static> {
/// An otherwise backend-specific error that occurred and was not
/// directly related to the inputs or repository state related to
/// the operation, and instead occurred as a result of the backend
/// executing the operation itself.
#[error("backend error: {0}")]
Backend(#[from] BE),
/// The given refspec was not found.
/// Usually returned by a push or fetch operation.
#[error("a ref-spec was not found: {0}")]
RefNotFound(String),
/// An authorized operation was attempted, but the authorization
/// credentials were rejected by the remote (or further credentials
/// were required).
///
/// The inner error is the backend-specific error that may provide
/// more context.
#[error("authorization failed: {0}")]
AuthorizationFailed(BE),
/// An operation interacting with a remote by name failed to find
/// the remote.
#[error("no such remote: {0}")]
NoSuchRemote(String, #[source] BE),
/// An operation that expected a remote not to exist found that
/// the remote already existed.
#[error("remote already exists: {0}")]
RemoteExists(String, #[source] BE),
}
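Assuming `Error` stays re-exported at the crate root (it is referred to as `crate::Error` throughout the diff), a hypothetical sketch of branching on the backend-agnostic variants; the retry policy shown is purely illustrative:

```rust
use gitbutler_git::Error;

// Decide whether an operation is worth retrying after prompting the user again.
fn should_reprompt<BE>(err: &Error<BE>) -> bool
where
    BE: std::error::Error + Send + Sync + 'static,
{
    match err {
        // The remote rejected (or asked for more) credentials: ask the user again.
        Error::AuthorizationFailed(_) => true,
        // Missing refs/remotes or backend failures are treated as fatal here.
        Error::RefNotFound(_)
        | Error::NoSuchRemote(_, _)
        | Error::RemoteExists(_, _)
        | Error::Backend(_) => false,
    }
}
```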

View File

@@ -1,137 +0,0 @@
pub(crate) mod private;
/// To use in a backend, create a function that initializes
/// an empty repository, whatever that looks like, and returns
/// something that implements the `Repository` trait.
///
/// Include this file via
/// `include!(concat!(env!("CARGO_MANIFEST_DIR"), "/integration-tests.rs"));`
///
/// Then, pass the function to `gitbutler_git_integration_tests!(fn)`, like so:
///
/// ```
/// #[cfg(test)]
/// mod tests {
/// async fn make_repo(test_name: String) -> impl crate::Repository {
/// // Use `test_name` to create a unique repository, if needed.
/// todo!();
/// }
///
/// crate::gitbutler_git_integration_tests!(make_repo);
/// }
/// ```
#[allow(unused_macros)]
macro_rules! gitbutler_git_integration_tests {
($create_repo:expr, $io_tests:tt) => {
$crate::private::test_impl! {
$create_repo, enable_io,
async fn create_repo_selftest(repo) {
// Do-nothing, just a selftest.
}
async fn non_existent_remote(repo) {
use crate::*;
match repo.remote("non-existent").await.unwrap_err() {
Error::NoSuchRemote(remote, _) => assert_eq!(remote, "non-existent"),
err => panic!("expected NoSuchRemote, got {:?}", err),
}
}
async fn create_remote(repo) {
use crate::*;
match repo.remote("origin").await {
Err($crate::Error::NoSuchRemote(remote, _)) if remote == "origin" => {},
result => panic!("expected remote 'origin' query to fail with NoSuchRemote, but got {result:?}")
}
repo.create_remote("origin", "https://example.com/test.git").await.unwrap();
assert_eq!(repo.remote("origin").await.unwrap(), "https://example.com/test.git".to_owned());
}
async fn get_head_no_commits(repo) {
use crate::*;
assert!(repo.head().await.is_err());
}
async fn get_symbolic_head_no_commits(repo) {
use crate::*;
assert!(repo.symbolic_head().await.is_err());
}
// DO NOT ADD IO TESTS HERE. THIS IS THE WRONG SPOT.
}
$crate::private::test_impl! {
$create_repo, $io_tests,
async fn fetch_with_ssh_basic_bad_password(repo, server, server_repo) {
use crate::*;
server.allow_authorization(Authorization::Basic {
username: Some("my_username".to_owned()),
password: Some("my_password".to_owned())
});
server.run_with_server(async move |port| {
repo.create_remote("origin", &format!("[my_username@localhost:{port}]:test.git")).await.unwrap();
let err = repo.fetch(
"origin",
RefSpec{
source: Some("refs/heads/master".to_owned()),
destination: Some("refs/heads/master".to_owned()),
..Default::default()
},
&Authorization::Basic {
username: Some("my_username".to_owned()),
password: Some("wrong_password".to_owned()),
}
).await.unwrap_err();
match err {
Error::AuthorizationFailed(_) => {},
_ => panic!("expected AuthorizationFailed, got {:?}", err),
}
}).await
}
async fn fetch_with_ssh_basic_no_master(repo, server, server_repo) {
use crate::*;
let auth = Authorization::Basic {
username: Some("my_username".to_owned()),
password: Some("my_password".to_owned()),
};
server.allow_authorization(auth.clone());
server.run_with_server(async move |port| {
repo.create_remote("origin", &format!("[my_username@localhost:{port}]:test.git")).await.unwrap();
let err = repo.fetch(
"origin",
RefSpec{
source: Some("refs/heads/master".to_owned()),
destination: Some("refs/heads/master".to_owned()),
..Default::default()
},
&auth
).await.unwrap_err();
if let Error::RefNotFound(refname) = err {
assert_eq!(refname, "refs/heads/master");
} else {
panic!("expected RefNotFound, got {:?}", err);
}
}).await
}
// DO NOT ADD NON-IO TESTS HERE. THIS IS THE WRONG SPOT.
}
};
}
#[allow(unused_imports)]
pub(crate) use gitbutler_git_integration_tests;

View File

@@ -1,362 +0,0 @@
use futures::FutureExt;
use russh::{server, Channel, ChannelId, MethodSet, Pty};
use std::{collections::HashMap, process::Stdio, sync::Arc};
use tokio::net::TcpListener;
#[derive(Debug)]
pub(crate) struct TestSshServer {
repo_path: String,
allowed_auths: Vec<crate::Authorization>,
}
impl TestSshServer {
pub fn new(repo_path: String) -> Self {
Self {
repo_path,
allowed_auths: Vec::new(),
}
}
pub async fn run_with_server<F, FN>(self, cb: FN)
where
FN: FnOnce(u16) -> F,
F: std::future::Future<Output = ()> + 'static,
{
// We manually set up a TcpListener here so that we can
// bind to a random port and retrieve it.
let listener = TcpListener::bind(("127.0.0.1", 0)).await.unwrap();
let addr = listener.local_addr().unwrap();
let port = addr.port();
let config = Arc::new(russh::server::Config {
inactivity_timeout: Some(std::time::Duration::from_secs(10)),
auth_rejection_time: std::time::Duration::from_secs(3),
auth_rejection_time_initial: Some(std::time::Duration::from_secs(0)),
keys: vec![russh_keys::key::KeyPair::generate_ed25519().unwrap()],
..Default::default()
});
let socket_future = russh::server::run_on_socket(config, &listener, self);
futures::select! {
_ = cb(port).fuse() => {},
_ = socket_future.fuse() => {
panic!("server exited prematurely");
},
}
}
#[allow(unused)]
pub fn allow_authorization(&mut self, auth: crate::Authorization) {
self.allowed_auths.push(auth);
}
}
impl server::Server for TestSshServer {
type Handler = TestSshClient;
fn new_client(&mut self, _: Option<std::net::SocketAddr>) -> Self::Handler {
TestSshClient {
repo_path: self.repo_path.clone(),
channels: HashMap::new(),
allowed_auths: self.allowed_auths.clone(),
}
}
}
#[derive(Debug)]
pub(crate) struct TestSshClient {
repo_path: String,
channels: HashMap<ChannelId, TestSshChannel>,
allowed_auths: Vec<crate::Authorization>,
}
#[derive(Debug)]
struct TestSshChannel {
envs: HashMap<String, String>,
channel: Channel<server::Msg>,
}
#[async_trait::async_trait]
impl server::Handler for TestSshClient {
type Error = russh::Error;
async fn auth_password(
self,
user: &str,
pass: &str,
) -> Result<(Self, server::Auth), Self::Error> {
for auth in &self.allowed_auths {
if let crate::Authorization::Basic { username, password } = auth {
if username.as_deref() == Some(user) && password.as_deref() == Some(pass) {
return Ok((self, server::Auth::Accept));
}
}
}
Ok((
self,
server::Auth::Reject {
proceed_with_methods: Some(MethodSet::PUBLICKEY),
},
))
}
async fn env_request(
mut self,
channel: ChannelId,
name: &str,
value: &str,
session: server::Session,
) -> Result<(Self, server::Session), Self::Error> {
match name {
name if name.starts_with("LC_") || name == "GIT_PROTOCOL" || name == "LANG" => {
self.channels
.get_mut(&channel)
.expect("env_request on unknown channel")
.envs
.insert(name.to_owned(), value.to_owned());
}
disallowed => {
eprintln!(
"client attempted to set disallowed environment variable {:?} to {:?}",
disallowed, value
)
}
}
Ok((self, session))
}
async fn pty_request(
self,
_channel: ChannelId,
_term: &str,
_col_width: u32,
_row_height: u32,
_pix_width: u32,
_pix_height: u32,
_modes: &[(Pty, u32)],
_session: server::Session,
) -> Result<(Self, server::Session), Self::Error> {
panic!("client requested a pty but we don't support that");
}
async fn shell_request(
self,
_channel: ChannelId,
_session: server::Session,
) -> Result<(Self, server::Session), Self::Error> {
panic!("client requested a shell but we don't support that");
}
async fn exec_request(
mut self,
channel_id: ChannelId,
command: &[u8],
session: server::Session,
) -> Result<(Self, server::Session), Self::Error> {
let req = String::from_utf8_lossy(command);
if req.starts_with("git-upload-pack") {
let channel = Box::leak(Box::new(self.channels.remove(&channel_id).unwrap()));
let repo_path = self.repo_path.clone();
let handle = session.handle();
tokio::spawn(async move {
let channel_id = channel.channel.id();
let mut writer = channel.channel.make_writer_ext(None);
let mut reader = channel.channel.make_reader_ext(None);
let mut cmd = tokio::process::Command::new("git-upload-pack")
.kill_on_drop(true)
.envs(channel.envs.iter())
.arg(&repo_path)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()
.unwrap();
let mut stdin = cmd.stdin.take().unwrap();
let mut stdout = cmd.stdout.take().unwrap();
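// Proxy data arriving on the SSH channel into git-upload-pack's stdin.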
let copy_in = tokio::spawn(async move {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
//let file = tokio::fs::File::create("/tmp/gitbutler-upload-pack-in.log")
// .await
// .unwrap();
//let mut file_writer = tokio::io::BufWriter::new(file);
let mut buffer = [0; 1024];
while let Ok(n) = reader.read(&mut buffer).await {
if n == 0 {
break;
}
stdin.write_all(&buffer[..n]).await.unwrap();
//file_writer.write_all(&buffer[..n]).await.unwrap();
stdin.flush().await.unwrap();
//file_writer.flush().await.unwrap();
}
stdin.shutdown().await.ok(); // may have already been closed
//file_writer.shutdown().await.unwrap();
});
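// Proxy git-upload-pack's stdout back out over the SSH channel.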
let copy_out = tokio::spawn(async move {
use tokio::io::{AsyncReadExt, AsyncWriteExt};
//let file = tokio::fs::File::create("/tmp/gitbutler-upload-pack-out.log")
// .await
// .unwrap();
//let mut file_writer = tokio::io::BufWriter::new(file);
let mut buffer = [0; 1024];
while let Ok(n) = stdout.read(&mut buffer).await {
if n == 0 {
break;
}
writer.write_all(&buffer[..n]).await.unwrap();
//file_writer.write_all(&buffer[..n]).await.unwrap();
writer.flush().await.unwrap();
//file_writer.flush().await.unwrap();
}
writer.shutdown().await.ok(); // may have already been closed.
//file_writer.shutdown().await.unwrap();
});
let cmd_future = tokio::spawn(async move { cmd.wait().await.unwrap() });
let (status, _, _) = futures::try_join!(cmd_future, copy_in, copy_out).unwrap();
let exit_code = status.code().unwrap_or(1) as u32;
handle
.exit_status_request(channel_id, exit_code)
.await
.unwrap();
handle.close(channel_id).await.unwrap();
});
} else {
panic!("client requested a command we don't support: {:?}", req);
}
Ok((self, session))
}
async fn channel_open_session(
mut self,
channel: Channel<server::Msg>,
session: server::Session,
) -> Result<(Self, bool, server::Session), Self::Error> {
self.channels.insert(
channel.id(),
TestSshChannel {
channel,
envs: HashMap::new(),
},
);
Ok((self, true, session))
}
async fn channel_close(
mut self,
channel: ChannelId,
session: server::Session,
) -> Result<(Self, server::Session), Self::Error> {
// Best effort; may already be consumed.
self.channels.remove(&channel);
Ok((self, session))
}
}
#[allow(unused_macros)]
macro_rules! test_impl {
($create_repo:expr, enable_io, $(async fn $name:ident($repo:ident $(, $server:ident , $server_repo:ident)?) { $($body:tt)* })*) => {
$($crate::private::test_impl!($create_repo, $name, $repo $(, $server, $server_repo)?, { $($body)* });)*
};
($create_repo:expr, disable_io, $(async fn $name:ident($repo:ident $(, $server:ident , $server_repo:ident)?) { $($body:tt)* })*) => {};
($create_repo:expr, $name:ident, $repo:ident, { $($body:tt)* }) => {
#[test]
fn $name() {
::tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
.block_on(async {
#[allow(unused_variables)]
let $repo = $create_repo({
let mod_name = ::std::module_path!();
let test_name = ::std::stringify!($name);
format!("{mod_name}::{test_name}")
}).await;
let test_future = async { $($body)* };
use futures::FutureExt;
let timeout_future = ::tokio::time::sleep(::std::time::Duration::from_secs(10));
futures::select! {
_ = test_future.fuse() => {},
_ = timeout_future.fuse() => {
panic!("test timed out");
},
}
})
}
};
($create_repo:expr, $name:ident, $repo:ident, $server:ident, $server_repo:ident, { $($body:tt)* }) => {
#[test]
fn $name() {
::tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.unwrap()
.block_on(async {
#[allow(unused_variables)]
let $repo = $create_repo({
let mod_name = ::std::module_path!();
let test_name = ::std::stringify!($name);
format!("{mod_name}::{test_name}")
}).await;
#[allow(unused_variables, unused_mut)]
let (mut $server, $server_repo) = async {
let mod_name = ::std::module_path!();
let test_name = ::std::stringify!($name);
let repo_path = ::std::env::temp_dir()
.join("gitbutler-tests")
.join("git")
.join("remote")
.join(test_name)
.to_string_lossy()
.into_owned();
::std::fs::create_dir_all(&repo_path).unwrap();
let repo = $crate::backend::git2::Repository::<
$crate::backend::git2::tokio::TokioThreadedResource
>::open_or_init_bare(repo_path.clone()).await.unwrap();
let server = $crate::private::TestSshServer::new(repo_path);
(server, repo)
}.await;
let test_future = async { $($body)* };
use futures::FutureExt;
let timeout_future = ::tokio::time::sleep(::std::time::Duration::from_secs(10));
futures::select! {
_ = test_future.fuse() => {},
_ = timeout_future.fuse() => {
panic!("test timed out");
},
}
})
}
};
}
pub(crate) use test_impl;
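// Illustrative sketch (not part of the original change): how a backend test
// module might invoke the macro. `create_test_repository` is a hypothetical
// helper that builds a repository for the given test name, so the invocation
// is shown commented out rather than as live code.
//
// test_impl!(
//     create_test_repository,
//     enable_io,
//     async fn fetch_round_trip(repo, server, server_repo) {
//         // Seed `server_repo`, allow credentials on `server`, then call
//         // `server.run_with_server(..)` and fetch into `repo`.
//     }
// );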

View File

@ -1,49 +1,20 @@
//! GitButler core library for interacting with Git.
//! GitButler utility library for pushing/fetching Git repositories
//! using the Git CLI.
//!
//! This library houses a number of Git implementations,
//! over which we abstract a common interface and provide
//! higher-level operations that are implementation-agnostic.
//!
//! # Libgit2 Support
//! This library supports libgit2 via the `git2` feature.
//! Few assumptions are made about the environment;
//! it's a fairly clean and safe Git backend.
//!
//! # Fork/Exec (CLI) Support
//! This library supports the Git CLI via the `cli` feature.
//! Note that this is a fairly experimental implementation that
//! uses some (ideally portable) hacks for authentication,
//! including a custom executable (or two, in the case of
//! *nix systems) for handling automatic authentication
//! via the API.
//!
//! This means those executables must be situated next to
//! the executable that is running them (as sibling files),
//! for security purposes. They may not be symlinked.
//!
//! This hampers certain use cases, such as implementing
//! [`cli::GitExecutor`] for e.g. remote connections.
//! **Important Note:** This is an interim library. Please do not rely on it;
//! it's only used as a temporary measure in the GitButler app until we implement
//! a longer-term solution for managing Git operations.
#![deny(missing_docs, unsafe_code)]
#![allow(async_fn_in_trait)]
#![cfg_attr(test, feature(async_closure))]
#![feature(impl_trait_in_assoc_type)]
#[cfg(test)]
mod integration_tests;
#[cfg(test)]
#[allow(unused_imports)]
pub(crate) use integration_tests::*;
mod backend;
mod cli;
mod error;
mod refspec;
mod repository;
#[cfg(feature = "cli")]
pub use backend::cli;
#[cfg(feature = "git2")]
pub use backend::git2;
pub use self::{
cli::*,
error::Error,
refspec::{Error as RefSpecError, RefSpec},
repository::{Authorization, ConfigScope, Error, Repository},
};

View File

@ -1,9 +0,0 @@
use std::path::PathBuf;

pub enum Authorization {
Basic {
username: String,
password: String,
},
PublicKey {
path: PathBuf,
},
}

View File

@ -1,173 +0,0 @@
use crate::RefSpec;
/// A backend-agnostic operation error.
#[derive(Debug, thiserror::Error)]
pub enum Error<BE: std::error::Error + core::fmt::Debug + Send + Sync + 'static> {
/// A backend-specific error that was not directly related to the
/// operation's inputs or to the repository's state, but instead
/// occurred while the backend was executing the operation itself.
#[error("backend error: {0}")]
Backend(#[from] BE),
/// The given refspec was not found.
/// Usually returned by a push or fetch operation.
#[error("a ref-spec was not found: {0}")]
RefNotFound(String),
/// An authorized operation was attempted, but the authorization
/// credentials were rejected by the remote (or further credentials
/// were required).
///
/// The inner error is the backend-specific error that may provide
/// more context.
#[error("authorization failed: {0}")]
AuthorizationFailed(BE),
/// An operation interacting with a remote by name failed to find
/// the remote.
#[error("no such remote: {0}")]
NoSuchRemote(String, #[source] BE),
/// An operation that expected a remote not to exist found that
/// the remote already existed.
#[error("remote already exists: {0}")]
RemoteExists(String, #[source] BE),
}
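// Illustrative sketch (not part of the original file): mapping the variants
// above to user-facing messages, generic over the backend error type `BE`.
#[allow(dead_code)]
fn describe_error<BE>(err: &Error<BE>) -> String
where
BE: std::error::Error + core::fmt::Debug + Send + Sync + 'static,
{
match err {
Error::Backend(e) => format!("backend error: {e}"),
Error::RefNotFound(refspec) => format!("a ref-spec was not found: {refspec}"),
Error::AuthorizationFailed(e) => format!("authorization failed: {e}"),
Error::NoSuchRemote(name, _) => format!("no such remote: {name}"),
Error::RemoteExists(name, _) => format!("remote already exists: {name}"),
}
}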
/// The scope from/to which a configuration value is read/written.
#[derive(Default, Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(
all(not(test), feature = "serde"),
derive(serde::Serialize, serde::Deserialize)
)]
pub enum ConfigScope {
// NOTE(qix-): We disable all but `Local` when testing.
// NOTE(qix-): This is not a standard practice, and you shouldn't
// NOTE(qix-): do this in almost any other case. However, we do
// NOTE(qix-): this here because most backends for Git do not have
// NOTE(qix-): a way to override global/system/etc config locations,
// NOTE(qix-): and we don't want to accidentally modify the user's
// NOTE(qix-): global config when running tests or have them influence
// NOTE(qix-): the tests in any way. Thus, we force test writers to use
// NOTE(qix-): `Local` scope when testing. This is not ideal, but it's
// NOTE(qix-): the best we can do for now. Sorry for the mess.
/// Pull from the most appropriate scope.
/// This is the default, and will fall back to a higher
/// scope if the value is not initially found.
#[cfg(not(test))]
#[cfg_attr(not(test), default)]
Auto = 0,
/// Pull from the local scope (`.git/config`) _only_.
#[cfg_attr(test, default)]
Local = 1,
/// Pull from the system-wide scope (`${prefix}/etc/gitconfig`) _only_.
#[cfg(not(test))]
System = 2,
/// Pull from the global (user) scope (typically `~/.gitconfig`) _only_.
#[cfg(not(test))]
Global = 3,
}
/// A handle to an open Git repository.
pub trait Repository {
/// The type of error returned by this repository.
type Error: std::error::Error + core::fmt::Debug + Send + Sync + 'static;
/// Reads a configuration value.
///
/// Errors if the value is not valid UTF-8.
async fn config_get(
&self,
key: &str,
scope: ConfigScope,
) -> Result<Option<String>, Error<Self::Error>>;
/// Writes a configuration value.
///
/// Errors if the new value is not valid UTF-8.
async fn config_set(
&self,
key: &str,
value: &str,
scope: ConfigScope,
) -> Result<(), Error<Self::Error>>;
/// Fetches the given refspec from the given remote.
///
/// This is an authorized operation; the given authorization
/// credentials will be used to authenticate with the remote.
async fn fetch(
&self,
remote: &str,
refspec: RefSpec,
authorization: &Authorization,
) -> Result<(), Error<Self::Error>>;
/// Creates a remote with the given URI.
/// If the remote already exists, [`Error::RemoteExists`] will be returned.
async fn create_remote(&self, remote: &str, uri: &str) -> Result<(), Error<Self::Error>>;
/// Creates a remote with the given URI, or updates the URI
/// if the remote already exists.
async fn create_or_update_remote(
&self,
remote: &str,
uri: &str,
) -> Result<(), Error<Self::Error>>;
/// Gets the URI for a remote.
async fn remote(&self, remote: &str) -> Result<String, Error<Self::Error>>;
/// Gets the current HEAD ref of the repository.
///
/// Errors if the repository is empty.
async fn head(&self) -> Result<String, Error<Self::Error>>;
/// Gets the symbolic HEAD of the repository.
///
/// Returns `"HEAD"` if the current HEAD
/// is not a symbolic ref (e.g. a detached head state
/// or a direct reference to a commit).
///
/// Errors if the repository is empty.
async fn symbolic_head(&self) -> Result<String, Error<Self::Error>>;
}
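// Illustrative sketch (not part of the original file): a helper generic over
// `Repository` that reads one local-scope config value and then fetches a
// refspec using basic credentials. The remote name, refspec, and password are
// supplied by the caller, so nothing beyond the trait above is assumed.
#[allow(dead_code)]
async fn fetch_with_password<R: Repository>(
repo: &R,
remote: &str,
refspec: RefSpec,
password: String,
) -> Result<(), Error<R::Error>> {
// `user.name` is just a placeholder key for the scoped read.
let _configured_user = repo.config_get("user.name", ConfigScope::Local).await?;
let auth = Authorization::Basic {
username: Some("git".to_string()),
password: Some(password),
};
repo.fetch(remote, refspec, &auth).await
}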
/// Provides authentication credentials when performing
/// an operation that interacts with a remote.
#[derive(Default, Debug, Clone, PartialEq, Eq, Hash)]
pub enum Authorization {
/// Performs no attempt to authorize; uses the system's
/// default authorization mechanism, if any.
#[default]
Auto,
/// Performs HTTP(S) Basic authentication with a username and password.
///
/// In the case of an SSH remote, the username is ignored. The username is
/// only used for HTTP(S) remotes; in that case, if the username is `None`
/// and the remote requests one, the operation will fail.
///
/// In order for HTTP(S) remotes to work with a `None` username or password,
/// the remote URI must include the basic auth credentials in the URI itself
/// (e.g. `https://[user]:[pass]@host/path`). Otherwise, the operation will
/// fail.
///
/// Note that certain remotes may use this mechanism for passing tokens as
/// well; consult the respective remote's documentation for what information
/// to supply.
Basic {
/// The username to use for authentication.
username: Option<String>,
/// The password to use for authentication.
password: Option<String>,
},
/// Specifies a set of credentials for logging in with SSH.
Ssh {
/// The path to the SSH private key to use for authentication.
/// If `None`, the default SSH key will be used (i.e. `-i` will not
/// be passed to `ssh`).
private_key: Option<String>,
/// The passphrase to use for the SSH private key.
/// If `None`, the key is assumed to be unencrypted.
/// A prompt for a passphrase will result in an error.
passphrase: Option<String>,
},
}
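// Illustrative sketch (not part of the original file): constructing each
// variant. The key path and credential values are placeholders.
#[allow(dead_code)]
fn example_authorizations() -> [Authorization; 3] {
[
Authorization::Auto,
Authorization::Basic {
username: Some("git".to_string()),
password: Some("personal-access-token".to_string()),
},
Authorization::Ssh {
private_key: Some("/home/user/.ssh/id_ed25519".to_string()),
passphrase: None,
},
]
}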