Merge branch 'master' into notgne2/document-hostname-dot

This commit is contained in:
notgne2 2021-01-24 18:53:42 -07:00
commit b35fccfd67
No known key found for this signature in database
GPG Key ID: BB661E172B42A7F8
11 changed files with 862 additions and 461 deletions

125
Cargo.lock generated
View File

@ -65,6 +65,19 @@ version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chrono"
version = "0.4.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73"
dependencies = [
"libc",
"num-integer",
"num-traits",
"time",
"winapi 0.3.9",
]
[[package]]
name = "clap"
version = "3.0.0-beta.2"
@ -123,16 +136,17 @@ name = "deploy-rs"
version = "0.1.0"
dependencies = [
"clap",
"flexi_logger",
"fork",
"futures-util",
"log",
"merge",
"notify",
"pretty_env_logger",
"rnix",
"serde",
"serde_derive",
"serde_json",
"signal-hook",
"smol_str",
"thiserror",
"tokio",
@ -141,19 +155,6 @@ dependencies = [
"yn",
]
[[package]]
name = "env_logger"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36"
dependencies = [
"atty",
"humantime",
"log",
"regex",
"termcolor",
]
[[package]]
name = "filetime"
version = "0.2.13"
@ -166,6 +167,22 @@ dependencies = [
"winapi 0.3.9",
]
[[package]]
name = "flexi_logger"
version = "0.16.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c37586928c27a25ff5fce49ff3f8e071b3beeef48b4f004fe7d40d75a26e3db5"
dependencies = [
"atty",
"chrono",
"glob",
"lazy_static",
"log",
"regex",
"thiserror",
"yansi",
]
[[package]]
name = "fork"
version = "0.1.18"
@ -253,6 +270,12 @@ dependencies = [
"slab",
]
[[package]]
name = "glob"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574"
[[package]]
name = "hashbrown"
version = "0.9.1"
@ -277,15 +300,6 @@ dependencies = [
"libc",
]
[[package]]
name = "humantime"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df004cfca50ef23c36850aaaa59ad52cc70d0e90243c3c7737a4dd32dc7a3c4f"
dependencies = [
"quick-error",
]
[[package]]
name = "indexmap"
version = "1.6.0"
@ -526,6 +540,16 @@ dependencies = [
"winapi 0.3.9",
]
[[package]]
name = "num-integer"
version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db"
dependencies = [
"autocfg",
"num-traits",
]
[[package]]
name = "num-traits"
version = "0.2.14"
@ -614,16 +638,6 @@ version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "pretty_env_logger"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "926d36b9553851b8b0005f1275891b392ee4d2d833852c417ed025477350fb9d"
dependencies = [
"env_logger",
"log",
]
[[package]]
name = "proc-macro-error"
version = "1.0.4"
@ -669,12 +683,6 @@ dependencies = [
"unicode-xid",
]
[[package]]
name = "quick-error"
version = "1.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0"
[[package]]
name = "quote"
version = "1.0.7"
@ -786,10 +794,20 @@ dependencies = [
]
[[package]]
name = "signal-hook-registry"
version = "1.2.2"
name = "signal-hook"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce32ea0c6c56d5eacaeb814fbed9960547021d3edd010ded1425f180536b20ab"
checksum = "6b3799fa361789a685db59e3986fb5f6f949e478728b9913c6759f7b014d0372"
dependencies = [
"libc",
"signal-hook-registry",
]
[[package]]
name = "signal-hook-registry"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16f1d0fef1604ba8f7a073c7e701f213e056707210e9020af4528e0101ce11a6"
dependencies = [
"libc",
]
@ -900,6 +918,17 @@ dependencies = [
"lazy_static",
]
[[package]]
name = "time"
version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
dependencies = [
"libc",
"wasi",
"winapi 0.3.9",
]
[[package]]
name = "tokio"
version = "0.3.5"
@ -983,6 +1012,12 @@ dependencies = [
"winapi-util",
]
[[package]]
name = "wasi"
version = "0.10.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
[[package]]
name = "whoami"
version = "0.9.0"
@ -1042,6 +1077,12 @@ dependencies = [
"winapi-build",
]
[[package]]
name = "yansi"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71"
[[package]]
name = "yn"
version = "0.1.1"

View File

@ -19,7 +19,7 @@ serde = "1.0.104"
merge = "0.1.0"
whoami = "0.9.0"
log = "0.4"
pretty_env_logger = "0.4"
flexi_logger = "0.16"
notify = "5.0.0-pre.3"
futures-util = "0.3.6"
fork = "0.1"
@ -27,6 +27,7 @@ thiserror = "1.0"
toml = "0.5"
yn = "0.1"
rnix = "0.8"
signal-hook = "0.3"
# smol_str is required by rnix, but 0.1.17 doesn't build on rustc
# 1.45.2 (shipped in nixos-20.09); it requires rustc 1.46.0. See
@ -34,10 +35,6 @@ rnix = "0.8"
smol_str = "=0.1.16"
[[bin]]
[lib]
name = "deploy"
path = "src/main.rs"
[[bin]]
name = "activate"
path = "src/activate.rs"
path = "src/lib.rs"

View File

@ -3,11 +3,11 @@
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1600853454,
"narHash": "sha256-EgsgbcJNZ9AQLVhjhfiegGjLbO+StBY9hfKsCwc8Hw8=",
"lastModified": 1606424373,
"narHash": "sha256-oq8d4//CJOrVj+EcOaSXvMebvuTkmBJuT5tzlfewUnQ=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "94cf59784c73ecec461eaa291918eff0bfb538ac",
"rev": "99f1c2157fba4bfe6211a321fd0ee43199025dbf",
"type": "github"
},
"original": {
@ -23,11 +23,11 @@
]
},
"locked": {
"lastModified": 1602173141,
"narHash": "sha256-m6wU6lP0wf2OMw3KtJqn27ITtg29+ftciGHicLiVSGE=",
"lastModified": 1610392286,
"narHash": "sha256-3wFl5y+4YZO4SgRYK8WE7JIS3p0sxbgrGaQ6RMw+d98=",
"owner": "nmattia",
"repo": "naersk",
"rev": "22b96210b2433228d42bce460f3befbdcfde7520",
"rev": "d7bfbad3304fd768c0f93a4c3b50976275e6d4be",
"type": "github"
},
"original": {
@ -39,11 +39,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1605716121,
"narHash": "sha256-CbHicvkzLTfEY+aSUeUY7dfFlDOgZH3uK+PpUfb/DPA=",
"lastModified": 1610942247,
"narHash": "sha256-PKo1ATAlC6BmfYSRmX0TVmNoFbrec+A5OKcabGEu2yU=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "6625284c397b44bc9518a5a1567c1b5aae455c08",
"rev": "7d71001b796340b219d1bfa8552c81995017544a",
"type": "github"
},
"original": {
@ -63,11 +63,11 @@
},
"utils": {
"locked": {
"lastModified": 1601282935,
"narHash": "sha256-WQAFV6sGGQxrRs3a+/Yj9xUYvhTpukQJIcMbIi7LCJ4=",
"lastModified": 1610051610,
"narHash": "sha256-U9rPz/usA1/Aohhk7Cmc2gBrEEKRzcW4nwPWMPwja4Y=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "588973065fce51f4763287f0fda87a174d78bf48",
"rev": "3982c9903e93927c2164caa727cd3f6a0e6d14cc",
"type": "github"
},
"original": {

View File

@ -63,6 +63,8 @@
name = base.name + "-activate-path";
text = ''
#!${pkgs.runtimeShell}
set -euo pipefail
${activate}
'';
executable = true;
@ -80,7 +82,14 @@
];
};
nixos = base: custom base.config.system.build.toplevel "$PROFILE/bin/switch-to-configuration switch";
nixos = base: custom base.config.system.build.toplevel ''
$PROFILE/bin/switch-to-configuration switch
# https://github.com/serokell/deploy-rs/issues/31
${with base.config.boot.loader;
pkgs.lib.optionalString systemd-boot.enable
"sed -i '/^default /d' ${efi.efiSysMountPoint}/loader/loader.conf"}
'';
noop = base: custom base ":";
};

View File

@ -3,6 +3,8 @@
//
// SPDX-License-Identifier: MPL-2.0
use signal_hook::{consts::signal::SIGHUP, iterator::Signals};
use clap::Clap;
use tokio::fs;
@ -18,27 +20,44 @@ use notify::{RecommendedWatcher, RecursiveMode, Watcher};
use thiserror::Error;
extern crate pretty_env_logger;
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
mod utils;
/// Activation portion of the simple Rust Nix deploy tool
/// Remote activation utility for deploy-rs
#[derive(Clap, Debug)]
#[clap(version = "1.0", author = "Serokell <https://serokell.io/>")]
struct Opts {
profile_path: String,
closure: String,
/// Print debug logs to output
#[clap(short, long)]
debug_logs: bool,
/// Directory to print logs to
#[clap(long)]
log_dir: Option<String>,
/// Temp path for any temporary files that may be needed during activation
/// Path for any temporary files that may be needed during activation
#[clap(long)]
temp_path: String,
#[clap(subcommand)]
subcmd: SubCommand,
}
#[derive(Clap, Debug)]
enum SubCommand {
Activate(ActivateOpts),
Wait(WaitOpts),
}
/// Activate a profile
#[derive(Clap, Debug)]
struct ActivateOpts {
/// The closure to activate
closure: String,
/// The profile path to install into
profile_path: String,
/// Maximum time to wait for confirmation after activation
#[clap(long)]
confirm_timeout: u16,
@ -52,6 +71,13 @@ struct Opts {
auto_rollback: bool,
}
/// Activate a profile
#[derive(Clap, Debug)]
struct WaitOpts {
/// The closure to wait for
closure: String,
}
#[derive(Error, Debug)]
pub enum DeactivateError {
#[error("Failed to execute the rollback command: {0}")]
@ -195,8 +221,9 @@ pub async fn activation_confirmation(
confirm_timeout: u16,
closure: String,
) -> Result<(), ActivationConfirmationError> {
let lock_hash = &closure["/nix/store/".len()..];
let lock_path = format!("{}/deploy-rs-canary-{}", temp_path, lock_hash);
let lock_path = deploy::make_lock_path(&temp_path, &closure);
debug!("Ensuring parent directory exists for canary file");
if let Some(parent) = Path::new(&lock_path).parent() {
fs::create_dir_all(parent)
@ -204,53 +231,98 @@ pub async fn activation_confirmation(
.map_err(ActivationConfirmationError::CreateConfirmDirError)?;
}
debug!("Creating canary file");
fs::File::create(&lock_path)
.await
.map_err(ActivationConfirmationError::CreateConfirmDirError)?;
.map_err(ActivationConfirmationError::CreateConfirmFileError)?;
debug!("Creating notify watcher");
let (deleted, done) = mpsc::channel(1);
let mut watcher: RecommendedWatcher =
Watcher::new_immediate(move |res: Result<notify::event::Event, notify::Error>| {
let send_result = match res {
Ok(e) if e.kind == notify::EventKind::Remove(notify::event::RemoveKind::File) => {
deleted.blocking_send(Ok(()))
debug!("Got worthy removal event, sending on channel");
deleted.try_send(Ok(()))
}
Err(e) => {
debug!("Got error waiting for removal event, sending on channel");
deleted.try_send(Err(e))
}
Ok(_) => Ok(()), // ignore non-removal events
Err(e) => deleted.blocking_send(Err(e)),
};
if let Err(e) = send_result {
// We can't communicate our error, but panic-ing would
// be bad; let's write an error and trust that the
// activate function will realize we aren't sending
// data.
eprintln!("Could not send file system event to watcher: {}", e);
error!("Could not send file system event to watcher: {}", e);
}
})?;
watcher.watch(lock_path, RecursiveMode::Recursive)?;
if let fork::Fork::Child =
fork::daemon(false, false).map_err(ActivationConfirmationError::ForkError)?
{
std::thread::spawn(move || {
let rt = tokio::runtime::Runtime::new().unwrap();
watcher.watch(&lock_path, RecursiveMode::NonRecursive)?;
rt.block_on(async move {
if let Err(err) = danger_zone(done, confirm_timeout).await {
if let Err(err) = deactivate(&profile_path).await {
good_panic!("Error de-activating due to another error in confirmation thread, oh no...: {}", err);
}
if let Err(err) = danger_zone(done, confirm_timeout).await {
error!("Error waiting for confirmation event: {}", err);
good_panic!("Error in confirmation thread: {}", err);
}
});
})
.join()
.unwrap();
info!("Confirmation successful!");
if let Err(err) = deactivate(&profile_path).await {
error!(
"Error de-activating due to another error waiting for confirmation, oh no...: {}",
err
);
}
}
std::process::exit(0);
Ok(())
}
#[derive(Error, Debug)]
pub enum WaitError {
#[error("Error creating watcher for activation: {0}")]
Watcher(#[from] notify::Error),
#[error("Error waiting for activation: {0}")]
Waiting(#[from] DangerZoneError),
}
pub async fn wait(temp_path: String, closure: String) -> Result<(), WaitError> {
let lock_path = deploy::make_lock_path(&temp_path, &closure);
let (created, done) = mpsc::channel(1);
let mut watcher: RecommendedWatcher = {
// TODO: fix wasteful clone
let lock_path = lock_path.clone();
Watcher::new_immediate(move |res: Result<notify::event::Event, notify::Error>| {
let send_result = match res {
Ok(e) if e.kind == notify::EventKind::Create(notify::event::CreateKind::File) => {
match &e.paths[..] {
[x] if x == Path::new(&lock_path) => created.try_send(Ok(())),
_ => Ok(()),
}
}
Err(e) => created.try_send(Err(e)),
Ok(_) => Ok(()), // ignore non-removal events
};
if let Err(e) = send_result {
error!("Could not send file system event to watcher: {}", e);
}
})?
};
watcher.watch(&temp_path, RecursiveMode::NonRecursive)?;
// Avoid a potential race condition by checking for existence after watcher creation
if fs::metadata(&lock_path).await.is_ok() {
watcher.unwatch(&temp_path)?;
return Ok(());
}
danger_zone(done, 240).await?;
info!("Found canary file, done waiting!");
Ok(())
}
#[derive(Error, Debug)]
@ -301,6 +373,8 @@ pub async fn activate(
}
};
debug!("Running activation script");
let activate_status = match Command::new(format!("{}/deploy-rs-activate", profile_path))
.env("PROFILE", &profile_path)
.current_dir(&profile_path)
@ -331,6 +405,7 @@ pub async fn activate(
if magic_rollback {
info!("Magic rollback is enabled, setting up confirmation hook...");
match activation_confirmation(profile_path.clone(), temp_path, confirm_timeout, closure)
.await
{
@ -347,26 +422,48 @@ pub async fn activate(
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
if std::env::var("DEPLOY_LOG").is_err() {
std::env::set_var("DEPLOY_LOG", "info");
}
pretty_env_logger::init_custom_env("DEPLOY_LOG");
// Ensure that this process stays alive after the SSH connection dies
let mut signals = Signals::new(&[SIGHUP])?;
std::thread::spawn(move || {
for _ in signals.forever() {
println!("Received NOHUP - ignoring...");
}
});
let opts: Opts = Opts::parse();
match activate(
opts.profile_path,
opts.closure,
opts.auto_rollback,
opts.temp_path,
opts.confirm_timeout,
opts.magic_rollback,
)
.await
{
deploy::init_logger(
opts.debug_logs,
opts.log_dir.as_deref(),
match opts.subcmd {
SubCommand::Activate(_) => deploy::LoggerType::Activate,
SubCommand::Wait(_) => deploy::LoggerType::Wait,
},
)?;
let r = match opts.subcmd {
SubCommand::Activate(activate_opts) => activate(
activate_opts.profile_path,
activate_opts.closure,
activate_opts.auto_rollback,
opts.temp_path,
activate_opts.confirm_timeout,
activate_opts.magic_rollback,
)
.await
.map_err(|x| Box::new(x) as Box<dyn std::error::Error>),
SubCommand::Wait(wait_opts) => wait(opts.temp_path, wait_opts.closure)
.await
.map_err(|x| Box::new(x) as Box<dyn std::error::Error>),
};
match r {
Ok(()) => (),
Err(err) => good_panic!("{}", err),
Err(err) => {
error!("{}", err);
std::process::exit(1)
}
}
Ok(())

View File

@ -12,17 +12,12 @@ use tokio::process::Command;
use thiserror::Error;
extern crate pretty_env_logger;
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
#[macro_use]
mod utils;
/// Simple Rust rewrite of a simple Nix Flake deployment tool
#[derive(Clap, Debug)]
#[clap(version = "1.0", author = "Serokell <https://serokell.io/>")]
@ -39,6 +34,13 @@ struct Opts {
/// Extra arguments to be passed to nix build
extra_build_args: Vec<String>,
/// Print debug logs to output
#[clap(short, long)]
debug_logs: bool,
/// Directory to print logs to (including the background activation process)
#[clap(long)]
log_dir: Option<String>,
/// Keep the build outputs of each built profile
#[clap(short, long)]
keep_result: bool,
@ -98,9 +100,9 @@ async fn test_flake_support() -> Result<bool, std::io::Error> {
#[derive(Error, Debug)]
enum CheckDeploymentError {
#[error("Failed to execute Nix checking command: {0}")]
NixCheckError(#[from] std::io::Error),
NixCheck(#[from] std::io::Error),
#[error("Nix checking command resulted in a bad exit code: {0:?}")]
NixCheckExitError(Option<i32>),
NixCheckExit(Option<i32>),
}
async fn check_deployment(
@ -136,7 +138,7 @@ async fn check_deployment(
match check_status.code() {
Some(0) => (),
a => return Err(CheckDeploymentError::NixCheckExitError(a)),
a => return Err(CheckDeploymentError::NixCheckExit(a)),
};
Ok(())
@ -145,15 +147,15 @@ async fn check_deployment(
#[derive(Error, Debug)]
enum GetDeploymentDataError {
#[error("Failed to execute nix eval command: {0}")]
NixEvalError(std::io::Error),
NixEval(std::io::Error),
#[error("Failed to read output from evaluation: {0}")]
NixEvalOutError(std::io::Error),
NixEvalOut(std::io::Error),
#[error("Evaluation resulted in a bad exit code: {0:?}")]
NixEvalExitError(Option<i32>),
NixEvalExit(Option<i32>),
#[error("Error converting evaluation output to utf8: {0}")]
DecodeUtf8Error(#[from] std::string::FromUtf8Error),
DecodeUtf8(#[from] std::string::FromUtf8Error),
#[error("Error decoding the JSON from evaluation: {0}")]
DecodeJsonError(#[from] serde_json::error::Error),
DecodeJson(#[from] serde_json::error::Error),
}
/// Evaluates the Nix in the given `repo` and return the processed Data from it
@ -161,7 +163,7 @@ async fn get_deployment_data(
supports_flakes: bool,
repo: &str,
extra_build_args: &[String],
) -> Result<utils::data::Data, GetDeploymentDataError> {
) -> Result<deploy::data::Data, GetDeploymentDataError> {
info!("Evaluating flake in {}", repo);
let mut c = match supports_flakes {
@ -193,16 +195,16 @@ async fn get_deployment_data(
let build_child = build_command
.stdout(Stdio::piped())
.spawn()
.map_err(GetDeploymentDataError::NixEvalError)?;
.map_err(GetDeploymentDataError::NixEval)?;
let build_output = build_child
.wait_with_output()
.await
.map_err(GetDeploymentDataError::NixEvalOutError)?;
.map_err(GetDeploymentDataError::NixEvalOut)?;
match build_output.status.code() {
Some(0) => (),
a => return Err(GetDeploymentDataError::NixEvalExitError(a)),
a => return Err(GetDeploymentDataError::NixEvalExit(a)),
};
let data_json = String::from_utf8(build_output.stdout)?;
@ -220,14 +222,14 @@ struct PromptPart<'a> {
}
fn print_deployment(
parts: &[(utils::DeployData, utils::DeployDefs)],
parts: &[(deploy::DeployData, deploy::DeployDefs)],
) -> Result<(), toml::ser::Error> {
let mut part_map: HashMap<String, HashMap<String, PromptPart>> = HashMap::new();
for (data, defs) in parts {
part_map
.entry(data.node_name.to_string())
.or_insert(HashMap::new())
.or_insert_with(HashMap::new)
.insert(
data.profile_name.to_string(),
PromptPart {
@ -242,7 +244,7 @@ fn print_deployment(
let toml = toml::to_string(&part_map)?;
warn!("The following profiles are going to be deployed:\n{}", toml);
info!("The following profiles are going to be deployed:\n{}", toml);
Ok(())
}
@ -259,7 +261,7 @@ enum PromptDeploymentError {
}
fn prompt_deployment(
parts: &[(utils::DeployData, utils::DeployDefs)],
parts: &[(deploy::DeployData, deploy::DeployDefs)],
) -> Result<(), PromptDeploymentError> {
print_deployment(parts)?;
@ -309,9 +311,9 @@ fn prompt_deployment(
#[derive(Error, Debug)]
enum RunDeployError {
#[error("Failed to deploy profile: {0}")]
DeployProfileError(#[from] utils::deploy::DeployProfileError),
DeployProfile(#[from] deploy::deploy::DeployProfileError),
#[error("Failed to push profile: {0}")]
PushProfileError(#[from] utils::push::PushProfileError),
PushProfile(#[from] deploy::push::PushProfileError),
#[error("No profile named `{0}` was found")]
ProfileNotFound(String),
#[error("No node named `{0}` was found")]
@ -319,45 +321,78 @@ enum RunDeployError {
#[error("Profile was provided without a node name")]
ProfileWithoutNode,
#[error("Error processing deployment definitions: {0}")]
DeployDataDefsError(#[from] utils::DeployDataDefsError),
DeployDataDefs(#[from] deploy::DeployDataDefsError),
#[error("Failed to make printable TOML of deployment: {0}")]
TomlFormat(#[from] toml::ser::Error),
#[error("{0}")]
PromptDeploymentError(#[from] PromptDeploymentError),
PromptDeployment(#[from] PromptDeploymentError),
}
type ToDeploy<'a> = Vec<(
(&'a str, &'a deploy::data::Node),
(&'a str, &'a deploy::data::Profile),
)>;
async fn run_deploy(
deploy_flake: utils::DeployFlake<'_>,
data: utils::data::Data,
deploy_flake: deploy::DeployFlake<'_>,
data: deploy::data::Data,
supports_flakes: bool,
check_sigs: bool,
interactive: bool,
cmd_overrides: utils::CmdOverrides,
cmd_overrides: deploy::CmdOverrides,
keep_result: bool,
result_path: Option<&str>,
extra_build_args: &[String],
debug_logs: bool,
log_dir: Option<String>,
) -> Result<(), RunDeployError> {
let to_deploy: Vec<((&str, &utils::data::Node), (&str, &utils::data::Profile))> =
match (&deploy_flake.node, &deploy_flake.profile) {
(Some(node_name), Some(profile_name)) => {
let node = match data.nodes.get(node_name) {
Some(x) => x,
None => return Err(RunDeployError::NodeNotFound(node_name.to_owned())),
};
let to_deploy: ToDeploy = match (&deploy_flake.node, &deploy_flake.profile) {
(Some(node_name), Some(profile_name)) => {
let node = match data.nodes.get(node_name) {
Some(x) => x,
None => return Err(RunDeployError::NodeNotFound(node_name.to_owned())),
};
let profile = match node.node_settings.profiles.get(profile_name) {
Some(x) => x,
None => return Err(RunDeployError::ProfileNotFound(profile_name.to_owned())),
};
vec![((node_name, node), (profile_name, profile))]
}
(Some(node_name), None) => {
let node = match data.nodes.get(node_name) {
Some(x) => x,
None => return Err(RunDeployError::NodeNotFound(node_name.to_owned())),
};
let mut profiles_list: Vec<(&str, &deploy::data::Profile)> = Vec::new();
for profile_name in [
node.node_settings.profiles_order.iter().collect(),
node.node_settings.profiles.keys().collect::<Vec<&String>>(),
]
.concat()
{
let profile = match node.node_settings.profiles.get(profile_name) {
Some(x) => x,
None => return Err(RunDeployError::ProfileNotFound(profile_name.to_owned())),
};
vec![((node_name, node), (profile_name, profile))]
if !profiles_list.iter().any(|(n, _)| n == profile_name) {
profiles_list.push((&profile_name, profile));
}
}
(Some(node_name), None) => {
let node = match data.nodes.get(node_name) {
Some(x) => x,
None => return Err(RunDeployError::NodeNotFound(node_name.to_owned())),
};
let mut profiles_list: Vec<(&str, &utils::data::Profile)> = Vec::new();
profiles_list
.into_iter()
.map(|x| ((node_name.as_str(), node), x))
.collect()
}
(None, None) => {
let mut l = Vec::new();
for (node_name, node) in &data.nodes {
let mut profiles_list: Vec<(&str, &deploy::data::Profile)> = Vec::new();
for profile_name in [
node.node_settings.profiles_order.iter().collect(),
@ -377,61 +412,31 @@ async fn run_deploy(
}
}
profiles_list
let ll: ToDeploy = profiles_list
.into_iter()
.map(|x| ((node_name.as_str(), node), x))
.collect()
.collect();
l.extend(ll);
}
(None, None) => {
let mut l = Vec::new();
for (node_name, node) in &data.nodes {
let mut profiles_list: Vec<(&str, &utils::data::Profile)> = Vec::new();
l
}
(None, Some(_)) => return Err(RunDeployError::ProfileWithoutNode),
};
for profile_name in [
node.node_settings.profiles_order.iter().collect(),
node.node_settings.profiles.keys().collect::<Vec<&String>>(),
]
.concat()
{
let profile = match node.node_settings.profiles.get(profile_name) {
Some(x) => x,
None => {
return Err(RunDeployError::ProfileNotFound(
profile_name.to_owned(),
))
}
};
if !profiles_list.iter().any(|(n, _)| n == profile_name) {
profiles_list.push((&profile_name, profile));
}
}
let ll: Vec<((&str, &utils::data::Node), (&str, &utils::data::Profile))> =
profiles_list
.into_iter()
.map(|x| ((node_name.as_str(), node), x))
.collect();
l.extend(ll);
}
l
}
(None, Some(_)) => return Err(RunDeployError::ProfileWithoutNode),
};
let mut parts: Vec<(utils::DeployData, utils::DeployDefs)> = Vec::new();
let mut parts: Vec<(deploy::DeployData, deploy::DeployDefs)> = Vec::new();
for ((node_name, node), (profile_name, profile)) in to_deploy {
let deploy_data = utils::make_deploy_data(
let deploy_data = deploy::make_deploy_data(
&data.generic_settings,
node,
node_name,
profile,
profile_name,
&cmd_overrides,
debug_logs,
log_dir.as_deref(),
);
let deploy_defs = deploy_data.defs()?;
@ -446,21 +451,21 @@ async fn run_deploy(
}
for (deploy_data, deploy_defs) in &parts {
utils::push::push_profile(
deploy::push::push_profile(deploy::push::PushProfileData {
supports_flakes,
check_sigs,
deploy_flake.repo,
&deploy_data,
&deploy_defs,
repo: deploy_flake.repo,
deploy_data: &deploy_data,
deploy_defs: &deploy_defs,
keep_result,
result_path,
extra_build_args,
)
})
.await?;
}
for (deploy_data, deploy_defs) in &parts {
utils::deploy::deploy_profile(&deploy_data, &deploy_defs).await?;
deploy::deploy::deploy_profile(&deploy_data, &deploy_defs).await?;
}
Ok(())
@ -469,33 +474,35 @@ async fn run_deploy(
#[derive(Error, Debug)]
enum RunError {
#[error("Failed to deploy profile: {0}")]
DeployProfileError(#[from] utils::deploy::DeployProfileError),
DeployProfile(#[from] deploy::deploy::DeployProfileError),
#[error("Failed to push profile: {0}")]
PushProfileError(#[from] utils::push::PushProfileError),
PushProfile(#[from] deploy::push::PushProfileError),
#[error("Failed to test for flake support: {0}")]
FlakeTestError(std::io::Error),
FlakeTest(std::io::Error),
#[error("Failed to check deployment: {0}")]
CheckDeploymentError(#[from] CheckDeploymentError),
CheckDeployment(#[from] CheckDeploymentError),
#[error("Failed to evaluate deployment data: {0}")]
GetDeploymentDataError(#[from] GetDeploymentDataError),
GetDeploymentData(#[from] GetDeploymentDataError),
#[error("Error parsing flake: {0}")]
ParseFlakeError(#[from] utils::ParseFlakeError),
ParseFlake(#[from] deploy::ParseFlakeError),
#[error("Error initiating logger: {0}")]
Logger(#[from] flexi_logger::FlexiLoggerError),
#[error("{0}")]
RunDeployError(#[from] RunDeployError),
RunDeploy(#[from] RunDeployError),
}
async fn run() -> Result<(), RunError> {
if std::env::var("DEPLOY_LOG").is_err() {
std::env::set_var("DEPLOY_LOG", "info");
}
pretty_env_logger::init_custom_env("DEPLOY_LOG");
let opts: Opts = Opts::parse();
let deploy_flake = utils::parse_flake(opts.flake.as_str())?;
deploy::init_logger(
opts.debug_logs,
opts.log_dir.as_deref(),
deploy::LoggerType::Deploy,
)?;
let cmd_overrides = utils::CmdOverrides {
let deploy_flake = deploy::parse_flake(opts.flake.as_str())?;
let cmd_overrides = deploy::CmdOverrides {
ssh_user: opts.ssh_user,
profile_user: opts.profile_user,
ssh_opts: opts.ssh_opts,
@ -507,9 +514,7 @@ async fn run() -> Result<(), RunError> {
confirm_timeout: opts.confirm_timeout,
};
let supports_flakes = test_flake_support()
.await
.map_err(RunError::FlakeTestError)?;
let supports_flakes = test_flake_support().await.map_err(RunError::FlakeTest)?;
if !supports_flakes {
warn!("A Nix version without flakes support was detected, support for this is work in progress");
@ -534,6 +539,8 @@ async fn run() -> Result<(), RunError> {
opts.keep_result,
result_path,
&opts.extra_build_args,
opts.debug_logs,
opts.log_dir,
)
.await?;
@ -544,7 +551,10 @@ async fn run() -> Result<(), RunError> {
async fn main() -> Result<(), Box<dyn std::error::Error>> {
match run().await {
Ok(()) => (),
Err(err) => good_panic!("{}", err),
Err(err) => {
error!("{}", err);
std::process::exit(1);
}
}
Ok(())

297
src/deploy.rs Normal file
View File

@ -0,0 +1,297 @@
// SPDX-FileCopyrightText: 2020 Serokell <https://serokell.io/>
// SPDX-FileCopyrightText: 2020 Andreas Fuchs <asf@boinkor.net>
//
// SPDX-License-Identifier: MPL-2.0
use std::borrow::Cow;
use tokio::process::Command;
use thiserror::Error;
/// Inputs used by [`build_activate_command`] to assemble the remote
/// `activate-rs … activate` shell invocation for a single profile.
struct ActivateCommandData<'a> {
    // Optional sudo prefix (e.g. "sudo -u someuser"); prepended verbatim when present.
    sudo: &'a Option<String>,
    // Profile path passed as the second positional argument to `activate`.
    profile_path: &'a str,
    // Nix store closure; also where the `activate-rs` binary is looked up.
    closure: &'a str,
    // Adds `--auto-rollback` to the generated command when true.
    auto_rollback: bool,
    // Passed through as `--temp-path '<value>'` (single-quoted in the command).
    temp_path: &'a str,
    // Passed through as `--confirm-timeout <seconds>`.
    confirm_timeout: u16,
    // Adds `--magic-rollback` to the generated command when true.
    magic_rollback: bool,
    // Adds `--debug-logs` to the generated command when true.
    debug_logs: bool,
    // Passed through as `--log-dir <value>` when present (note: not quoted).
    log_dir: Option<&'a str>,
}
/// Builds the shell command that runs `activate-rs … activate` on the target
/// host, optionally prefixed with a sudo command.
///
/// Flag order is significant and covered by `test_activation_command_builder`:
/// global flags (`--debug-logs`, `--log-dir`, `--temp-path`) come before the
/// `activate` subcommand, subcommand flags after it.
fn build_activate_command(data: ActivateCommandData) -> String {
    let mut cmd = String::new();
    cmd.push_str(data.closure);
    cmd.push_str("/activate-rs");

    if data.debug_logs {
        cmd.push_str(" --debug-logs");
    }

    if let Some(log_dir) = data.log_dir {
        cmd.push_str(" --log-dir ");
        cmd.push_str(log_dir);
    }

    // Positional args and confirm timeout are always present.
    cmd.push_str(&format!(
        " --temp-path '{}' activate '{}' '{}' --confirm-timeout {}",
        data.temp_path, data.closure, data.profile_path, data.confirm_timeout
    ));

    if data.magic_rollback {
        cmd.push_str(" --magic-rollback");
    }

    if data.auto_rollback {
        cmd.push_str(" --auto-rollback");
    }

    // The sudo prefix wraps the whole command, so it goes on last.
    match data.sudo {
        Some(sudo_cmd) => format!("{} {}", sudo_cmd, cmd),
        None => cmd,
    }
}
#[test]
fn test_activation_command_builder() {
    // Pins the exact flag ordering and quoting produced by
    // build_activate_command for a fully-populated input.
    let expected = "sudo -u test /nix/store/blah/etc/activate-rs --debug-logs --log-dir /tmp/something.txt --temp-path '/tmp' activate '/nix/store/blah/etc' '/blah/profiles/test' --confirm-timeout 30 --magic-rollback --auto-rollback";

    let sudo = Some("sudo -u test".to_string());

    assert_eq!(
        build_activate_command(ActivateCommandData {
            sudo: &sudo,
            profile_path: "/blah/profiles/test",
            closure: "/nix/store/blah/etc",
            auto_rollback: true,
            temp_path: "/tmp",
            confirm_timeout: 30,
            magic_rollback: true,
            debug_logs: true,
            log_dir: Some("/tmp/something.txt"),
        }),
        expected,
    );
}
/// Inputs used by [`build_wait_command`] to assemble the remote
/// `activate-rs … wait` shell invocation (waits for the activation canary).
struct WaitCommandData<'a> {
    // Optional sudo prefix (e.g. "sudo -u someuser"); prepended verbatim when present.
    sudo: &'a Option<String>,
    // Nix store closure to wait for; also where `activate-rs` is looked up.
    closure: &'a str,
    // Passed through as `--temp-path '<value>'` (single-quoted in the command).
    temp_path: &'a str,
    // Adds `--debug-logs` to the generated command when true.
    debug_logs: bool,
    // Passed through as `--log-dir <value>` when present (note: not quoted).
    log_dir: Option<&'a str>,
}
/// Builds the shell command that runs `activate-rs … wait` on the target
/// host, optionally prefixed with a sudo command.
///
/// Flag order is significant and covered by `test_wait_command_builder`.
fn build_wait_command(data: WaitCommandData) -> String {
    let mut cmd = String::new();
    cmd.push_str(data.closure);
    cmd.push_str("/activate-rs");

    if data.debug_logs {
        cmd.push_str(" --debug-logs");
    }

    if let Some(log_dir) = data.log_dir {
        cmd.push_str(" --log-dir ");
        cmd.push_str(log_dir);
    }

    cmd.push_str(&format!(
        " --temp-path '{}' wait '{}'",
        data.temp_path, data.closure
    ));

    // The sudo prefix wraps the whole command, so it goes on last.
    match data.sudo {
        Some(sudo_cmd) => format!("{} {}", sudo_cmd, cmd),
        None => cmd,
    }
}
#[test]
fn test_wait_command_builder() {
    // Pins the exact flag ordering and quoting produced by
    // build_wait_command for a fully-populated input.
    let expected = "sudo -u test /nix/store/blah/etc/activate-rs --debug-logs --log-dir /tmp/something.txt --temp-path '/tmp' wait '/nix/store/blah/etc'";

    let sudo = Some("sudo -u test".to_string());

    assert_eq!(
        build_wait_command(WaitCommandData {
            sudo: &sudo,
            closure: "/nix/store/blah/etc",
            temp_path: "/tmp",
            debug_logs: true,
            log_dir: Some("/tmp/something.txt"),
        }),
        expected,
    );
}
/// Errors that can occur while activating a profile on a remote host over SSH.
///
/// NOTE(review): the `…Error` suffix on these variants is inconsistent with the
/// suffix-free style used by the other error enums in this crate (e.g.
/// `NixCheck`, `NixEvalExit`); renaming is deferred because `deploy_profile`
/// below constructs these variants by their current names.
#[derive(Error, Debug)]
pub enum DeployProfileError {
    // Spawning the background activation process itself failed.
    #[error("Failed to spawn activation command over SSH: {0}")]
    SSHSpawnActivateError(std::io::Error),
    // Running the activation command failed at the I/O level.
    #[error("Failed to run activation command over SSH: {0}")]
    SSHActivateError(std::io::Error),
    // Activation ran but exited non-zero; `None` means killed by a signal.
    #[error("Activating over SSH resulted in a bad exit code: {0:?}")]
    SSHActivateExitError(Option<i32>),
    // Running the `wait` command (canary watcher) failed at the I/O level.
    #[error("Failed to run wait command over SSH: {0}")]
    SSHWaitError(std::io::Error),
    // Waiting ran but exited non-zero; `None` means killed by a signal.
    #[error("Waiting over SSH resulted in a bad exit code: {0:?}")]
    SSHWaitExitError(Option<i32>),
    // Confirmation command failed to run; the remote end should auto-rollback.
    #[error("Failed to run confirmation command over SSH (the server should roll back): {0}")]
    SSHConfirmError(std::io::Error),
    // Confirmation ran but exited non-zero; the remote end should auto-rollback.
    #[error(
        "Confirming activation over SSH resulted in a bad exit code (the server should roll back): {0:?}"
    )]
    SSHConfirmExitError(Option<i32>),
}
/// Activate a previously-pushed profile on the remote node over SSH.
///
/// Two modes, selected by the merged `magic_rollback` setting (default true):
/// * magic rollback off: run the activation command and treat any non-zero
///   exit code as failure;
/// * magic rollback on: spawn activation in the background, run the bundled
///   `wait` command over a second SSH connection, then remove the canary
///   lock file to confirm — if confirmation fails, the remote side is
///   expected to roll back on its own.
pub async fn deploy_profile(
    deploy_data: &super::DeployData<'_>,
    deploy_defs: &super::DeployDefs,
) -> Result<(), DeployProfileError> {
    info!(
        "Activating profile `{}` for node `{}`",
        deploy_data.profile_name, deploy_data.node_name
    );
    // Settings not present in the merged config fall back to defaults.
    let temp_path: Cow<str> = match &deploy_data.merged_settings.temp_path {
        Some(x) => x.into(),
        None => "/tmp".into(),
    };
    let confirm_timeout = deploy_data.merged_settings.confirm_timeout.unwrap_or(30);
    let magic_rollback = deploy_data.merged_settings.magic_rollback.unwrap_or(true);
    let auto_rollback = deploy_data.merged_settings.auto_rollback.unwrap_or(true);
    // Shell command executed remotely: `<closure>/activate-rs ... activate`.
    let self_activate_command = build_activate_command(ActivateCommandData {
        sudo: &deploy_defs.sudo,
        profile_path: &deploy_defs.profile_path,
        closure: &deploy_data.profile.profile_settings.path,
        auto_rollback,
        temp_path: &temp_path,
        confirm_timeout,
        magic_rollback,
        debug_logs: deploy_data.debug_logs,
        log_dir: deploy_data.log_dir,
    });
    debug!("Constructed activation command: {}", self_activate_command);
    // A command-line hostname override takes precedence over the node config.
    let hostname = match deploy_data.cmd_overrides.hostname {
        Some(ref x) => x,
        None => &deploy_data.node.node_settings.hostname,
    };
    let ssh_addr = format!("ssh://{}@{}", deploy_defs.ssh_user, hostname);
    let mut ssh_activate_command_ = Command::new("ssh");
    let ssh_activate_command = ssh_activate_command_.arg(&ssh_addr);
    for ssh_opt in &deploy_data.merged_settings.ssh_opts {
        ssh_activate_command.arg(&ssh_opt);
    }
    if !magic_rollback {
        // Simple path: run activation to completion and check its exit code.
        let ssh_activate_exit_status = ssh_activate_command
            .arg(self_activate_command)
            .status()
            .await
            .map_err(DeployProfileError::SSHActivateError)?;
        match ssh_activate_exit_status.code() {
            Some(0) => (),
            a => return Err(DeployProfileError::SSHActivateExitError(a)),
        };
        info!("Success activating, done!");
    } else {
        // Magic-rollback path: activation runs concurrently while a separate
        // `wait` invocation watches for the canary file to appear.
        let self_wait_command = build_wait_command(WaitCommandData {
            sudo: &deploy_defs.sudo,
            closure: &deploy_data.profile.profile_settings.path,
            temp_path: &temp_path,
            debug_logs: deploy_data.debug_logs,
            log_dir: deploy_data.log_dir,
        });
        debug!("Constructed wait command: {}", self_wait_command);
        // NOTE(review): the spawned child handle is dropped without being
        // awaited — the SSH process keeps running detached while we wait
        // below; its own exit status is never checked. Confirm intended.
        let ssh_activate = ssh_activate_command
            .arg(self_activate_command)
            .spawn()
            .map_err(DeployProfileError::SSHSpawnActivateError)?;
        info!("Creating activation waiter");
        // Second SSH connection dedicated to the wait command.
        let mut ssh_wait_command_ = Command::new("ssh");
        let ssh_wait_command = ssh_wait_command_.arg(&ssh_addr);
        for ssh_opt in &deploy_data.merged_settings.ssh_opts {
            ssh_wait_command.arg(ssh_opt);
        }
        let ssh_wait_exit_status = ssh_wait_command
            .arg(self_wait_command)
            .status()
            .await
            .map_err(DeployProfileError::SSHWaitError)?;
        match ssh_wait_exit_status.code() {
            Some(0) => (),
            a => return Err(DeployProfileError::SSHWaitExitError(a)),
        };
        info!("Success activating, attempting to confirm activation");
        // Third SSH connection: delete the canary lock file to confirm.
        let mut c = Command::new("ssh");
        let mut ssh_confirm_command = c.arg(format!("ssh://{}@{}", deploy_defs.ssh_user, hostname));
        for ssh_opt in &deploy_data.merged_settings.ssh_opts {
            ssh_confirm_command = ssh_confirm_command.arg(ssh_opt);
        }
        let lock_path =
            super::make_lock_path(&temp_path, &deploy_data.profile.profile_settings.path);
        let mut confirm_command = format!("rm {}", lock_path);
        if let Some(sudo_cmd) = &deploy_defs.sudo {
            confirm_command = format!("{} {}", sudo_cmd, confirm_command);
        }
        debug!(
            "Attempting to run command to confirm deployment: {}",
            confirm_command
        );
        let ssh_exit_status = ssh_confirm_command
            .arg(confirm_command)
            .status()
            .await
            .map_err(DeployProfileError::SSHConfirmError)?;
        match ssh_exit_status.code() {
            Some(0) => (),
            a => return Err(DeployProfileError::SSHConfirmExitError(a)),
        };
        info!("Deployment confirmed.");
    }
    Ok(())
}

View File

@ -3,20 +3,132 @@
//
// SPDX-License-Identifier: MPL-2.0
use rnix::{types::*, NodeOrToken, SyntaxKind::*, SyntaxNode};
use std::path::PathBuf;
use rnix::{types::*, SyntaxKind::*};
use merge::Merge;
use thiserror::Error;
#[macro_export]
macro_rules! good_panic {
($($tts:tt)*) => {{
error!($($tts)*);
std::process::exit(1);
}}
use flexi_logger::*;
#[macro_use]
extern crate log;
#[macro_use]
extern crate serde_derive;
/// Build the path of the canary lock file used by magic rollback.
///
/// The file name embeds the store-hash segment of `closure`: the text between
/// the `/nix/store/` prefix and the first `-` (the whole remainder when there
/// is no `-`).
///
/// The previous implementation sliced `closure["/nix/store/".len()..]`
/// unconditionally, which panics for any closure shorter than the prefix (or
/// not rooted in the store at all); `strip_prefix` degrades gracefully to
/// using the whole string instead.
pub fn make_lock_path(temp_path: &str, closure: &str) -> String {
    let tail = closure.strip_prefix("/nix/store/").unwrap_or(closure);
    // First '-'-delimited segment; `split` always yields at least one item,
    // so the fallback only documents intent.
    let lock_hash = tail.split('-').next().unwrap_or(tail);
    format!("{}/deploy-rs-canary-{}", temp_path, lock_hash)
}
/// Map a log level to the marker emoji shown in formatted log lines.
fn make_emoji(level: log::Level) -> &'static str {
    use log::Level::*;
    match level {
        Error => "",
        Warn => "⚠️",
        Info => "",
        Debug => "",
        Trace => "🖊️",
    }
}
/// flexi_logger formatter for the `activate` binary: prefixes every line
/// with ⭐, the level emoji, and the `[activate]` tag.
pub fn logger_formatter_activate(
    w: &mut dyn std::io::Write,
    _now: &mut DeferredNow,
    record: &Record,
) -> Result<(), std::io::Error> {
    let lvl = record.level();
    let colored_level = style(lvl, lvl.to_string());
    write!(
        w,
        "⭐ {} [activate] [{}] {}",
        make_emoji(lvl),
        colored_level,
        record.args()
    )
}
/// flexi_logger formatter for the `wait` binary: prefixes every line
/// with 👀, the level emoji, and the `[wait]` tag.
pub fn logger_formatter_wait(
    w: &mut dyn std::io::Write,
    _now: &mut DeferredNow,
    record: &Record,
) -> Result<(), std::io::Error> {
    let lvl = record.level();
    let colored_level = style(lvl, lvl.to_string());
    write!(
        w,
        "👀 {} [wait] [{}] {}",
        make_emoji(lvl),
        colored_level,
        record.args()
    )
}
/// flexi_logger formatter for the `deploy` binary: prefixes every line
/// with 🚀, the level emoji, and the `[deploy]` tag.
pub fn logger_formatter_deploy(
    w: &mut dyn std::io::Write,
    _now: &mut DeferredNow,
    record: &Record,
) -> Result<(), std::io::Error> {
    let lvl = record.level();
    let colored_level = style(lvl, lvl.to_string());
    write!(
        w,
        "🚀 {} [deploy] [{}] {}",
        make_emoji(lvl),
        colored_level,
        record.args()
    )
}
/// Which deploy-rs binary is emitting logs. Selects the line formatter in
/// `init_logger` and, when logging to a file, the file-name discriminant.
pub enum LoggerType {
    Deploy,
    Activate,
    Wait,
}
/// Initialise flexi_logger for one of the deploy-rs binaries.
///
/// With a `log_dir`, everything at `debug` and above goes to a file in that
/// directory (discriminated by binary name for activate/wait) and is
/// duplicated to stderr at `debug` or `info` depending on `debug_logs`.
/// Without one, logs go straight to stderr at `debug`/`info` level.
/// In both cases `RUST_LOG` (via `with_env_or_str`) can override the spec.
pub fn init_logger(
    debug_logs: bool,
    log_dir: Option<&str>,
    logger_type: LoggerType,
) -> Result<(), FlexiLoggerError> {
    // Per-binary line formatter.
    let logger_formatter = match logger_type {
        LoggerType::Deploy => logger_formatter_deploy,
        LoggerType::Activate => logger_formatter_activate,
        LoggerType::Wait => logger_formatter_wait,
    };

    match log_dir {
        Some(log_dir) => {
            // File sink always records at debug; stderr duplication level
            // follows the --debug-logs flag.
            let stderr_dup = if debug_logs {
                Duplicate::Debug
            } else {
                Duplicate::Info
            };
            let mut logger = Logger::with_env_or_str("debug")
                .log_to_file()
                .format_for_stderr(logger_formatter)
                .set_palette("196;208;51;7;8".to_string())
                .directory(log_dir)
                .duplicate_to_stderr(stderr_dup)
                .print_message();
            // Tag activate/wait log files so they don't collide.
            logger = match logger_type {
                LoggerType::Activate => logger.discriminant("activate"),
                LoggerType::Wait => logger.discriminant("wait"),
                LoggerType::Deploy => logger,
            };
            logger.start()?;
        }
        None => {
            let level_spec = if debug_logs { "debug" } else { "info" };
            Logger::with_env_or_str(level_spec)
                .log_target(LogTarget::StdErr)
                .format(logger_formatter)
                .set_palette("196;208;51;7;8".to_string())
                .start()?;
        }
    }

    Ok(())
}
pub mod data;
@ -191,6 +303,9 @@ pub struct DeployData<'a> {
pub cmd_overrides: &'a CmdOverrides,
pub merged_settings: data::GenericSettings,
pub debug_logs: bool,
pub log_dir: Option<&'a str>,
}
#[derive(Debug)]
@ -259,6 +374,8 @@ pub fn make_deploy_data<'a, 's>(
profile: &'a data::Profile,
profile_name: &'a str,
cmd_overrides: &'a CmdOverrides,
debug_logs: bool,
log_dir: Option<&'a str>,
) -> DeployData<'a> {
let mut merged_settings = profile.generic_settings.clone();
merged_settings.merge(node.generic_settings.clone());
@ -292,13 +409,8 @@ pub fn make_deploy_data<'a, 's>(
cmd_overrides,
merged_settings,
debug_logs,
log_dir,
}
}
#[derive(Error, Debug)]
pub enum DeployPathToActivatePathError {
#[error("Deploy path did not have a parent directory")]
PathTooShort,
#[error("Deploy path was not valid utf8")]
InvalidUtf8,
}

View File

@ -2,22 +2,22 @@
//
// SPDX-License-Identifier: MPL-2.0
use std::path::Path;
use std::process::Stdio;
use tokio::process::Command;
use std::path::Path;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum PushProfileError {
#[error("Failed to calculate activate bin path from deploy bin path: {0}")]
DeployPathToActivatePathError(#[from] super::DeployPathToActivatePathError),
#[error("Failed to run Nix build command: {0}")]
BuildError(std::io::Error),
#[error("Nix build command resulted in a bad exit code: {0:?}")]
BuildExitError(Option<i32>),
#[error("Activation script deploy-rs-activate does not exist in profile.\n\
Did you forget to use deploy-rs#lib.<...>.activate.<...> on your profile path?")]
#[error(
"Activation script deploy-rs-activate does not exist in profile.\n\
Did you forget to use deploy-rs#lib.<...>.activate.<...> on your profile path?"
)]
DeployRsActivateDoesntExist,
#[error("Activation script activate-rs does not exist in profile.\n\
Is there a mismatch in deploy-rs used in the flake you're deploying and deploy-rs command you're running?")]
@ -32,53 +32,55 @@ pub enum PushProfileError {
CopyExitError(Option<i32>),
}
pub async fn push_profile(
supports_flakes: bool,
check_sigs: bool,
repo: &str,
deploy_data: &super::DeployData<'_>,
deploy_defs: &super::DeployDefs,
keep_result: bool,
result_path: Option<&str>,
extra_build_args: &[String],
) -> Result<(), PushProfileError> {
pub struct PushProfileData<'a> {
pub supports_flakes: bool,
pub check_sigs: bool,
pub repo: &'a str,
pub deploy_data: &'a super::DeployData<'a>,
pub deploy_defs: &'a super::DeployDefs,
pub keep_result: bool,
pub result_path: Option<&'a str>,
pub extra_build_args: &'a [String],
}
pub async fn push_profile(data: PushProfileData<'_>) -> Result<(), PushProfileError> {
info!(
"Building profile `{}` for node `{}`",
deploy_data.profile_name, deploy_data.node_name
data.deploy_data.profile_name, data.deploy_data.node_name
);
let mut build_c = if supports_flakes {
let mut build_c = if data.supports_flakes {
Command::new("nix")
} else {
Command::new("nix-build")
};
let mut build_command = if supports_flakes {
let mut build_command = if data.supports_flakes {
build_c.arg("build").arg(format!(
"{}#deploy.nodes.\"{}\".profiles.\"{}\".path",
repo, deploy_data.node_name, deploy_data.profile_name
data.repo, data.deploy_data.node_name, data.deploy_data.profile_name
))
} else {
build_c.arg(&repo).arg("-A").arg(format!(
build_c.arg(&data.repo).arg("-A").arg(format!(
"deploy.nodes.\"{}\".profiles.\"{}\".path",
deploy_data.node_name, deploy_data.profile_name
data.deploy_data.node_name, data.deploy_data.profile_name
))
};
build_command = match (keep_result, supports_flakes) {
build_command = match (data.keep_result, data.supports_flakes) {
(true, _) => {
let result_path = result_path.unwrap_or("./.deploy-gc");
let result_path = data.result_path.unwrap_or("./.deploy-gc");
build_command.arg("--out-link").arg(format!(
"{}/{}/{}",
result_path, deploy_data.node_name, deploy_data.profile_name
result_path, data.deploy_data.node_name, data.deploy_data.profile_name
))
}
(false, false) => build_command.arg("--no-out-link"),
(false, true) => build_command.arg("--no-link"),
};
for extra_arg in extra_build_args {
for extra_arg in data.extra_build_args {
build_command = build_command.arg(extra_arg);
}
@ -94,20 +96,34 @@ pub async fn push_profile(
a => return Err(PushProfileError::BuildExitError(a)),
};
if ! Path::new(format!("{}/deploy-rs-activate", deploy_data.profile.profile_settings.path).as_str()).exists() {
if !Path::new(
format!(
"{}/deploy-rs-activate",
data.deploy_data.profile.profile_settings.path
)
.as_str(),
)
.exists()
{
return Err(PushProfileError::DeployRsActivateDoesntExist);
}
if ! Path::new(format!("{}/activate-rs", deploy_data.profile.profile_settings.path).as_str()).exists() {
if !Path::new(
format!(
"{}/activate-rs",
data.deploy_data.profile.profile_settings.path
)
.as_str(),
)
.exists()
{
return Err(PushProfileError::ActivateRsDoesntExist);
}
if let Ok(local_key) = std::env::var("LOCAL_KEY") {
info!(
"Signing key present! Signing profile `{}` for node `{}`",
deploy_data.profile_name, deploy_data.node_name
data.deploy_data.profile_name, data.deploy_data.node_name
);
let sign_exit_status = Command::new("nix")
@ -115,7 +131,7 @@ pub async fn push_profile(
.arg("-r")
.arg("-k")
.arg(local_key)
.arg(&deploy_data.profile.profile_settings.path)
.arg(&data.deploy_data.profile.profile_settings.path)
.status()
.await
.map_err(PushProfileError::SignError)?;
@ -128,21 +144,22 @@ pub async fn push_profile(
debug!(
"Copying profile `{}` to node `{}`",
deploy_data.profile_name, deploy_data.node_name
data.deploy_data.profile_name, data.deploy_data.node_name
);
let mut copy_command_ = Command::new("nix");
let mut copy_command = copy_command_.arg("copy");
if deploy_data.merged_settings.fast_connection != Some(true) {
if data.deploy_data.merged_settings.fast_connection != Some(true) {
copy_command = copy_command.arg("--substitute-on-destination");
}
if !check_sigs {
if !data.check_sigs {
copy_command = copy_command.arg("--no-check-sigs");
}
let ssh_opts_str = deploy_data
let ssh_opts_str = data
.deploy_data
.merged_settings
.ssh_opts
// This should provide some extra safety, but it also breaks for some reason, oh well
@ -151,15 +168,15 @@ pub async fn push_profile(
// .collect::<Vec<String>>()
.join(" ");
let hostname = match deploy_data.cmd_overrides.hostname {
let hostname = match data.deploy_data.cmd_overrides.hostname {
Some(ref x) => x,
None => &deploy_data.node.node_settings.hostname,
None => &data.deploy_data.node.node_settings.hostname,
};
let copy_exit_status = copy_command
.arg("--to")
.arg(format!("ssh://{}@{}", deploy_defs.ssh_user, hostname))
.arg(&deploy_data.profile.profile_settings.path)
.arg(format!("ssh://{}@{}", data.deploy_defs.ssh_user, hostname))
.arg(&data.deploy_data.profile.profile_settings.path)
.env("NIX_SSHOPTS", ssh_opts_str)
.status()
.await

View File

@ -1,179 +0,0 @@
// SPDX-FileCopyrightText: 2020 Serokell <https://serokell.io/>
// SPDX-FileCopyrightText: 2020 Andreas Fuchs <asf@boinkor.net>
//
// SPDX-License-Identifier: MPL-2.0
use std::borrow::Cow;
use tokio::process::Command;
use thiserror::Error;
/// Build the shell command that runs the bundled `activate-rs` binary on the
/// remote host, honouring the rollback flags and optional sudo prefix.
fn build_activate_command(
    sudo: &Option<String>,
    profile_path: &str,
    closure: &str,
    auto_rollback: bool,
    temp_path: &Cow<str>,
    confirm_timeout: u16,
    magic_rollback: bool,
) -> String {
    // Base invocation: the activate-rs binary shipped inside the closure,
    // with the profile path and closure as positional arguments.
    let mut cmd = format!(
        "{}/activate-rs '{}' '{}' --temp-path {} --confirm-timeout {}",
        closure, profile_path, closure, temp_path, confirm_timeout
    );
    if magic_rollback {
        cmd.push_str(" --magic-rollback");
    }
    if auto_rollback {
        cmd.push_str(" --auto-rollback");
    }
    // An optional sudo wrapper goes in front of the whole command.
    match sudo {
        Some(sudo_cmd) => format!("{} {}", sudo_cmd, cmd),
        None => cmd,
    }
}
/// Pins the exact argument order and quoting produced by
/// `build_activate_command` when every option is enabled.
///
/// Fix: removed the unused local `activate_path_str`, which was never read
/// anywhere in the test and only produced a dead-code warning.
#[test]
fn test_activation_command_builder() {
    let sudo = Some("sudo -u test".to_string());
    let profile_path = "/blah/profiles/test";
    let closure = "/nix/store/blah/etc";
    let auto_rollback = true;
    let temp_path = &"/tmp".into();
    let confirm_timeout = 30;
    let magic_rollback = true;

    assert_eq!(
        build_activate_command(
            &sudo,
            profile_path,
            closure,
            auto_rollback,
            temp_path,
            confirm_timeout,
            magic_rollback
        ),
        "sudo -u test /nix/store/blah/etc/activate-rs '/blah/profiles/test' '/nix/store/blah/etc' --temp-path /tmp --confirm-timeout 30 --magic-rollback --auto-rollback"
            .to_string(),
    );
}
/// Errors raised while activating a profile on a remote node over SSH, or
/// while confirming the activation when magic rollback is enabled.
#[derive(Error, Debug)]
pub enum DeployProfileError {
    // Propagated from resolving the activate binary's path.
    #[error("Failed to calculate activate bin path from deploy bin path: {0}")]
    DeployPathToActivatePathError(#[from] super::DeployPathToActivatePathError),
    // I/O-level failure running the activation command.
    #[error("Failed to run activation command over SSH: {0}")]
    SSHActivateError(std::io::Error),
    // Activation exited non-zero; `None` means terminated by a signal.
    #[error("Activation over SSH resulted in a bad exit code: {0:?}")]
    SSHActivateExitError(Option<i32>),
    // Confirmation failures: the remote side is expected to roll back.
    #[error("Failed to run confirmation command over SSH (the server should roll back): {0}")]
    SSHConfirmError(std::io::Error),
    #[error(
        "Confirming activation over SSH resulted in a bad exit code (the server should roll back): {0:?}"
    )]
    SSHConfirmExitError(Option<i32>),
}
/// Activate a previously-pushed profile on the remote node over SSH.
///
/// Runs the activation command to completion and checks its exit code; when
/// the merged `magic_rollback` setting is enabled (default), it then removes
/// the canary lock file over a second SSH connection to confirm the
/// deployment — otherwise the remote side is expected to roll back.
pub async fn deploy_profile(
    deploy_data: &super::DeployData<'_>,
    deploy_defs: &super::DeployDefs,
) -> Result<(), DeployProfileError> {
    info!(
        "Activating profile `{}` for node `{}`",
        deploy_data.profile_name, deploy_data.node_name
    );
    // Settings not present in the merged config fall back to defaults.
    let temp_path: Cow<str> = match &deploy_data.merged_settings.temp_path {
        Some(x) => x.into(),
        None => "/tmp".into(),
    };
    let confirm_timeout = deploy_data.merged_settings.confirm_timeout.unwrap_or(30);
    let magic_rollback = deploy_data.merged_settings.magic_rollback.unwrap_or(true);
    let auto_rollback = deploy_data.merged_settings.auto_rollback.unwrap_or(true);
    // Shell command executed remotely via SSH.
    let self_activate_command = build_activate_command(
        &deploy_defs.sudo,
        &deploy_defs.profile_path,
        &deploy_data.profile.profile_settings.path,
        auto_rollback,
        &temp_path,
        confirm_timeout,
        magic_rollback,
    );
    debug!("Constructed activation command: {}", self_activate_command);
    // A command-line hostname override takes precedence over the node config.
    let hostname = match deploy_data.cmd_overrides.hostname {
        Some(ref x) => x,
        None => &deploy_data.node.node_settings.hostname,
    };
    // `-t` allocates a TTY so sudo can prompt interactively if needed.
    let mut c = Command::new("ssh");
    let mut ssh_command = c
        .arg("-t")
        .arg(format!("ssh://{}@{}", deploy_defs.ssh_user, hostname));
    for ssh_opt in &deploy_data.merged_settings.ssh_opts {
        ssh_command = ssh_command.arg(ssh_opt);
    }
    let ssh_exit_status = ssh_command
        .arg(self_activate_command)
        .status()
        .await
        .map_err(DeployProfileError::SSHActivateError)?;
    match ssh_exit_status.code() {
        Some(0) => (),
        a => return Err(DeployProfileError::SSHActivateExitError(a)),
    };
    info!("Success activating!");
    if magic_rollback {
        info!("Attempting to confirm activation");
        // Second SSH connection dedicated to the confirmation command.
        let mut c = Command::new("ssh");
        let mut ssh_confirm_command = c.arg(format!("ssh://{}@{}", deploy_defs.ssh_user, hostname));
        for ssh_opt in &deploy_data.merged_settings.ssh_opts {
            ssh_confirm_command = ssh_confirm_command.arg(ssh_opt);
        }
        // NOTE(review): this uses everything after "/nix/store/" (hash AND
        // package name) as the lock suffix, rather than trimming at the
        // first '-'; confirm it matches what the remote activate-rs expects.
        let lock_hash = &deploy_data.profile.profile_settings.path["/nix/store/".len()..];
        let lock_path = format!("{}/deploy-rs-canary-{}", temp_path, lock_hash);
        // Removing the canary lock file is the confirmation signal.
        let mut confirm_command = format!("rm {}", lock_path);
        if let Some(sudo_cmd) = &deploy_defs.sudo {
            confirm_command = format!("{} {}", sudo_cmd, confirm_command);
        }
        debug!(
            "Attempting to run command to confirm deployment: {}",
            confirm_command
        );
        let ssh_exit_status = ssh_confirm_command
            .arg(confirm_command)
            .status()
            .await
            .map_err(DeployProfileError::SSHConfirmError)?;
        match ssh_exit_status.code() {
            Some(0) => (),
            a => return Err(DeployProfileError::SSHConfirmExitError(a)),
        };
        info!("Deployment confirmed.");
    }
    Ok(())
}