Backups Rework (#698)

* wip: Backup all

* wip: Backup

* backup code complete

* wip

* wip

* update types

* wip

* fix errors

* Backups wizard (#699)

* backup adjustments

* fix endpoint arg

* Update prod-key-modal.page.ts

Co-authored-by: Drew Ansbacher <drew.ansbacher@spiredigital.com>
Co-authored-by: Aiden McClelland <me@drbonez.dev>

* build errors addressed

* working

* update backup command input, nix, and apk add

* add ecryptfs-utils

* fix build

* wip

* fixes for macos

* more mac magic

* fix typo

* working

* fixes after rebase

* chore: remove unused imports

Co-authored-by: Justin Miller <dragondef@gmail.com>
Co-authored-by: Drew Ansbacher <drew.ansbacher@gmail.com>
Co-authored-by: Drew Ansbacher <drew.ansbacher@spiredigital.com>
Co-authored-by: Lucy Cifferello <12953208+elvece@users.noreply.github.com>
This commit is contained in:
Aiden McClelland
2021-10-23 22:00:23 -06:00
parent 78dcce7be5
commit 8056285a7f
52 changed files with 2032 additions and 873 deletions

15
appmgr/Cargo.lock generated
View File

@@ -232,9 +232,9 @@ checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb"
[[package]]
name = "bitflags"
version = "1.2.1"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitvec"
@@ -854,7 +854,7 @@ dependencies = [
"lazy_static",
"libc",
"log",
"nix 0.22.2",
"nix 0.23.0",
"num",
"openssh-keys",
"openssl",
@@ -1775,9 +1775,9 @@ dependencies = [
[[package]]
name = "nix"
version = "0.22.2"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d3bb9a13fa32bc5aeb64150cd3f32d6cf4c748f8f8a417cce5d2eb976a8370ba"
checksum = "f305c2c2e4c39a82f7bf0bf65fb557f9070ce06781d4f2454295cc34b1c43188"
dependencies = [
"bitflags",
"cc",
@@ -2044,14 +2044,15 @@ dependencies = [
"json-patch",
"json-ptr",
"lazy_static",
"log",
"nix 0.22.2",
"nix 0.23.0",
"patch-db-macro",
"serde",
"serde_cbor 0.11.1",
"serde_json",
"thiserror",
"tokio 1.12.0",
"tracing",
"tracing-error",
]
[[package]]

View File

@@ -65,8 +65,8 @@ git-version = "0.3.5"
hex = "0.4.3"
hmac = "0.11.0"
http = "0.2.5"
hyper-ws-listener = { git = "https://github.com/Start9Labs/hyper-ws-listener.git", branch = "main" }
hyper = "0.14.13"
hyper-ws-listener = { git = "https://github.com/Start9Labs/hyper-ws-listener.git", branch = "main" }
indexmap = { version = "1.7.0", features = ["serde"] }
isocountry = "0.3.2"
itertools = "0.10.1"
@@ -74,12 +74,12 @@ jsonpath_lib = "0.3.0"
lazy_static = "1.4"
libc = "0.2.103"
log = "0.4.14"
nix = "0.22.1"
nix = "0.23.0"
num = "0.4.0"
openssh-keys = "0.5.0"
openssl = { version = "0.10.36", features = ["vendored"] }
patch-db = { version = "*", path = "../patch-db/patch-db", features = [
"debug",
"trace",
] }
pbkdf2 = "0.9.0"
pin-project = "1.0.8"
@@ -120,14 +120,14 @@ tokio-util = { version = "0.6.8", features = ["io"] }
torut = "0.2.0"
tracing = "0.1"
tracing-error = "0.1"
tracing-futures = "0.2"
tracing-subscriber = "0.2"
tracing-futures="0.2"
typed-builder = "0.9.1"
url = { version = "2.2.2", features = ["serde"] }
[dependencies.serde_with]
features = ["macros", "json"]
version = "1.10.0"
features = [ "macros", "json" ]
[profile.dev.package.backtrace]
opt-level = 3

View File

@@ -30,6 +30,16 @@
"nullable": []
}
},
"165daa7d6a60cb42122373b2c5ac7d39399bcc99992f0002ee7bfef50a8daceb": {
"query": "DELETE FROM certificates WHERE id = 0 OR id = 1;",
"describe": {
"columns": [],
"parameters": {
"Right": 0
},
"nullable": []
}
},
"177c4b9cc7901a3b906e5969b86b1c11e6acbfb8e86e98f197d7333030b17964": {
"query": "DELETE FROM notifications WHERE id = ?",
"describe": {
@@ -134,6 +144,16 @@
"nullable": []
}
},
"5b114c450073f77f466c980a2541293f30087b57301c379630326e5e5c2fb792": {
"query": "REPLACE INTO tor (package, interface, key) VALUES (?, ?, ?)",
"describe": {
"columns": [],
"parameters": {
"Right": 3
},
"nullable": []
}
},
"5c47da44b9c84468e95a13fc47301989900f130b3b5899d1ee6664df3ed812ac": {
"query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (0, ?, ?, NULL, datetime('now'), datetime('now'))",
"describe": {

View File

@@ -4,12 +4,12 @@ use std::marker::PhantomData;
use chrono::{DateTime, Utc};
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use http::HeaderValue;
use rpc_toolkit::command;
use rpc_toolkit::command_helpers::prelude::{RequestParts, ResponseParts};
use rpc_toolkit::yajrc::RpcError;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use sqlx::{Executor, Sqlite};
use tracing::instrument;
use crate::context::{CliContext, RpcContext};
@@ -65,6 +65,32 @@ async fn cli_login(
Ok(())
}
/// Verifies `password` against the stored argon2 `hash`.
///
/// Returns `ErrorKind::IncorrectPassword` both when the password does not
/// match and when the stored hash fails to parse — the caller cannot
/// distinguish a corrupt hash from a wrong password, so no information about
/// the stored credential leaks through the error.
pub fn check_password(hash: &str, password: &str) -> Result<(), Error> {
    ensure_code!(
        // `hash` is already `&str`; passing it directly avoids the needless
        // double reference (`&&str`) the original created with `&hash`.
        argon2::verify_encoded(hash, password.as_bytes()).map_err(|_| {
            Error::new(
                eyre!("Password Incorrect"),
                crate::ErrorKind::IncorrectPassword,
            )
        })?,
        crate::ErrorKind::IncorrectPassword,
        "Password Incorrect"
    );
    Ok(())
}
/// Fetches the account password hash from the secret store and verifies
/// `password` against it via [`check_password`].
pub async fn check_password_against_db<Ex>(secrets: &mut Ex, password: &str) -> Result<(), Error>
where
    for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
{
    let record = sqlx::query!("SELECT password FROM account")
        .fetch_one(secrets)
        .await?;
    check_password(&record.password, password)
}
#[command(
custom_cli(cli_login(async, context(CliContext))),
display(display_none),
@@ -85,17 +111,7 @@ pub async fn login(
) -> Result<(), Error> {
let password = password.unwrap_or_default();
let mut handle = ctx.secret_store.acquire().await?;
let pw_hash = sqlx::query!("SELECT password FROM account")
.fetch_one(&mut handle)
.await?
.password;
ensure_code!(
argon2::verify_encoded(&pw_hash, password.as_bytes()).map_err(|_| {
Error::new(eyre!("Password Incorrect"), crate::ErrorKind::Authorization)
})?,
crate::ErrorKind::Authorization,
"Password Incorrect"
);
check_password_against_db(&mut handle, &password).await?;
let hash_token = HashSessionToken::new();
let user_agent = req.headers.get("user-agent").and_then(|h| h.to_str().ok());

View File

@@ -0,0 +1,387 @@
use std::collections::BTreeMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use chrono::Utc;
use color_eyre::eyre::eyre;
use futures::task::Spawn;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use patch_db::{DbHandle, LockType, PatchDbHandle, Revision};
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use torut::onion::TorSecretKeyV3;
use tracing::instrument;
use super::PackageBackupReport;
use crate::auth::check_password_against_db;
use crate::backup::{BackupReport, ServerBackupReport};
use crate::context::RpcContext;
use crate::db::model::ServerStatus;
use crate::db::util::WithRevision;
use crate::disk::util::{BackupMountGuard, TmpMountGuard};
use crate::notifications::NotificationLevel;
use crate::s9pk::manifest::PackageId;
use crate::status::MainStatus;
use crate::util::{display_none, AtomicFile, IoFormat};
use crate::version::VersionT;
use crate::{Error, ErrorKind, ResultExt};
/// The OS-level portion of a backup, written to `os-backup.cbor` by
/// `perform_backup`: the server's Tor identity key, the root CA key/cert
/// exported from the SSL manager, and the `ui` subtree of the database.
#[derive(Debug)]
pub struct OsBackup {
    pub tor_key: TorSecretKeyV3,
    pub root_ca_key: PKey<Private>,
    pub root_ca_cert: X509,
    pub ui: Value,
}
impl<'de> Deserialize<'de> for OsBackup {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
#[derive(Deserialize)]
struct OsBackupDe {
tor_key: TorSecretKeyV3,
root_ca_key: String,
root_ca_cert: String,
ui: Value,
}
let int = OsBackupDe::deserialize(deserializer)?;
Ok(OsBackup {
tor_key: int.tor_key,
root_ca_key: PKey::<Private>::private_key_from_pem(int.root_ca_key.as_bytes())
.map_err(serde::de::Error::custom)?,
root_ca_cert: X509::from_pem(int.root_ca_cert.as_bytes())
.map_err(serde::de::Error::custom)?,
ui: int.ui,
})
}
}
impl Serialize for OsBackup {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
#[derive(Serialize)]
struct OsBackupSer<'a> {
tor_key: &'a TorSecretKeyV3,
root_ca_key: String,
root_ca_cert: String,
ui: &'a Value,
}
OsBackupSer {
tor_key: &self.tor_key,
root_ca_key: String::from_utf8(
self.root_ca_key
.private_key_to_pem_pkcs8()
.map_err(serde::ser::Error::custom)?,
)
.map_err(serde::ser::Error::custom)?,
root_ca_cert: String::from_utf8(
self.root_ca_cert
.to_pem()
.map_err(serde::ser::Error::custom)?,
)
.map_err(serde::ser::Error::custom)?,
ui: &self.ui,
}
.serialize(serializer)
}
}
/// RPC command `backup.create`.
///
/// Verifies `password` against the account password in the secret store,
/// flips the server status to `BackingUp` (via `assure_backing_up`), then
/// runs the actual backup in a detached task. The command returns
/// immediately with the revision of the status change; completion — success
/// or failure — is reported asynchronously through the notification manager.
#[command(rename = "create", display(display_none))]
pub async fn backup_all(
    #[context] ctx: RpcContext,
    // Device to back up onto — passed to TmpMountGuard in perform_backup.
    #[arg] logicalname: PathBuf,
    // Present when re-keying an existing backup: it unlocks the backup with
    // the old password before `password` becomes the new one.
    #[arg(rename = "old-password", long = "old-password")] old_password: Option<String>,
    #[arg] password: String,
) -> Result<WithRevision<()>, Error> {
    let mut db = ctx.db.handle();
    check_password_against_db(&mut ctx.secret_store.acquire().await?, &password).await?;
    let revision = assure_backing_up(&mut db).await?;
    tokio::task::spawn(async move {
        match perform_backup(
            &ctx,
            &mut db,
            logicalname,
            old_password.as_deref(),
            &password,
        )
        .await
        {
            // The backup ran to completion; per-package failures (if any)
            // are carried inside the report rather than as an Err.
            Ok(report) => ctx
                .notification_manager
                .notify(
                    &mut db,
                    None,
                    NotificationLevel::Success,
                    "Backup Complete".to_owned(),
                    "Your backup has completed".to_owned(),
                    BackupReport {
                        server: ServerBackupReport {
                            attempted: true,
                            error: None,
                        },
                        packages: report,
                    },
                    None,
                )
                .await
                .expect("failed to send notification"),
            Err(e) => {
                tracing::error!("Backup Failed: {}", e);
                tracing::debug!("{:?}", e);
                ctx.notification_manager
                    .notify(
                        &mut db,
                        None,
                        NotificationLevel::Error,
                        "Backup Failed".to_owned(),
                        "Your backup failed to complete.".to_owned(),
                        BackupReport {
                            server: ServerBackupReport {
                                attempted: true,
                                error: Some(e.to_string()),
                            },
                            packages: BTreeMap::new(),
                        },
                        None,
                    )
                    .await
                    .expect("failed to send notification");
            }
        }
        // Regardless of outcome, restore the status so the server doesn't
        // stay stuck in `BackingUp`.
        crate::db::DatabaseModel::new()
            .server_info()
            .status()
            .put(&mut db, &ServerStatus::Running)
            .await
            .expect("failed to change server status");
    });
    Ok(WithRevision {
        response: (),
        revision,
    })
}
/// Atomically checks that the server is `Running` and, if so, transitions it
/// to `BackingUp` in a single patch-db transaction, returning the resulting
/// revision.
///
/// Rejects with `InvalidRequest` when the server is updating, updated (and
/// awaiting reset), or already backing up — this is what prevents two
/// concurrent backups from starting.
#[instrument(skip(db))]
async fn assure_backing_up(db: &mut PatchDbHandle) -> Result<Option<Arc<Revision>>, Error> {
    let mut tx = db.begin().await?;
    let mut info = crate::db::DatabaseModel::new()
        .server_info()
        .get_mut(&mut tx)
        .await?;
    match &info.status {
        ServerStatus::Updating => {
            return Err(Error::new(
                eyre!("Server is updating!"),
                crate::ErrorKind::InvalidRequest,
            ))
        }
        ServerStatus::Updated => {
            return Err(Error::new(
                eyre!("Server is updated and needs to be reset"),
                crate::ErrorKind::InvalidRequest,
            ))
        }
        ServerStatus::BackingUp => {
            return Err(Error::new(
                eyre!("Server is already backing up!"),
                crate::ErrorKind::InvalidRequest,
            ))
        }
        ServerStatus::Running => (),
    }
    info.status = ServerStatus::BackingUp;
    info.save(&mut tx).await?;
    Ok(tx.commit(None).await?)
}
/// Serializes `value` as CBOR to `tmp_path`, syncs it to disk, then
/// atomically renames it into place at `path`, so readers never observe a
/// partially-written file.
///
/// NOTE(review): not referenced elsewhere in this file — presumably used by
/// the restore path; confirm before removing.
async fn write_cbor_file<T: Serialize>(
    value: &T,
    tmp_path: impl AsRef<Path>,
    path: impl AsRef<Path>,
) -> Result<(), Error> {
    let tmp_path = tmp_path.as_ref();
    let path = path.as_ref();
    let mut file = File::create(tmp_path)
        .await
        .with_ctx(|_| (ErrorKind::Filesystem, tmp_path.display().to_string()))?;
    // Attach the same filesystem-path context to write/flush/sync failures
    // that create/rename already carry, so every error names the file.
    file.write_all(&IoFormat::Cbor.to_vec(value)?)
        .await
        .with_ctx(|_| (ErrorKind::Filesystem, tmp_path.display().to_string()))?;
    file.flush()
        .await
        .with_ctx(|_| (ErrorKind::Filesystem, tmp_path.display().to_string()))?;
    file.shutdown()
        .await
        .with_ctx(|_| (ErrorKind::Filesystem, tmp_path.display().to_string()))?;
    // fsync before rename: the rename must not land before the data does.
    file.sync_all()
        .await
        .with_ctx(|_| (ErrorKind::Filesystem, tmp_path.display().to_string()))?;
    drop(file);
    tokio::fs::rename(tmp_path, path).await.with_ctx(|_| {
        (
            ErrorKind::Filesystem,
            format!("mv {} -> {}", tmp_path.display(), path.display()),
        )
    })
}
/// Performs the actual backup: mounts the backup medium, backs up every
/// installed package, then writes the OS-level backup (`os-backup.cbor`)
/// and updates the backup metadata before unmounting.
///
/// `old_password` is `Some` when the backup's password is being rotated:
/// the mount is unlocked with the old password and then re-keyed to
/// `password`. Returns a per-package report; a package-level failure is
/// recorded in the report rather than aborting the whole backup.
#[instrument(skip(ctx, db, password))]
async fn perform_backup<Db: DbHandle>(
    ctx: &RpcContext,
    mut db: Db,
    logicalname: PathBuf,
    old_password: Option<&str>,
    password: &str,
) -> Result<BTreeMap<PackageId, PackageBackupReport>, Error> {
    let mut backup_guard = BackupMountGuard::mount(
        TmpMountGuard::mount(&logicalname).await?,
        old_password.unwrap_or(password),
    )
    .await?;
    if old_password.is_some() {
        backup_guard.change_password(password)?;
    }
    let mut backup_report = BTreeMap::new();
    for package_id in crate::db::DatabaseModel::new()
        .package_data()
        .keys(&mut db, true)
        .await?
    {
        // Only installed packages are backed up; anything mid-install or
        // mid-removal is skipped silently.
        let installed_model = if let Some(installed_model) = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(&package_id)
            .and_then(|m| m.installed())
            .check(&mut db)
            .await?
        {
            installed_model
        } else {
            continue;
        };
        installed_model.lock(&mut db, LockType::Write).await;
        let manifest = installed_model
            .clone()
            .manifest()
            .get(&mut db, true)
            .await?;
        let main_status_model = installed_model.clone().status().main();
        // Capture the pre-backup run state so it can be restored afterwards;
        // packages already restoring/backing up are reported and skipped.
        let (started, health) = match main_status_model.get(&mut db, true).await?.into_owned() {
            MainStatus::Running { started, health } => (Some(started.clone()), health.clone()),
            MainStatus::Stopped | MainStatus::Stopping => (None, Default::default()),
            MainStatus::Restoring { .. } => {
                backup_report.insert(
                    package_id,
                    PackageBackupReport {
                        error: Some(
                            "Can't do backup because service is in a restoring state".to_owned(),
                        ),
                    },
                );
                continue;
            }
            MainStatus::BackingUp { .. } => {
                backup_report.insert(
                    package_id,
                    PackageBackupReport {
                        error: Some(
                            "Can't do backup because service is in a backing up state".to_owned(),
                        ),
                    },
                );
                continue;
            }
        };
        main_status_model
            .put(
                &mut db,
                &MainStatus::BackingUp {
                    started: started.clone(),
                    health: health.clone(),
                },
            )
            .await?;
        // Mount this package's slice of the backup only for the duration of
        // its backup action.
        let guard = backup_guard.mount_package_backup(&package_id).await?;
        let res = manifest
            .backup
            .create(
                &ctx,
                &package_id,
                &manifest.title,
                &manifest.version,
                &manifest.interfaces,
                &manifest.volumes,
            )
            .await;
        drop(guard);
        backup_report.insert(
            package_id.clone(),
            PackageBackupReport {
                error: res.as_ref().err().map(|e| e.to_string()),
            },
        );
        // Record the result and restore the package's prior run state in one
        // transaction.
        let mut tx = db.begin().await?;
        if let Ok(pkg_meta) = res {
            installed_model
                .last_backup()
                .put(&mut tx, &Some(pkg_meta.timestamp))
                .await?;
            backup_guard
                .metadata
                .package_backups
                .insert(package_id, pkg_meta);
        }
        main_status_model
            .put(
                &mut tx,
                &match started {
                    Some(started) => MainStatus::Running { started, health },
                    None => MainStatus::Stopped,
                },
            )
            .await?;
        tx.save().await?;
    }
    // OS-level backup: Tor key, root CA, and the UI subtree of the db,
    // written atomically as os-backup.cbor.
    let (root_ca_key, root_ca_cert) = ctx
        .net_controller
        .nginx
        .ssl_manager
        .export_root_ca()
        .await?;
    let mut os_backup_file = AtomicFile::new(backup_guard.as_ref().join("os-backup.cbor")).await?;
    os_backup_file
        .write_all(
            &IoFormat::Cbor.to_vec(&OsBackup {
                tor_key: ctx.net_controller.tor.embassyd_tor_key().await,
                root_ca_key,
                root_ca_cert,
                ui: crate::db::DatabaseModel::new()
                    .ui()
                    .get(&mut db, true)
                    .await?
                    .into_owned(),
            })?,
        )
        .await?;
    os_backup_file.save().await?;
    let timestamp = Some(Utc::now());
    backup_guard.unencrypted_metadata.version = crate::version::Current::new().semver().into();
    backup_guard.unencrypted_metadata.full = true;
    backup_guard.metadata.version = crate::version::Current::new().semver().into();
    backup_guard.metadata.timestamp = timestamp;
    backup_guard.save_and_unmount().await?;
    crate::db::DatabaseModel::new()
        .server_info()
        .last_backup()
        .put(&mut db, &timestamp)
        .await?;
    Ok(backup_report)
}

View File

@@ -1,16 +1,57 @@
use std::collections::BTreeMap;
use std::path::Path;
use chrono::{DateTime, Utc};
use color_eyre::eyre::eyre;
use patch_db::HasModel;
use regex::NoExpand;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use torut::onion::TorSecretKeyV3;
use tracing::instrument;
use crate::action::{ActionImplementation, NoOutput};
use crate::context::RpcContext;
use crate::disk::PackageBackupInfo;
use crate::install::PKG_ARCHIVE_DIR;
use crate::net::interface::{InterfaceId, Interfaces};
use crate::s9pk::manifest::PackageId;
use crate::util::Version;
use crate::volume::{Volume, VolumeId, Volumes};
use crate::util::{IoFormat, Version};
use crate::version::{Current, VersionT};
use crate::volume::{backup_dir, Volume, VolumeId, Volumes, BACKUP_DIR};
use crate::{Error, ResultExt};
mod backup_bulk;
/// Overall result of a backup run; used as the notification payload sent
/// when a backup finishes.
#[derive(Debug, Deserialize, Serialize)]
pub struct BackupReport {
    server: ServerBackupReport,
    packages: BTreeMap<PackageId, PackageBackupReport>,
}
/// Outcome of the OS-level portion of a backup.
#[derive(Debug, Deserialize, Serialize)]
pub struct ServerBackupReport {
    attempted: bool,
    // None on success; the error's display string otherwise.
    error: Option<String>,
}
/// Outcome of backing up a single package; `error` is `None` on success.
#[derive(Debug, Deserialize, Serialize)]
pub struct PackageBackupReport {
    error: Option<String>,
}
/// Parent RPC command `backup`; all functionality lives in its subcommands
/// (currently only `create`).
#[command(subcommands(backup_bulk::backup_all))]
pub fn backup() -> Result<(), Error> {
    Ok(())
}
/// Contents of the per-package `metadata.cbor` written next to a package
/// backup: when it was taken, and the Tor key for each of the package's
/// interfaces (restored into the `tor` table on restore).
#[derive(Deserialize, Serialize)]
struct BackupMetadata {
    pub timestamp: DateTime<Utc>,
    pub tor_keys: BTreeMap<InterfaceId, TorSecretKeyV3>,
}
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
pub struct BackupActions {
pub create: ActionImplementation,
@@ -22,25 +63,95 @@ impl BackupActions {
&self,
ctx: &RpcContext,
pkg_id: &PackageId,
pkg_title: &str,
pkg_version: &Version,
interfaces: &Interfaces,
volumes: &Volumes,
) -> Result<NoOutput, Error> {
) -> Result<PackageBackupInfo, Error> {
let mut volumes = volumes.to_readonly();
volumes.insert(VolumeId::Backup, Volume::Backup { readonly: false });
let backup_dir = backup_dir(pkg_id);
if tokio::fs::metadata(&backup_dir).await.is_err() {
tokio::fs::create_dir_all(&backup_dir).await?
}
self.create
.execute(
.execute::<(), NoOutput>(
ctx,
pkg_id,
pkg_version,
Some("CreateBackup"),
&volumes,
None::<()>,
None,
false,
)
.await?
.map_err(|e| eyre!("{}", e.1))
.with_kind(crate::ErrorKind::Backup)?;
Ok(NoOutput)
let tor_keys = interfaces
.tor_keys(&mut ctx.secret_store.acquire().await?, pkg_id)
.await?;
let tmp_path = Path::new(BACKUP_DIR)
.join(pkg_id)
.join(format!("{}.s9pk", pkg_id));
let real_path = Path::new(BACKUP_DIR)
.join(pkg_id)
.join(format!(".{}.s9pk.tmp", pkg_id));
let s9pk_path = ctx
.datadir
.join(PKG_ARCHIVE_DIR)
.join(pkg_id)
.join(pkg_version.as_str())
.join(format!("{}.s9pk", pkg_id));
let mut infile = File::open(&s9pk_path).await?;
let mut outfile = File::create(&tmp_path).await?;
tokio::io::copy(&mut infile, &mut outfile)
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("cp {} -> {}", s9pk_path.display(), tmp_path.display()),
)
})?;
outfile.flush().await?;
outfile.shutdown().await?;
outfile.sync_all().await?;
tokio::fs::rename(&tmp_path, &real_path)
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("mv {} -> {}", tmp_path.display(), real_path.display()),
)
})?;
let timestamp = Utc::now();
let tmp_path = Path::new(BACKUP_DIR)
.join(pkg_id)
.join(".metadata.cbor.tmp");
let real_path = Path::new(BACKUP_DIR).join(pkg_id).join("metadata.cbor");
let mut outfile = File::create(&tmp_path).await?;
outfile
.write_all(&IoFormat::Cbor.to_vec(&BackupMetadata {
timestamp,
tor_keys,
})?)
.await?;
outfile.flush().await?;
outfile.shutdown().await?;
outfile.sync_all().await?;
tokio::fs::rename(&tmp_path, &real_path)
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("mv {} -> {}", tmp_path.display(), real_path.display()),
)
})?;
Ok(PackageBackupInfo {
os_version: Current::new().semver().into(),
title: pkg_title.to_owned(),
version: pkg_version.clone(),
timestamp,
})
}
pub async fn restore(
@@ -49,22 +160,43 @@ impl BackupActions {
pkg_id: &PackageId,
pkg_version: &Version,
volumes: &Volumes,
) -> Result<NoOutput, Error> {
) -> Result<(), Error> {
let mut volumes = volumes.clone();
volumes.insert(VolumeId::Backup, Volume::Backup { readonly: true });
self.restore
.execute(
.execute::<(), NoOutput>(
ctx,
pkg_id,
pkg_version,
Some("RestoreBackup"),
&volumes,
None::<()>,
None,
false,
)
.await?
.map_err(|e| eyre!("{}", e.1))
.with_kind(crate::ErrorKind::Restore)?;
Ok(NoOutput)
let metadata_path = Path::new(BACKUP_DIR).join(pkg_id).join("metadata.cbor");
let metadata: BackupMetadata = IoFormat::Cbor.from_slice(
&tokio::fs::read(&metadata_path).await.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
metadata_path.display().to_string(),
)
})?,
)?;
let mut sql_handle = ctx.secret_store.acquire().await?;
for (iface, key) in metadata.tor_keys {
let key_vec = key.as_bytes().to_vec();
sqlx::query!(
"REPLACE INTO tor (package, interface, key) VALUES (?, ?, ?)",
**pkg_id,
*iface,
key_vec,
)
.execute(&mut sql_handle)
.await?;
}
Ok(())
}
}

View File

@@ -157,10 +157,10 @@ async fn init(cfg_path: Option<&str>) -> Result<(), Error> {
info.status = ServerStatus::Running;
}
}
info.version = emver::Version::new(0, 3, 0, 0).into();
// TODO: run migrations
info.save(&mut handle).await?;
embassy::version::init(&mut handle).await?;
Ok(())
}

View File

@@ -11,7 +11,7 @@ use embassy::middleware::diagnostic::diagnostic;
use embassy::net::mdns::MdnsController;
use embassy::net::tor::tor_health_check;
use embassy::shutdown::Shutdown;
use embassy::status::{check_all, synchronize_all};
use embassy::status::synchronize_all;
use embassy::util::{daemon, Invoke};
use embassy::{static_server, Error, ErrorKind, ResultExt};
use futures::{FutureExt, TryFutureExt};
@@ -190,22 +190,6 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
Duration::from_millis(500),
rpc_ctx.shutdown.subscribe(),
);
let health_ctx = rpc_ctx.clone();
let health_daemon = daemon(
move || {
let ctx = health_ctx.clone();
async move {
if let Err(e) = check_all(&ctx).await {
tracing::error!("Error in Health Check daemon: {}", e);
tracing::debug!("{:?}", e);
} else {
tracing::trace!("Health Check completed successfully");
}
}
},
Duration::from_millis(500),
rpc_ctx.shutdown.subscribe(),
);
let tor_health_ctx = rpc_ctx.clone();
let tor_client = Client::builder()
.proxy(
@@ -231,35 +215,49 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
embassy::sound::MARIO_COIN.play().await?;
futures::try_join!(
server.map_err(|e| Error::new(e, ErrorKind::Network)),
revision_cache_task.map_err(|e| Error::new(
server
.map_err(|e| Error::new(e, ErrorKind::Network))
.map_ok(|_| tracing::debug!("RPC Server Shutdown")),
revision_cache_task
.map_err(|e| Error::new(
eyre!("{}", e).wrap_err("Revision Cache daemon panicked!"),
ErrorKind::Unknown
)),
ws_server.map_err(|e| Error::new(e, ErrorKind::Network)),
file_server.map_err(|e| Error::new(e, ErrorKind::Network)),
status_daemon.map_err(|e| Error::new(
e.wrap_err("Status Sync daemon panicked!"),
))
.map_ok(|_| tracing::debug!("Revision Cache Shutdown")),
ws_server
.map_err(|e| Error::new(e, ErrorKind::Network))
.map_ok(|_| tracing::debug!("WebSocket Server Shutdown")),
file_server
.map_err(|e| Error::new(e, ErrorKind::Network))
.map_ok(|_| tracing::debug!("Static File Server Shutdown")),
status_daemon
.map_err(|e| Error::new(
e.wrap_err("Status Sync Daemon panicked!"),
ErrorKind::Unknown
)),
health_daemon.map_err(|e| Error::new(
e.wrap_err("Health Check daemon panicked!"),
))
.map_ok(|_| tracing::debug!("Status Sync Daemon Shutdown")),
tor_health_daemon
.map_err(|e| Error::new(
e.wrap_err("Tor Health Daemon panicked!"),
ErrorKind::Unknown
)),
tor_health_daemon.map_err(|e| Error::new(
e.wrap_err("Tor Health daemon panicked!"),
ErrorKind::Unknown
)),
))
.map_ok(|_| tracing::debug!("Tor Health Daemon Shutdown")),
)?;
let mut shutdown = shutdown_recv
.recv()
.await
.with_kind(crate::ErrorKind::Unknown)?;
if let Some(shutdown) = &mut shutdown {
drop(shutdown.db_handle.take());
}
rpc_ctx.managers.empty().await?;
sig_handler.abort();
Ok(shutdown_recv
.recv()
.await
.with_kind(crate::ErrorKind::Unknown)?)
Ok(shutdown)
}
fn main() {

View File

@@ -74,11 +74,12 @@ async fn stop_common<Db: DbHandle>(
id: &PackageId,
breakages: &mut BTreeMap<PackageId, TaggedDependencyError>,
) -> Result<(), Error> {
let mut tx = db.begin().await?;
let mut status = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&id)
.and_then(|pkg| pkg.installed())
.expect(db)
.expect(&mut tx)
.await
.with_ctx(|_| {
(
@@ -88,11 +89,12 @@ async fn stop_common<Db: DbHandle>(
})?
.status()
.main()
.get_mut(db)
.get_mut(&mut tx)
.await?;
*status = MainStatus::Stopping;
status.save(db).await?;
status.save(&mut tx).await?;
tx.save().await?;
break_all_dependents_transitive(db, &id, DependencyError::NotRunning, breakages).await?;
Ok(())

View File

@@ -1,6 +1,8 @@
use std::collections::{BTreeMap, BTreeSet};
use std::sync::Arc;
use chrono::{DateTime, Utc};
use emver::VersionRange;
use patch_db::json_ptr::JsonPointer;
use patch_db::{HasModel, Map, MapModel, OptionModel};
use reqwest::Url;
@@ -15,6 +17,7 @@ use crate::s9pk::manifest::{Manifest, ManifestModel, PackageId};
use crate::status::health_check::HealthCheckId;
use crate::status::Status;
use crate::util::Version;
use crate::version::{Current, VersionT};
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
@@ -34,7 +37,15 @@ impl Database {
Database {
server_info: ServerInfo {
id,
version: emver::Version::new(0, 3, 0, 0).into(),
version: Current::new().semver().into(),
last_backup: None,
eos_version_compat: VersionRange::Conj(
Box::new(VersionRange::Anchor(
emver::GTE,
emver::Version::new(0, 3, 0, 0),
)),
Box::new(VersionRange::Anchor(emver::LTE, Current::new().semver())),
),
lan_address: format!("https://{}.local", hostname).parse().unwrap(),
tor_address: format!("http://{}", tor_key.public().get_onion_address())
.parse()
@@ -73,6 +84,8 @@ impl DatabaseModel {
pub struct ServerInfo {
pub id: String,
pub version: Version,
pub last_backup: Option<DateTime<Utc>>,
pub eos_version_compat: VersionRange,
pub lan_address: Url,
pub tor_address: Url,
pub status: ServerStatus,
@@ -191,6 +204,26 @@ pub enum PackageDataEntry {
installed: InstalledPackageDataEntry,
},
}
impl PackageDataEntry {
    /// Borrows the installed entry when the package is in a state that has
    /// one (`Installed` or `Updating`); `None` while installing or removing.
    pub fn installed(&self) -> Option<&InstalledPackageDataEntry> {
        match self {
            Self::Installed { installed, .. } | Self::Updating { installed, .. } => Some(installed),
            Self::Installing { .. } | Self::Removing { .. } => None,
        }
    }
    /// Mutable variant of [`Self::installed`].
    pub fn installed_mut(&mut self) -> Option<&mut InstalledPackageDataEntry> {
        match self {
            Self::Installed { installed, .. } | Self::Updating { installed, .. } => Some(installed),
            Self::Installing { .. } | Self::Removing { .. } => None,
        }
    }
    /// By-value variant of [`Self::installed`], consuming the entry.
    pub fn into_installed(self) -> Option<InstalledPackageDataEntry> {
        match self {
            Self::Installed { installed, .. } | Self::Updating { installed, .. } => Some(installed),
            Self::Installing { .. } | Self::Removing { .. } => None,
        }
    }
}
impl PackageDataEntryModel {
pub fn installed(self) -> OptionModel<InstalledPackageDataEntry> {
self.0.child("installed").into()
@@ -210,6 +243,7 @@ pub struct InstalledPackageDataEntry {
pub status: Status,
#[model]
pub manifest: Manifest,
pub last_backup: Option<DateTime<Utc>>,
pub system_pointers: Vec<SystemPointerSpec>,
#[model]
pub dependency_info: BTreeMap<PackageId, StaticDependencyInfo>,

View File

@@ -1,8 +1,6 @@
use std::collections::BTreeMap;
use std::time::Duration;
use crate::config::action::ConfigRes;
use crate::util::display_none;
use color_eyre::eyre::eyre;
use emver::VersionRange;
use futures::future::BoxFuture;
@@ -14,6 +12,7 @@ use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::action::{ActionImplementation, NoOutput};
use crate::config::action::ConfigRes;
use crate::config::{Config, ConfigSpec};
use crate::context::RpcContext;
use crate::db::model::CurrentDependencyInfo;
@@ -21,8 +20,7 @@ use crate::error::ResultExt;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::status::health_check::{HealthCheckId, HealthCheckResult};
use crate::status::{MainStatus, Status};
use crate::util::display_serializable;
use crate::util::Version;
use crate::util::{display_none, display_serializable, Version};
use crate::volume::Volumes;
use crate::Error;
@@ -723,13 +721,14 @@ pub fn break_transitive<'a, Db: DbHandle>(
breakages: &'a mut BTreeMap<PackageId, TaggedDependencyError>,
) -> BoxFuture<'a, Result<(), Error>> {
async move {
let mut tx = db.begin().await?;
let model = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|m| m.installed())
.expect(db)
.expect(&mut tx)
.await?;
let mut status = model.clone().status().get_mut(db).await?;
let mut status = model.clone().status().get_mut(&mut tx).await?;
let old = status.dependency_errors.0.remove(dependency);
let newly_broken = old.is_none();
@@ -755,7 +754,7 @@ pub fn break_transitive<'a, Db: DbHandle>(
.manifest()
.dependencies()
.idx_model(dependency)
.get(db, true)
.get(&mut tx, true)
.await?
.into_owned()
.ok_or_else(|| {
@@ -771,11 +770,20 @@ pub fn break_transitive<'a, Db: DbHandle>(
} else {
DependencyError::Transitive
};
break_all_dependents_transitive(db, id, transitive_error, breakages).await?;
}
}
status.save(&mut tx).await?;
status.save(db).await?;
tx.save().await?;
break_all_dependents_transitive(db, id, transitive_error, breakages).await?;
} else {
status.save(&mut tx).await?;
tx.save().await?;
}
} else {
status.save(&mut tx).await?;
tx.save().await?;
}
Ok(())
}
@@ -811,13 +819,14 @@ pub fn heal_transitive<'a, Db: DbHandle>(
dependency: &'a PackageId,
) -> BoxFuture<'a, Result<(), Error>> {
async move {
let mut tx = db.begin().await?;
let model = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|m| m.installed())
.expect(db)
.expect(&mut tx)
.await?;
let mut status = model.clone().status().get_mut(db).await?;
let mut status = model.clone().status().get_mut(&mut tx).await?;
let old = status.dependency_errors.0.remove(dependency);
@@ -826,19 +835,24 @@ pub fn heal_transitive<'a, Db: DbHandle>(
.manifest()
.dependencies()
.idx_model(dependency)
.expect(db)
.expect(&mut tx)
.await?
.get(db, true)
.get(&mut tx, true)
.await?;
if let Some(new) = old.try_heal(ctx, db, id, dependency, None, &*info).await? {
if let Some(new) = old
.try_heal(ctx, &mut tx, id, dependency, None, &*info)
.await?
{
status.dependency_errors.0.insert(dependency.clone(), new);
status.save(&mut tx).await?;
tx.save().await?;
} else {
status.save(&mut tx).await?;
tx.save().await?;
heal_all_dependents_transitive(ctx, db, id).await?;
}
}
status.save(db).await?;
Ok(())
}
.boxed()

View File

@@ -1,8 +1,17 @@
use std::collections::BTreeMap;
use std::path::PathBuf;
use chrono::{DateTime, Utc};
use clap::ArgMatches;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use tracing::instrument;
use self::util::DiskInfo;
use crate::util::{display_serializable, IoFormat};
use crate::context::RpcContext;
use crate::disk::util::{BackupMountGuard, TmpMountGuard};
use crate::s9pk::manifest::PackageId;
use crate::util::{display_serializable, IoFormat, Version};
use crate::Error;
pub mod main;
@@ -34,11 +43,7 @@ fn display_disk_info(info: Vec<DiskInfo>, matches: &ArgMatches<'_>) {
"N/A",
&format!("{:.2} GiB", disk.capacity as f64 / 1024.0 / 1024.0 / 1024.0),
"N/A",
if let Some(eos_info) = disk.embassy_os.as_ref() {
eos_info.version.as_str()
} else {
"N/A"
}
"N/A",
];
table.add_row(row);
for part in disk.partitions {
@@ -59,7 +64,11 @@ fn display_disk_info(info: Vec<DiskInfo>, matches: &ArgMatches<'_>) {
} else {
"N/A"
},
"N/A",
if let Some(eos) = part.embassy_os.as_ref() {
eos.version.as_str()
} else {
"N/A"
},
];
table.add_row(row);
}
@@ -75,3 +84,73 @@ pub async fn list(
) -> Result<Vec<DiskInfo>, Error> {
crate::disk::util::list().await
}
/// Top-level metadata describing a backup medium's contents; this is what
/// `backup-info` returns (cloned from the mounted backup's metadata).
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct BackupInfo {
    pub version: Version,
    // None until the first full backup completes.
    pub timestamp: Option<DateTime<Utc>>,
    pub package_backups: BTreeMap<PackageId, PackageBackupInfo>,
}
/// Metadata for one package's backup as recorded on the backup medium.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct PackageBackupInfo {
    pub title: String,
    pub version: Version,
    // Version of EmbassyOS that produced this package backup.
    pub os_version: Version,
    pub timestamp: DateTime<Utc>,
}
/// CLI display hook for `backup-info`: renders the backup metadata as a
/// table, or defers to `display_serializable` when `--format` was passed.
fn display_backup_info(info: BackupInfo, matches: &ArgMatches<'_>) {
    use prettytable::*;
    if matches.is_present("format") {
        return display_serializable(info, matches);
    }
    let mut table = Table::new();
    table.add_row(row![bc =>
        "ID",
        "VERSION",
        "OS VERSION",
        "TIMESTAMP",
    ]);
    // First row is the OS backup itself; its version doubles as the OS
    // version column.
    let os_timestamp = match &info.timestamp {
        Some(ts) => ts.to_string(),
        None => "N/A".to_owned(),
    };
    table.add_row(row![
        "EMBASSY OS",
        info.version.as_str(),
        info.version.as_str(),
        &os_timestamp,
    ]);
    for (pkg_id, pkg) in info.package_backups {
        table.add_row(row![
            pkg_id.as_str(),
            pkg.version.as_str(),
            pkg.os_version.as_str(),
            &pkg.timestamp.to_string(),
        ]);
    }
    table.print_tty(false);
}
/// RPC command `disk.backup-info`: mounts the backup on `logicalname` using
/// `password`, clones its metadata, and unmounts before returning.
#[command(rename = "backup-info", display(display_backup_info))]
#[instrument(skip(ctx, password))]
pub async fn backup_info(
    #[context] ctx: RpcContext,
    #[arg] logicalname: PathBuf,
    #[arg] password: String,
) -> Result<BackupInfo, Error> {
    let backup_guard =
        BackupMountGuard::mount(TmpMountGuard::mount(logicalname).await?, &password).await?;
    let metadata = backup_guard.metadata.clone();
    backup_guard.unmount().await?;
    Ok(metadata)
}

View File

@@ -1,7 +1,9 @@
use std::collections::BTreeMap;
use std::collections::{BTreeMap, BTreeSet};
use std::os::unix::prelude::OsStrExt;
use std::path::{Path, PathBuf};
use color_eyre::eyre::{self, eyre};
use digest::Digest;
use futures::TryStreamExt;
use indexmap::IndexSet;
use regex::Regex;
@@ -11,11 +13,16 @@ use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::process::Command;
use tracing::instrument;
use super::BackupInfo;
use crate::auth::check_password;
use crate::middleware::encrypt::{decrypt_slice, encrypt_slice};
use crate::s9pk::manifest::PackageId;
use crate::util::io::from_yaml_async_reader;
use crate::util::{GeneralGuard, Invoke, Version};
use crate::util::{AtomicFile, FileLock, GeneralGuard, Invoke, IoFormat, Version};
use crate::volume::BACKUP_DIR;
use crate::{Error, ResultExt as _};
pub const TMP_MOUNTPOINT: &'static str = "/media/embassy-os";
pub const TMP_MOUNTPOINT: &'static str = "/media/embassy-os/tmp";
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
@@ -25,7 +32,7 @@ pub struct DiskInfo {
pub model: Option<String>,
pub partitions: Vec<PartitionInfo>,
pub capacity: usize,
pub embassy_os: Option<EmbassyOsDiskInfo>,
pub internal: bool,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
@@ -35,12 +42,16 @@ pub struct PartitionInfo {
pub label: Option<String>,
pub capacity: usize,
pub used: Option<usize>,
pub embassy_os: Option<EmbassyOsRecoveryInfo>,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct EmbassyOsDiskInfo {
pub struct EmbassyOsRecoveryInfo {
pub version: Version,
pub full: bool,
pub password_hash: Option<String>,
pub wrapped_key: Option<String>,
}
const DISK_PATH: &'static str = "/dev/disk/by-path";
@@ -48,6 +59,7 @@ const SYS_BLOCK_PATH: &'static str = "/sys/block";
lazy_static::lazy_static! {
static ref PARTITION_REGEX: Regex = Regex::new("-part[0-9]+$").unwrap();
static ref ZPOOL_REGEX: Regex = Regex::new("^\\s+([a-z0-9]+)\\s+ONLINE").unwrap();
}
#[instrument(skip(path))]
@@ -135,12 +147,19 @@ pub async fn get_used<P: AsRef<Path>>(path: P) -> Result<usize, Error> {
#[instrument]
pub async fn list() -> Result<Vec<DiskInfo>, Error> {
if tokio::fs::metadata(TMP_MOUNTPOINT).await.is_err() {
tokio::fs::create_dir_all(TMP_MOUNTPOINT)
let zpool_drives: BTreeSet<PathBuf> = match Command::new("zpool")
.arg("status")
.invoke(crate::ErrorKind::Zfs)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, TMP_MOUNTPOINT))?;
}
{
Ok(v) => String::from_utf8(v)?
.lines()
.filter_map(|l| ZPOOL_REGEX.captures(l))
.filter_map(|c| c.get(1))
.map(|d| Path::new("/dev").join(d.as_str()))
.collect(),
Err(e) => BTreeSet::new(),
};
let disks = tokio_stream::wrappers::ReadDirStream::new(
tokio::fs::read_dir(DISK_PATH)
.await
@@ -192,6 +211,7 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
let mut res = Vec::with_capacity(disks.len());
for (disk, parts) in disks {
let mut internal = false;
let mut partitions = Vec::with_capacity(parts.len());
let vendor = get_vendor(&disk)
.await
@@ -207,8 +227,11 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
tracing::warn!("Could not get capacity of {}: {}", disk.display(), e.source)
})
.unwrap_or_default();
let mut embassy_os = None;
if zpool_drives.contains(&disk) {
internal = true;
} else {
for part in parts {
let mut embassy_os = None;
let label = get_label(&part).await?;
let capacity = get_capacity(&part)
.await
@@ -230,16 +253,53 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
used = get_used(&tmp_mountpoint)
.await
.map_err(|e| {
tracing::warn!("Could not get usage of {}: {}", part.display(), e.source)
tracing::warn!(
"Could not get usage of {}: {}",
part.display(),
e.source
)
})
.ok();
if label.as_deref() == Some("rootfs") {
let version_path = tmp_mountpoint.join("root").join("appmgr").join("version");
let backup_unencrypted_metadata_path =
tmp_mountpoint.join("EmbassyBackups/unencrypted-metadata.cbor");
if tokio::fs::metadata(&backup_unencrypted_metadata_path)
.await
.is_ok()
{
embassy_os = match (|| async {
IoFormat::Cbor.from_slice(
&tokio::fs::read(&backup_unencrypted_metadata_path)
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
backup_unencrypted_metadata_path.display().to_string(),
)
})?,
)
})()
.await
{
Ok(a) => Some(a),
Err(e) => {
tracing::error!(
"Error fetching unencrypted backup metadata: {}",
e
);
None
}
};
} else if label.as_deref() == Some("rootfs") {
let version_path =
tmp_mountpoint.join("root").join("appmgr").join("version");
if tokio::fs::metadata(&version_path).await.is_ok() {
embassy_os = Some(EmbassyOsDiskInfo {
embassy_os = Some(EmbassyOsRecoveryInfo {
version: from_yaml_async_reader(File::open(&version_path).await?)
.await?,
})
full: true,
password_hash: None,
wrapped_key: None,
});
}
}
mount_guard
@@ -253,39 +313,41 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
label,
capacity,
used,
embassy_os,
});
}
}
res.push(DiskInfo {
logicalname: disk,
vendor,
model,
partitions,
capacity,
embassy_os,
internal,
})
}
Ok(res)
}
#[instrument(skip(logicalname, mount_point))]
pub async fn mount<P0: AsRef<Path>, P1: AsRef<Path>>(
logicalname: P0,
mount_point: P1,
#[instrument(skip(logicalname, mountpoint))]
pub async fn mount(
logicalname: impl AsRef<Path>,
mountpoint: impl AsRef<Path>,
) -> Result<(), Error> {
let is_mountpoint = tokio::process::Command::new("mountpoint")
.arg(mount_point.as_ref())
.arg(mountpoint.as_ref())
.stdout(std::process::Stdio::null())
.stderr(std::process::Stdio::null())
.status()
.await?;
if is_mountpoint.success() {
unmount(mount_point.as_ref()).await?;
unmount(mountpoint.as_ref()).await?;
}
tokio::fs::create_dir_all(&mount_point).await?;
tokio::fs::create_dir_all(mountpoint.as_ref()).await?;
let mount_output = tokio::process::Command::new("mount")
.arg(logicalname.as_ref())
.arg(mount_point.as_ref())
.arg(mountpoint.as_ref())
.output()
.await?;
crate::ensure_code!(
@@ -293,36 +355,47 @@ pub async fn mount<P0: AsRef<Path>, P1: AsRef<Path>>(
crate::ErrorKind::Filesystem,
"Error Mounting {} to {}: {}",
logicalname.as_ref().display(),
mount_point.as_ref().display(),
mountpoint.as_ref().display(),
std::str::from_utf8(&mount_output.stderr).unwrap_or("Unknown Error")
);
Ok(())
}
#[instrument(skip(src, dst, password))]
pub async fn mount_encfs<P0: AsRef<Path>, P1: AsRef<Path>>(
pub async fn mount_ecryptfs<P0: AsRef<Path>, P1: AsRef<Path>>(
src: P0,
dst: P1,
password: &str,
) -> Result<(), Error> {
let mut encfs = tokio::process::Command::new("encfs")
.arg("--standard")
.arg("--public")
.arg("-S")
let is_mountpoint = tokio::process::Command::new("mountpoint")
.arg(dst.as_ref())
.stdout(std::process::Stdio::null())
.stderr(std::process::Stdio::null())
.status()
.await?;
if is_mountpoint.success() {
unmount(dst.as_ref()).await?;
}
tokio::fs::create_dir_all(dst.as_ref()).await?;
let mut ecryptfs = tokio::process::Command::new("mount")
.arg("-t")
.arg("ecryptfs")
.arg(src.as_ref())
.arg(dst.as_ref())
.arg("-o")
.arg(format!("key=passphrase,passwd={},ecryptfs_cipher=aes,ecryptfs_key_bytes=32,ecryptfs_passthrough=n,ecryptfs_enable_filename_crypto=y", password))
.stdin(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped())
.spawn()?;
let mut stdin = encfs.stdin.take().unwrap();
let mut stderr = encfs.stderr.take().unwrap();
stdin.write_all(password.as_bytes()).await?;
let mut stdin = ecryptfs.stdin.take().unwrap();
let mut stderr = ecryptfs.stderr.take().unwrap();
stdin.write_all(b"\nyes\nno").await?;
stdin.flush().await?;
stdin.shutdown().await?;
drop(stdin);
let mut err = String::new();
stderr.read_to_string(&mut err).await?;
if !encfs.wait().await?.success() {
if !ecryptfs.wait().await?.success() {
Err(Error::new(eyre!("{}", err), crate::ErrorKind::Filesystem))
} else {
Ok(())
@@ -373,27 +446,334 @@ pub async fn bind<P0: AsRef<Path>, P1: AsRef<Path>>(
Ok(())
}
#[instrument(skip(mount_point))]
pub async fn unmount<P: AsRef<Path>>(mount_point: P) -> Result<(), Error> {
tracing::info!("Unmounting {}.", mount_point.as_ref().display());
#[instrument(skip(mountpoint))]
pub async fn unmount<P: AsRef<Path>>(mountpoint: P) -> Result<(), Error> {
tracing::debug!("Unmounting {}.", mountpoint.as_ref().display());
let umount_output = tokio::process::Command::new("umount")
.arg(mount_point.as_ref())
.arg(mountpoint.as_ref())
.output()
.await?;
crate::ensure_code!(
umount_output.status.success(),
crate::ErrorKind::Filesystem,
"Error Unmounting Drive: {}: {}",
mount_point.as_ref().display(),
mountpoint.as_ref().display(),
std::str::from_utf8(&umount_output.stderr).unwrap_or("Unknown Error")
);
tokio::fs::remove_dir_all(mount_point.as_ref())
tokio::fs::remove_dir_all(mountpoint.as_ref())
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("rm {}", mount_point.as_ref().display()),
format!("rm {}", mountpoint.as_ref().display()),
)
})?;
Ok(())
}
/// Common interface for mount guards that can be explicitly unmounted.
///
/// Implementors also expose their mountpoint via `AsRef<Path>`.
#[async_trait::async_trait]
pub trait GenericMountGuard: AsRef<Path> {
    /// Consumes the guard, unmounting the underlying filesystem.
    async fn unmount(mut self) -> Result<(), Error>;
}
/// Guard over a single filesystem mount; unmounts on explicit `unmount`
/// or (best-effort, via a spawned task) on drop.
pub struct MountGuard {
    mountpoint: PathBuf,
    // Cleared after an explicit unmount so Drop does not unmount again.
    mounted: bool,
}
impl MountGuard {
    /// Mounts `logicalname` at `mountpoint` and returns a guard that
    /// unmounts it when consumed by [`MountGuard::unmount`] or dropped.
    pub async fn mount(
        logicalname: impl AsRef<Path>,
        mountpoint: impl AsRef<Path>,
    ) -> Result<Self, Error> {
        let target = mountpoint.as_ref().to_owned();
        mount(logicalname, &target).await?;
        Ok(Self {
            mountpoint: target,
            mounted: true,
        })
    }

    /// Unmounts the filesystem now, consuming the guard so `Drop` has
    /// nothing left to do. Idempotent if already unmounted.
    pub async fn unmount(mut self) -> Result<(), Error> {
        if !self.mounted {
            return Ok(());
        }
        unmount(&self.mountpoint).await?;
        self.mounted = false;
        Ok(())
    }
}
impl AsRef<Path> for MountGuard {
    /// The directory where the filesystem is mounted.
    fn as_ref(&self) -> &Path {
        &self.mountpoint
    }
}
impl Drop for MountGuard {
fn drop(&mut self) {
if self.mounted {
let mountpoint = std::mem::take(&mut self.mountpoint);
tokio::spawn(async move { unmount(mountpoint).await.unwrap() });
}
}
}
#[async_trait::async_trait]
impl GenericMountGuard for MountGuard {
async fn unmount(mut self) -> Result<(), Error> {
MountGuard::unmount(self).await
}
}
/// Derives a deterministic mountpoint under [`TMP_MOUNTPOINT`] for `source`
/// by base32-encoding the SHA-256 of its canonical path, so the same device
/// always maps to the same directory.
async fn tmp_mountpoint(source: impl AsRef<Path>) -> Result<PathBuf, Error> {
    let canonical = tokio::fs::canonicalize(&source).await.with_ctx(|_| {
        (
            crate::ErrorKind::Filesystem,
            source.as_ref().display().to_string(),
        )
    })?;
    let digest = sha2::Sha256::digest(canonical.as_os_str().as_bytes());
    let dirname = base32::encode(base32::Alphabet::RFC4648 { padding: false }, &digest);
    Ok(Path::new(TMP_MOUNTPOINT).join(dirname))
}
/// A `MountGuard` at a deterministic temporary mountpoint (see
/// `tmp_mountpoint`), serialized with an on-disk file lock.
pub struct TmpMountGuard {
    guard: MountGuard,
    // Held for the lifetime of the mount so two callers cannot race on the
    // same derived mountpoint.
    lock: FileLock,
}
impl TmpMountGuard {
    /// Mounts `logicalname` at its hash-derived temporary mountpoint.
    ///
    /// The sibling `.lock` file is acquired *before* mounting so concurrent
    /// callers cannot race on the same mountpoint.
    pub async fn mount(logicalname: impl AsRef<Path>) -> Result<Self, Error> {
        let target = tmp_mountpoint(&logicalname).await?;
        let lock = FileLock::new(target.with_extension("lock")).await?;
        Ok(TmpMountGuard {
            guard: MountGuard::mount(logicalname, &target).await?,
            lock,
        })
    }

    /// Unmounts the filesystem, then releases the file lock.
    pub async fn unmount(self) -> Result<(), Error> {
        let Self { guard, lock } = self;
        guard.unmount().await?;
        lock.unlock().await?;
        Ok(())
    }
}
impl AsRef<Path> for TmpMountGuard {
    /// The temporary directory where the filesystem is mounted.
    fn as_ref(&self) -> &Path {
        self.guard.as_ref()
    }
}
#[async_trait::async_trait]
impl GenericMountGuard for TmpMountGuard {
    // Delegates to the inherent method, which also releases the file lock.
    async fn unmount(mut self) -> Result<(), Error> {
        TmpMountGuard::unmount(self).await
    }
}
/// Guard over a mounted, decrypted backup set: the mount guard for the
/// physical backup disk (`G`) plus an ecryptfs overlay at `mountpoint`
/// exposing the decrypted backup contents.
pub struct BackupMountGuard<G: GenericMountGuard> {
    // `Some` until `unmount` takes it; `None` only during teardown.
    backup_disk_mount_guard: Option<G>,
    // ecryptfs passphrase protecting the backup contents.
    enc_key: String,
    // Metadata stored *outside* the encrypted area (version, password
    // hash, wrapped key).
    pub unencrypted_metadata: EmbassyOsRecoveryInfo,
    // Metadata stored *inside* the encrypted area (per-package backups).
    pub metadata: BackupInfo,
    mountpoint: PathBuf,
    // Cleared after an explicit unmount so Drop does not unmount again.
    mounted: bool,
}
impl<G: GenericMountGuard> BackupMountGuard<G> {
    /// Path of the mounted backup disk. Only valid while the disk guard is
    /// present; `unmount` takes it, after which this is unreachable.
    fn backup_disk_path(&self) -> &Path {
        if let Some(guard) = &self.backup_disk_mount_guard {
            guard.as_ref()
        } else {
            unreachable!()
        }
    }

    /// Mounts the encrypted backup set stored on an already-mounted backup
    /// disk.
    ///
    /// Reads `EmbassyBackups/unencrypted-metadata.cbor` (defaulting if
    /// absent), derives or unwraps the ecryptfs key from `password`, mounts
    /// `EmbassyBackups/crypt` via ecryptfs at a temporary mountpoint, and
    /// loads `metadata.cbor` from inside the encrypted view. A failure
    /// after the ecryptfs mount succeeds unmounts it before returning.
    pub async fn mount(backup_disk_mount_guard: G, password: &str) -> Result<Self, Error> {
        let mountpoint = tmp_mountpoint(&backup_disk_mount_guard).await?;
        let backup_disk_path = backup_disk_mount_guard.as_ref();
        let unencrypted_metadata_path =
            backup_disk_path.join("EmbassyBackups/unencrypted-metadata.cbor");
        // Plaintext metadata lives next to (not inside) the encrypted area;
        // a missing file means a brand-new backup target.
        let unencrypted_metadata: EmbassyOsRecoveryInfo =
            if tokio::fs::metadata(&unencrypted_metadata_path)
                .await
                .is_ok()
            {
                IoFormat::Cbor.from_slice(
                    &tokio::fs::read(&unencrypted_metadata_path)
                        .await
                        .with_ctx(|_| {
                            (
                                crate::ErrorKind::Filesystem,
                                unencrypted_metadata_path.display().to_string(),
                            )
                        })?,
                )?
            } else {
                Default::default()
            };
        // Existing backup: verify the password, then unwrap the stored key.
        // New backup: generate a fresh random 32-byte key (base32-encoded).
        let enc_key = if let (Some(hash), Some(wrapped_key)) = (
            unencrypted_metadata.password_hash.as_ref(),
            unencrypted_metadata.wrapped_key.as_ref(),
        ) {
            let wrapped_key =
                base32::decode(base32::Alphabet::RFC4648 { padding: false }, wrapped_key)
                    .ok_or_else(|| {
                        Error::new(
                            eyre!("failed to decode wrapped key"),
                            crate::ErrorKind::Backup,
                        )
                    })?;
            check_password(hash, password)?;
            String::from_utf8(decrypt_slice(wrapped_key, password))?
        } else {
            base32::encode(
                base32::Alphabet::RFC4648 { padding: false },
                &rand::random::<[u8; 32]>()[..],
            )
        };
        let crypt_path = backup_disk_path.join("EmbassyBackups/crypt");
        if tokio::fs::metadata(&crypt_path).await.is_err() {
            tokio::fs::create_dir_all(&crypt_path).await.with_ctx(|_| {
                (
                    crate::ErrorKind::Filesystem,
                    crypt_path.display().to_string(),
                )
            })?;
        }
        mount_ecryptfs(&crypt_path, &mountpoint, &enc_key).await?;
        // From here on the ecryptfs mount exists, so any failure must
        // unmount it before propagating.
        let metadata = match async {
            let metadata_path = mountpoint.join("metadata.cbor");
            let metadata: BackupInfo = if tokio::fs::metadata(&metadata_path).await.is_ok() {
                IoFormat::Cbor.from_slice(&tokio::fs::read(&metadata_path).await.with_ctx(
                    |_| {
                        (
                            crate::ErrorKind::Filesystem,
                            metadata_path.display().to_string(),
                        )
                    },
                )?)?
            } else {
                Default::default()
            };
            Ok(metadata)
        }
        .await
        {
            Ok(a) => a,
            Err(e) => {
                unmount(&mountpoint).await?;
                return Err(e);
            }
        };
        Ok(Self {
            backup_disk_mount_guard: Some(backup_disk_mount_guard),
            enc_key,
            unencrypted_metadata,
            metadata,
            mountpoint,
            mounted: true,
        })
    }

    /// Re-wraps the encryption key under `new_password` and stores a fresh
    /// argon2 hash of it in the unencrypted metadata.
    ///
    /// Only updates in-memory state; call `save` (or `save_and_unmount`)
    /// to persist the change to disk.
    pub fn change_password(&mut self, new_password: &str) -> Result<(), Error> {
        self.unencrypted_metadata.password_hash = Some(
            argon2::hash_encoded(
                new_password.as_bytes(),
                &rand::random::<[u8; 16]>()[..],
                &argon2::Config::default(),
            )
            .with_kind(crate::ErrorKind::PasswordHashGeneration)?,
        );
        self.unencrypted_metadata.wrapped_key = Some(base32::encode(
            base32::Alphabet::RFC4648 { padding: false },
            &encrypt_slice(&self.enc_key, new_password),
        ));
        Ok(())
    }

    /// Bind-mounts a single package's backup directory into `BACKUP_DIR`,
    /// guarded by a per-package lock file.
    pub async fn mount_package_backup(
        &self,
        id: &PackageId,
    ) -> Result<PackageBackupMountGuard, Error> {
        let lock = FileLock::new(Path::new(BACKUP_DIR).join(format!("{}.lock", id))).await?;
        let mountpoint = Path::new(BACKUP_DIR).join(id);
        bind(self.mountpoint.join(id), &mountpoint, false).await?;
        Ok(PackageBackupMountGuard {
            mountpoint,
            lock,
            mounted: true,
        })
    }

    /// Atomically writes both metadata files: `metadata.cbor` inside the
    /// encrypted mount and `unencrypted-metadata.cbor` on the backup disk.
    pub async fn save(&self) -> Result<(), Error> {
        let metadata_path = self.mountpoint.join("metadata.cbor");
        let backup_disk_path = self.backup_disk_path();
        let mut file = AtomicFile::new(&metadata_path).await?;
        file.write_all(&IoFormat::Cbor.to_vec(&self.metadata)?)
            .await?;
        file.save().await?;
        let unencrypted_metadata_path =
            backup_disk_path.join("EmbassyBackups/unencrypted-metadata.cbor");
        let mut file = AtomicFile::new(&unencrypted_metadata_path).await?;
        file.write_all(&IoFormat::Cbor.to_vec(&self.unencrypted_metadata)?)
            .await?;
        file.save().await?;
        Ok(())
    }

    /// Unmounts the ecryptfs overlay, then the underlying backup disk.
    /// Does NOT save pending metadata changes; see `save_and_unmount`.
    pub async fn unmount(mut self) -> Result<(), Error> {
        if self.mounted {
            unmount(&self.mountpoint).await?;
            self.mounted = false;
        }
        if let Some(guard) = self.backup_disk_mount_guard.take() {
            guard.unmount().await?;
        }
        Ok(())
    }

    /// Convenience: `save` followed by `unmount`.
    pub async fn save_and_unmount(self) -> Result<(), Error> {
        self.save().await?;
        self.unmount().await?;
        Ok(())
    }
}
impl<G: GenericMountGuard> AsRef<Path> for BackupMountGuard<G> {
    /// The directory exposing the decrypted backup contents.
    fn as_ref(&self) -> &Path {
        &self.mountpoint
    }
}
impl<G: GenericMountGuard> Drop for BackupMountGuard<G> {
fn drop(&mut self) {
if self.mounted {
let mountpoint = std::mem::take(&mut self.mountpoint);
tokio::spawn(async move { unmount(mountpoint).await.unwrap() });
}
}
}
/// Guard for a single package's backup directory, bind-mounted from the
/// decrypted backup set into `BACKUP_DIR`.
pub struct PackageBackupMountGuard {
    mountpoint: PathBuf,
    // Per-package lock file under BACKUP_DIR; presumably released when the
    // guard is dropped — confirm FileLock's Drop semantics.
    lock: FileLock,
    // Cleared after an explicit unmount so Drop does not unmount again.
    mounted: bool,
}
impl PackageBackupMountGuard {
    /// Unmounts the package's backup bind-mount now, consuming the guard
    /// so `Drop` has nothing left to do. Idempotent if already unmounted.
    pub async fn unmount(mut self) -> Result<(), Error> {
        if !self.mounted {
            return Ok(());
        }
        unmount(&self.mountpoint).await?;
        self.mounted = false;
        Ok(())
    }
}
impl AsRef<Path> for PackageBackupMountGuard {
    /// The bind-mount location of the package's backup directory.
    fn as_ref(&self) -> &Path {
        &self.mountpoint
    }
}
impl Drop for PackageBackupMountGuard {
fn drop(&mut self) {
if self.mounted {
let mountpoint = std::mem::take(&mut self.mountpoint);
tokio::spawn(async move { unmount(mountpoint).await.unwrap() });
}
}
}

View File

@@ -1,6 +1,6 @@
use std::fmt::Display;
use color_eyre::eyre::{eyre, ErrReport};
use color_eyre::eyre::eyre;
use patch_db::Revision;
use rpc_toolkit::yajrc::RpcError;
@@ -12,7 +12,7 @@ pub enum ErrorKind {
ConfigSpecViolation = 4,
ConfigRulesViolation = 5,
NotFound = 6,
InvalidPassword = 7, // REMOVE
IncorrectPassword = 7,
VersionIncompatible = 8,
Network = 9,
Registry = 10,
@@ -70,7 +70,7 @@ impl ErrorKind {
ConfigSpecViolation => "Config Spec Violation",
ConfigRulesViolation => "Config Rules Violation",
NotFound => "Not Found",
InvalidPassword => "Invalid Password",
IncorrectPassword => "Incorrect Password",
VersionIncompatible => "Version Incompatible",
Network => "Network Error",
Registry => "Registry Error",

View File

@@ -4,6 +4,7 @@ use bollard::image::ListImagesOptions;
use patch_db::{DbHandle, PatchDbHandle};
use tracing::instrument;
use super::PKG_ARCHIVE_DIR;
use super::PKG_DOCKER_DIR;
use crate::context::RpcContext;
use crate::db::model::{CurrentDependencyInfo, InstalledPackageDataEntry, PackageDataEntry};
@@ -93,6 +94,14 @@ pub async fn cleanup(ctx: &RpcContext, id: &PackageId, version: &Version) -> Res
ctx.docker.remove_image(&image.id, None, None).await
}))
.await?;
let pkg_archive_dir = ctx
.datadir
.join(PKG_ARCHIVE_DIR)
.join(id)
.join(version.as_str());
if tokio::fs::metadata(&pkg_archive_dir).await.is_ok() {
tokio::fs::remove_dir_all(&pkg_archive_dir).await?;
}
let docker_path = ctx
.datadir
.join(PKG_DOCKER_DIR)

View File

@@ -1,12 +1,11 @@
use std::collections::{BTreeMap, BTreeSet};
use std::ffi::OsStr;
use std::io::SeekFrom;
use std::path::Path;
use std::process::Stdio;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use color_eyre::eyre::{self, eyre};
use color_eyre::eyre::eyre;
use emver::VersionRange;
use futures::future::BoxFuture;
use futures::{FutureExt, StreamExt, TryStreamExt};
@@ -14,7 +13,7 @@ use http::StatusCode;
use patch_db::{DbHandle, LockType};
use reqwest::Response;
use rpc_toolkit::command;
use tokio::fs::{DirEntry, File, OpenOptions};
use tokio::fs::{File, OpenOptions};
use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt};
use tokio::process::Command;
use tokio_stream::wrappers::ReadDirStream;
@@ -44,7 +43,7 @@ use crate::{Error, ResultExt};
pub mod cleanup;
pub mod progress;
pub const PKG_CACHE: &'static str = "package-data/cache";
pub const PKG_ARCHIVE_DIR: &'static str = "package-data/archive";
pub const PKG_PUBLIC_DIR: &'static str = "package-data/public";
pub const PKG_DOCKER_DIR: &'static str = "package-data/docker";
pub const PKG_WASM_DIR: &'static str = "package-data/wasm";
@@ -226,7 +225,7 @@ pub async fn download_install_s9pk(
let pkg_cache_dir = ctx
.datadir
.join(PKG_CACHE)
.join(PKG_ARCHIVE_DIR)
.join(pkg_id)
.join(version.as_str());
tokio::fs::create_dir_all(&pkg_cache_dir).await?;
@@ -615,6 +614,12 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
}
deps
};
let mut pde = model
.clone()
.expect(&mut tx)
.await?
.get_mut(&mut tx)
.await?;
let installed = InstalledPackageDataEntry {
status: Status {
configured: manifest.config.is_none(),
@@ -622,18 +627,23 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
dependency_errors: DependencyErrors::default(),
},
manifest: manifest.clone(),
last_backup: match &*pde {
PackageDataEntry::Updating {
installed:
InstalledPackageDataEntry {
last_backup: Some(time),
..
},
..
} => Some(*time),
_ => None,
},
system_pointers: Vec::new(),
dependency_info,
current_dependents: current_dependents.clone(),
current_dependencies: current_dependencies.clone(),
interface_addresses,
};
let mut pde = model
.clone()
.expect(&mut tx)
.await?
.get_mut(&mut tx)
.await?;
let prev = std::mem::replace(
&mut *pde,
PackageDataEntry::Installed {

View File

@@ -12,7 +12,7 @@ use tokio::io::{AsyncRead, AsyncSeek, AsyncWrite};
use crate::Error;
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[derive(Debug, Deserialize, Serialize, HasModel, Default)]
#[serde(rename_all = "kebab-case")]
pub struct InstallProgress {
pub size: Option<u64>,

View File

@@ -1,8 +1,4 @@
pub const CONFIG_PATH: &'static str = "/etc/embassy/config.toml";
pub const SERVICES_YAML: &'static str = "tor/services.yaml";
pub const VOLUMES: &'static str = "/root/volumes";
pub const BACKUP_MOUNT_POINT: &'static str = "/mnt/backup_drive";
pub const BACKUP_DIR: &'static str = "Embassy Backups";
pub const BUFFER_SIZE: usize = 1024;
pub const HOST_IP: [u8; 4] = [172, 18, 0, 1];
@@ -46,7 +42,7 @@ pub use config::Config;
pub use error::{Error, ErrorKind, ResultExt};
use rpc_toolkit::command;
use rpc_toolkit::yajrc::RpcError;
pub use version::{init, self_update};
pub use version::init;
#[command(metadata(authenticated = false))]
pub fn echo(#[arg] message: String) -> Result<String, RpcError> {
@@ -66,6 +62,7 @@ pub fn echo(#[arg] message: String) -> Result<String, RpcError> {
net::wifi::wifi,
disk::disk,
notifications::notification,
backup::backup,
))]
pub fn main_api() -> Result<(), RpcError> {
Ok(())

View File

@@ -0,0 +1,83 @@
use std::collections::BTreeMap;
use itertools::Itertools;
use patch_db::DbHandle;
use tracing::instrument;
use crate::context::RpcContext;
use crate::dependencies::{break_transitive, DependencyError};
use crate::s9pk::manifest::PackageId;
use crate::status::health_check::{HealthCheckId, HealthCheckResult};
use crate::status::MainStatus;
use crate::Error;
/// Runs the health checks for package `id`, persists the resulting status,
/// and breaks any dependents whose required health checks are failing.
#[instrument(skip(ctx, db))]
pub async fn check<Db: DbHandle>(
    ctx: &RpcContext,
    db: &mut Db,
    id: &PackageId,
) -> Result<(), Error> {
    let mut tx = db.begin().await?;
    let installed_model = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(id)
        .expect(&mut tx)
        .await?
        .installed()
        .expect(&mut tx)
        .await?;
    // Status updates are committed via a nested checkpoint before the
    // dependency-breaking work below runs in the outer transaction.
    let mut checkpoint = tx.begin().await?;
    let manifest = installed_model
        .clone()
        .manifest()
        .get(&mut checkpoint, true)
        .await?;
    let mut status = installed_model
        .clone()
        .status()
        .get_mut(&mut checkpoint)
        .await?;
    // Actually executes the package's health checks and records the
    // results on the main status.
    status.main.check(&ctx, &mut checkpoint, &*manifest).await?;
    // Health results only exist while the service is Running or BackingUp.
    // NOTE(review): despite the name, this map holds *all* current health
    // results, not only failures — confirm the filtering semantics below
    // against HealthCheckResult.
    let failed = match &status.main {
        MainStatus::Running { health, .. } => health.clone(),
        MainStatus::BackingUp { health, .. } => health.clone(),
        _ => BTreeMap::new(),
    };
    status.save(&mut checkpoint).await?;
    checkpoint.save().await?;
    let current_dependents = installed_model
        .current_dependents()
        .get(&mut tx, true)
        .await?;
    for (dependent, info) in &*current_dependents {
        // Only the health checks this dependent declared an interest in
        // count toward breaking it.
        let failures: BTreeMap<HealthCheckId, HealthCheckResult> = failed
            .iter()
            .filter(|(hc_id, _)| info.health_checks.contains(hc_id))
            .map(|(k, v)| (k.clone(), v.clone()))
            .collect();
        if !failures.is_empty() {
            break_transitive(
                &mut tx,
                &dependent,
                id,
                DependencyError::HealthChecksFailed { failures },
                &mut BTreeMap::new(),
            )
            .await?;
        }
    }
    tx.save().await?;
    Ok(())
}

View File

@@ -3,6 +3,7 @@ use std::future::Future;
use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
use std::task::Poll;
use std::time::Duration;
use bollard::container::StopContainerOptions;
use color_eyre::eyre::eyre;
@@ -18,11 +19,13 @@ use crate::action::docker::DockerAction;
use crate::action::NoOutput;
use crate::context::RpcContext;
use crate::net::interface::InterfaceId;
use crate::notifications::{NotificationLevel, NotificationSubtype};
use crate::notifications::NotificationLevel;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::util::{Container, NonDetachingJoinHandle, Version};
use crate::Error;
pub mod health;
/// Registry of running service managers, keyed by package id + version.
#[derive(Default)]
pub struct ManagerMap(RwLock<BTreeMap<(PackageId, Version), Arc<Manager>>>);
impl ManagerMap {
@@ -98,11 +101,14 @@ impl ManagerMap {
#[instrument(skip(self))]
pub async fn empty(&self) -> Result<(), Error> {
let res = futures::future::join_all(
std::mem::take(&mut *self.0.write().await)
.into_iter()
.map(|(_, man)| async move { man.exit().await }),
)
let res =
futures::future::join_all(std::mem::take(&mut *self.0.write().await).into_iter().map(
|((id, version), man)| async move {
man.exit().await?;
tracing::debug!("Manager for {}@{} shutdown", id, version);
Ok::<_, Error>(())
},
))
.await;
res.into_iter().fold(Ok(()), |res, x| match (res, x) {
(Ok(()), x) => x,
@@ -231,10 +237,24 @@ async fn run_main(
.collect::<Result<Vec<_>, Error>>()?,
)
.await?;
let res = runtime
.await
.map_err(|_| Error::new(eyre!("Manager runtime panicked!"), crate::ErrorKind::Docker))
.and_then(|a| a);
let health = async {
loop {
tokio::time::sleep(Duration::from_secs(1)).await;
let mut db = state.ctx.db.handle();
if let Err(e) = health::check(&state.ctx, &mut db, &state.manifest.id).await {
tracing::error!(
"Failed to run health check for {}: {}",
&state.manifest.id,
e
);
tracing::debug!("{:?}", e);
}
}
};
let res = tokio::select! {
a = runtime => a.map_err(|_| Error::new(eyre!("Manager runtime panicked!"), crate::ErrorKind::Docker)).and_then(|a| a),
_ = health => Err(Error::new(eyre!("Health check daemon exited!"), crate::ErrorKind::Unknown)),
};
state
.ctx
.net_controller
@@ -319,7 +339,7 @@ impl Manager {
NotificationLevel::Warning,
String::from("Service Crashed"),
format!("The service {} has crashed with the following exit code: {}\nDetails: {}", thread_shared.manifest.id.clone(), e.0, e.1),
NotificationSubtype::General,
(),
Some(900) // 15 minutes
)
.await;

View File

@@ -8,7 +8,6 @@ use futures::future::BoxFuture;
use futures::{FutureExt, Stream};
use hmac::Hmac;
use http::{HeaderMap, HeaderValue};
use pbkdf2::pbkdf2;
use rpc_toolkit::hyper::http::Error as HttpError;
use rpc_toolkit::hyper::{self, Body, Request, Response, StatusCode};
use rpc_toolkit::rpc_server_helpers::{
@@ -21,6 +20,42 @@ use sha2::Sha256;
use crate::util::Apply;
use crate::Error;
/// Derives an AES-256-CTR cipher key from `password` and `salt` using
/// PBKDF2-HMAC-SHA256 with 1000 iterations.
pub fn pbkdf2(password: impl AsRef<[u8]>, salt: impl AsRef<[u8]>) -> CipherKey<Aes256Ctr> {
    let mut aeskey = CipherKey::<Aes256Ctr>::default();
    pbkdf2::pbkdf2::<Hmac<Sha256>>(
        password.as_ref(),
        salt.as_ref(),
        // NOTE(review): 1000 iterations is low by modern standards —
        // confirm this is an intentional compatibility choice.
        1000,
        aeskey.as_mut_slice(),
    );
    aeskey
}
/// Encrypts `input` with AES-256-CTR under a key derived from `password`.
///
/// Output layout: 32 random prefix bytes (bytes 0..16 are the CTR nonce,
/// bytes 16..32 the PBKDF2 salt) followed by the ciphertext.
/// `decrypt_slice` expects exactly this layout.
pub fn encrypt_slice(input: impl AsRef<[u8]>, password: impl AsRef<[u8]>) -> Vec<u8> {
    let prefix: [u8; 32] = rand::random();
    let aeskey = pbkdf2(password.as_ref(), &prefix[16..]);
    let ctr = Nonce::<Aes256Ctr>::from_slice(&prefix[..16]);
    let mut aes = Aes256Ctr::new(&aeskey, &ctr);
    let mut res = Vec::with_capacity(32 + input.as_ref().len());
    res.extend_from_slice(&prefix[..]);
    res.extend_from_slice(input.as_ref());
    // CTR mode: encrypt in place over the plaintext portion only.
    aes.apply_keystream(&mut res[32..]);
    res
}
/// Inverse of `encrypt_slice`: splits off the 32-byte nonce/salt prefix
/// and decrypts the remainder with AES-256-CTR.
///
/// NOTE(review): inputs shorter than the 32-byte prefix silently yield an
/// empty Vec rather than an error, and an unauthenticated wrong password
/// yields garbage — callers must validate the result.
pub fn decrypt_slice(input: impl AsRef<[u8]>, password: impl AsRef<[u8]>) -> Vec<u8> {
    if input.as_ref().len() < 32 {
        return Vec::new();
    }
    let (prefix, rest) = input.as_ref().split_at(32);
    let aeskey = pbkdf2(password.as_ref(), &prefix[16..]);
    let ctr = Nonce::<Aes256Ctr>::from_slice(&prefix[..16]);
    let mut aes = Aes256Ctr::new(&aeskey, &ctr);
    let mut res = rest.to_vec();
    aes.apply_keystream(&mut res);
    res
}
#[pin_project::pin_project]
pub struct DecryptStream {
key: Arc<String>,
@@ -68,13 +103,7 @@ impl Stream for DecryptStream {
buf = &buf[to_read..];
}
if this.ctr.len() == 16 && this.salt.len() == 16 {
let mut aeskey = CipherKey::<Aes256Ctr>::default();
pbkdf2::<Hmac<Sha256>>(
this.key.as_bytes(),
&this.salt,
1000,
aeskey.as_mut_slice(),
);
let aeskey = pbkdf2(this.key.as_bytes(), &this.salt);
let ctr = Nonce::<Aes256Ctr>::from_slice(&this.ctr);
let mut aes = Aes256Ctr::new(&aeskey, &ctr);
let mut res = buf.to_vec();
@@ -101,8 +130,7 @@ pub struct EncryptStream {
impl EncryptStream {
pub fn new(key: &str, body: Body) -> Self {
let prefix: [u8; 32] = rand::random();
let mut aeskey = CipherKey::<Aes256Ctr>::default();
pbkdf2::<Hmac<Sha256>>(key.as_bytes(), &prefix[16..], 1000, aeskey.as_mut_slice());
let aeskey = pbkdf2(key.as_bytes(), &prefix[16..]);
let ctr = Nonce::<Aes256Ctr>::from_slice(&prefix[..16]);
let aes = Aes256Ctr::new(&aeskey, &ctr);
EncryptStream {

View File

@@ -1,11 +1,9 @@
use std::borrow::Borrow;
use std::collections::BTreeMap;
use std::net::Ipv4Addr;
use std::path::{Path, PathBuf};
use futures::FutureExt;
use indexmap::IndexSet;
use sqlx::SqlitePool;
use tokio::sync::Mutex;
use tracing::instrument;

View File

@@ -113,6 +113,10 @@ impl TorController {
self.0.lock().await.replace().await
}
    /// Returns a clone of embassyd's own Tor v3 secret key.
    pub async fn embassyd_tor_key(&self) -> TorSecretKeyV3 {
        self.0.lock().await.embassyd_tor_key.clone()
    }
pub async fn embassyd_onion(&self) -> OnionAddressV3 {
self.0.lock().await.embassyd_onion()
}

View File

@@ -1,4 +1,4 @@
use std::collections::{BTreeMap, HashMap};
use std::collections::HashMap;
use std::fmt;
use std::str::FromStr;
@@ -10,11 +10,12 @@ use sqlx::SqlitePool;
use tokio::sync::Mutex;
use tracing::instrument;
use crate::backup::BackupReport;
use crate::context::RpcContext;
use crate::db::util::WithRevision;
use crate::s9pk::manifest::PackageId;
use crate::util::{display_none, display_serializable};
use crate::{Error, ErrorKind};
use crate::{Error, ErrorKind, ResultExt};
#[command(subcommands(list, delete, delete_before, create))]
pub async fn notification() -> Result<(), Error> {
@@ -150,7 +151,7 @@ pub async fn create(
level,
title,
message,
NotificationSubtype::General,
(),
None,
)
.await
@@ -213,54 +214,17 @@ pub struct Notification {
data: serde_json::Value,
}
#[derive(Debug)]
pub enum NotificationSubtype {
General,
BackupReport {
server_attempted: bool,
server_error: Option<String>,
packages: BTreeMap<String, Option<String>>,
},
/// Marker trait for payload types that can be attached to a notification.
///
/// `CODE` is the integer written to the notifications table's `code`
/// column to identify the payload kind.
pub trait NotificationType:
    serde::Serialize + for<'de> serde::Deserialize<'de> + std::fmt::Debug
{
    const CODE: u32;
}
impl NotificationSubtype {
fn to_json(&self) -> serde_json::Value {
match self {
NotificationSubtype::General => serde_json::Value::Null,
NotificationSubtype::BackupReport {
server_attempted,
server_error,
packages,
} => {
let mut pkgs_map = serde_json::Map::new();
for (k, v) in packages.iter() {
pkgs_map.insert(
k.clone(),
match v {
None => serde_json::json!({ "error": serde_json::Value::Null }),
Some(x) => serde_json::json!({ "error": x }),
},
);
}
serde_json::json!({
"server": {
"attempted": server_attempted,
"error": server_error,
},
"packages": serde_json::Value::Object(pkgs_map)
})
}
}
}
fn code(&self) -> u32 {
match self {
Self::General => 0,
Self::BackupReport {
server_attempted: _,
server_error: _,
packages: _,
} => 1,
}
}
// Code 0: a general notification with no structured payload.
impl NotificationType for () {
    const CODE: u32 = 0;
}
// Code 1: the outcome report of a backup run.
impl NotificationType for BackupReport {
    const CODE: u32 = 1;
}
pub struct NotificationManager {
@@ -275,14 +239,14 @@ impl NotificationManager {
}
}
#[instrument(skip(self, db))]
pub async fn notify<Db: DbHandle>(
pub async fn notify<Db: DbHandle, T: NotificationType>(
&self,
db: &mut Db,
package_id: Option<PackageId>,
level: NotificationLevel,
title: String,
message: String,
subtype: NotificationSubtype,
subtype: T,
debounce_interval: Option<u32>,
) -> Result<(), Error> {
if !self
@@ -297,9 +261,10 @@ impl NotificationManager {
.get_mut(db)
.await?;
let sql_package_id = package_id.map::<String, _>(|p| p.into());
let sql_code = subtype.code();
let sql_code = T::CODE;
let sql_level = format!("{}", level);
let sql_data = format!("{}", subtype.to_json());
let sql_data =
serde_json::to_string(&subtype).with_kind(crate::ErrorKind::Serialization)?;
sqlx::query!(
"INSERT INTO notifications (package_id, code, level, title, message, data) VALUES (?, ?, ?, ?, ?, ?)",
sql_package_id,

View File

@@ -18,13 +18,13 @@ use tracing::instrument;
use crate::context::SetupContext;
use crate::db::model::RecoveredPackageInfo;
use crate::disk::main::DEFAULT_PASSWORD;
use crate::disk::util::{mount, unmount, DiskInfo};
use crate::disk::util::{DiskInfo, PartitionInfo, TmpMountGuard};
use crate::id::Id;
use crate::install::PKG_PUBLIC_DIR;
use crate::s9pk::manifest::PackageId;
use crate::sound::BEETHOVEN;
use crate::util::io::from_yaml_async_reader;
use crate::util::{GeneralGuard, Invoke, Version};
use crate::util::{Invoke, Version};
use crate::volume::{data_dir, VolumeId};
use crate::{Error, ResultExt};
@@ -91,14 +91,14 @@ pub async fn execute(
#[context] ctx: SetupContext,
#[arg(rename = "embassy-logicalname")] embassy_logicalname: PathBuf,
#[arg(rename = "embassy-password")] embassy_password: String,
#[arg(rename = "recovery-drive")] recovery_drive: Option<DiskInfo>,
#[arg(rename = "recovery-partition")] recovery_partition: Option<PartitionInfo>,
#[arg(rename = "recovery-password")] recovery_password: Option<String>,
) -> Result<SetupResult, Error> {
match execute_inner(
ctx,
embassy_logicalname,
embassy_password,
recovery_drive,
recovery_partition,
recovery_password,
)
.await
@@ -132,7 +132,7 @@ pub async fn execute_inner(
ctx: SetupContext,
embassy_logicalname: PathBuf,
embassy_password: String,
recovery_drive: Option<DiskInfo>,
recovery_partition: Option<PartitionInfo>,
recovery_password: Option<String>,
) -> Result<String, Error> {
if ctx.recovery_status.read().await.is_some() {
@@ -180,8 +180,8 @@ pub async fn execute_inner(
.await?;
sqlite_pool.close().await;
if let Some(recovery_drive) = recovery_drive {
if recovery_drive
if let Some(recovery_partition) = recovery_partition {
if recovery_partition
.embassy_os
.as_ref()
.map(|v| &*v.version < &emver::Version::new(0, 2, 8, 0))
@@ -190,7 +190,8 @@ pub async fn execute_inner(
return Err(Error::new(eyre!("Unsupported version of EmbassyOS. Please update to at least 0.2.8 before recovering."), crate::ErrorKind::VersionIncompatible));
}
tokio::spawn(async move {
if let Err(e) = recover(ctx.clone(), guid, recovery_drive, recovery_password).await {
if let Err(e) = recover(ctx.clone(), guid, recovery_partition, recovery_password).await
{
BEETHOVEN.play().await.unwrap_or_default(); // ignore error in playing the song
tracing::error!("Error recovering drive!: {}", e);
tracing::debug!("{:?}", e);
@@ -208,18 +209,18 @@ pub async fn execute_inner(
async fn recover(
ctx: SetupContext,
guid: String,
recovery_drive: DiskInfo,
recovery_partition: PartitionInfo,
recovery_password: Option<String>,
) -> Result<(), Error> {
let recovery_version = recovery_drive
let recovery_version = recovery_partition
.embassy_os
.as_ref()
.map(|i| i.version.clone())
.unwrap_or_default();
if recovery_version.major() == 0 && recovery_version.minor() == 2 {
recover_v2(&ctx, recovery_drive).await?;
recover_v2(&ctx, recovery_partition).await?;
} else if recovery_version.major() == 0 && recovery_version.minor() == 3 {
recover_v3(&ctx, recovery_drive, recovery_password).await?;
recover_v3(&ctx, recovery_partition, recovery_password).await?;
} else {
return Err(Error::new(
eyre!("Unsupported version of EmbassyOS: {}", recovery_version),
@@ -267,7 +268,7 @@ fn dir_copy<'a, P0: AsRef<Path> + 'a + Send + Sync, P1: AsRef<Path> + 'a + Send
tokio::fs::copy(&src_path, &dst_path).await.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("{} -> {}", src_path.display(), dst_path.display()),
format!("cp {} -> {}", src_path.display(), dst_path.display()),
)
})?;
ctr.fetch_add(m.len(), Ordering::Relaxed);
@@ -301,7 +302,7 @@ fn dir_copy<'a, P0: AsRef<Path> + 'a + Send + Sync, P1: AsRef<Path> + 'a + Send
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("{} -> {}", src_path.display(), dst_path.display()),
format!("cp -P {} -> {}", src_path.display(), dst_path.display()),
)
})?;
tokio::fs::set_permissions(&dst_path, m.permissions())
@@ -322,29 +323,18 @@ fn dir_copy<'a, P0: AsRef<Path> + 'a + Send + Sync, P1: AsRef<Path> + 'a + Send
}
#[instrument(skip(ctx))]
async fn recover_v2(ctx: &SetupContext, recovery_drive: DiskInfo) -> Result<(), Error> {
let tmp_mountpoint = Path::new("/mnt/recovery");
mount(
&recovery_drive
.partitions
.get(1)
.ok_or_else(|| {
Error::new(
eyre!("missing rootfs partition"),
crate::ErrorKind::Filesystem,
)
})?
.logicalname,
tmp_mountpoint,
)
.await?;
let mount_guard = GeneralGuard::new(|| tokio::spawn(unmount(tmp_mountpoint)));
async fn recover_v2(ctx: &SetupContext, recovery_partition: PartitionInfo) -> Result<(), Error> {
let recovery = TmpMountGuard::mount(&recovery_partition.logicalname).await?;
let secret_store = ctx.secret_store().await?;
let db = ctx.db(&secret_store).await?;
let mut handle = db.handle();
let apps_yaml_path = tmp_mountpoint.join("root").join("appmgr").join("apps.yaml");
let apps_yaml_path = recovery
.as_ref()
.join("root")
.join("appmgr")
.join("apps.yaml");
#[derive(Deserialize)]
struct LegacyAppInfo {
title: String,
@@ -359,7 +349,7 @@ async fn recover_v2(ctx: &SetupContext, recovery_drive: DiskInfo) -> Result<(),
})?)
.await?;
let volume_path = tmp_mountpoint.join("root/volumes");
let volume_path = recovery.as_ref().join("root/volumes");
let total_bytes = AtomicU64::new(0);
for (pkg_id, _) in &packages {
let volume_src_path = volume_path.join(&pkg_id);
@@ -409,9 +399,11 @@ async fn recover_v2(ctx: &SetupContext, recovery_drive: DiskInfo) -> Result<(),
let icon_leaf = AsRef::<Path>::as_ref(&pkg_id)
.join(info.version.as_str())
.join("icon.png");
let icon_src_path = tmp_mountpoint
let icon_src_path = recovery
.as_ref()
.join("root/agent/icons")
.join(format!("{}.png", pkg_id));
// TODO: tor address
let icon_dst_path = ctx.datadir.join(PKG_PUBLIC_DIR).join(&icon_leaf);
if let Some(parent) = icon_dst_path.parent() {
tokio::fs::create_dir_all(&parent)
@@ -423,7 +415,11 @@ async fn recover_v2(ctx: &SetupContext, recovery_drive: DiskInfo) -> Result<(),
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("{} -> {}", icon_src_path.display(), icon_dst_path.display()),
format!(
"cp {} -> {}",
icon_src_path.display(),
icon_dst_path.display()
),
)
})?;
let icon_url = Path::new("/public/package-data").join(&icon_leaf);
@@ -441,16 +437,14 @@ async fn recover_v2(ctx: &SetupContext, recovery_drive: DiskInfo) -> Result<(),
.await?;
}
mount_guard
.drop()
.await
.with_kind(crate::ErrorKind::Unknown)?
recovery.unmount().await?;
Ok(())
}
#[instrument(skip(ctx))]
async fn recover_v3(
ctx: &SetupContext,
recovery_drive: DiskInfo,
recovery_partition: PartitionInfo,
recovery_password: Option<String>,
) -> Result<(), Error> {
todo!()

View File

@@ -4,9 +4,9 @@ use std::time::Duration;
use divrem::DivRem;
use proptest_derive::Arbitrary;
use tokio::sync::{Mutex, MutexGuard};
use tracing::instrument;
use crate::util::FileLock;
use crate::{Error, ErrorKind, ResultExt};
lazy_static::lazy_static! {
@@ -18,32 +18,15 @@ lazy_static::lazy_static! {
static ref PERIOD_FILE: &'static Path = Path::new("/sys/class/pwm/pwmchip0/pwm0/period");
static ref DUTY_FILE: &'static Path = Path::new("/sys/class/pwm/pwmchip0/pwm0/duty_cycle");
static ref SWITCH_FILE: &'static Path = Path::new("/sys/class/pwm/pwmchip0/pwm0/enable");
static ref SOUND_MUTEX: Mutex<Option<fd_lock_rs::FdLock<tokio::fs::File>>> = Mutex::new(None);
}
pub const SOUND_LOCK_FILE: &'static str = "/etc/embassy/sound.lock";
struct SoundInterface(Option<MutexGuard<'static, Option<fd_lock_rs::FdLock<tokio::fs::File>>>>);
struct SoundInterface(Option<FileLock>);
impl SoundInterface {
#[instrument]
pub async fn lease() -> Result<Self, Error> {
let mut guard = SOUND_MUTEX.lock().await;
let sound_file = tokio::fs::File::create(SOUND_LOCK_FILE)
.await
.with_ctx(|_| (ErrorKind::Filesystem, SOUND_LOCK_FILE))?;
*guard = Some(
tokio::task::spawn_blocking(move || {
fd_lock_rs::FdLock::lock(sound_file, fd_lock_rs::LockType::Exclusive, true)
})
.await
.map_err(|e| {
Error::new(
color_eyre::eyre::eyre!("Sound file lock panicked: {}", e),
ErrorKind::SoundError,
)
})?
.with_kind(ErrorKind::SoundError)?,
);
let guard = FileLock::new(SOUND_LOCK_FILE).await?;
tokio::fs::write(&*EXPORT_FILE, "0")
.await
.or_else(|e| {
@@ -88,18 +71,21 @@ impl SoundInterface {
note: &Note,
time_slice: &TimeSlice,
) -> Result<(), Error> {
{
if let Err(e) = async {
self.play(note).await?;
tokio::time::sleep(time_slice.to_duration(tempo_qpm) * 19 / 20).await;
self.stop().await?;
tokio::time::sleep(time_slice.to_duration(tempo_qpm) / 20).await;
Ok::<_, Error>(())
}
.await
{
// we could catch this error and propagate but I'd much prefer the original error bubble up
let _mute = self.stop().await;
Err(e)
} else {
Ok(())
}
.or_else(|e: Error| {
// we could catch this error and propagate but I'd much prefer the original error bubble up
let _mute = self.stop();
Err(e)
})
}
#[instrument(skip(self))]
pub async fn stop(&mut self) -> Result<(), Error> {
@@ -141,14 +127,10 @@ impl Drop for SoundInterface {
tracing::error!("Failed to Unexport Sound Interface: {}", e);
tracing::debug!("{:?}", e);
}
if let Some(mut guard) = guard {
if let Some(lock) = guard.take() {
if let Err(e) = tokio::task::spawn_blocking(|| lock.unlock(true))
.await
.unwrap()
{
tracing::error!("Failed to drop Sound Interface File Lock: {}", e.1)
}
if let Some(guard) = guard {
if let Err(e) = guard.unlock().await {
tracing::error!("Failed to drop Sound Interface File Lock: {}", e);
tracing::debug!("{:?}", e);
}
}
});

View File

@@ -1,47 +1,32 @@
use std::collections::{BTreeMap, BTreeSet};
use std::sync::Arc;
use std::collections::BTreeMap;
use chrono::{DateTime, Utc};
use color_eyre::eyre::eyre;
use futures::{FutureExt, StreamExt};
use patch_db::{DbHandle, HasModel, LockType, Map, ModelData};
use patch_db::{DbHandle, HasModel, Map};
use serde::{Deserialize, Serialize};
use tracing::instrument;
use self::health_check::HealthCheckId;
use crate::context::RpcContext;
use crate::db::model::{CurrentDependencyInfo, InstalledPackageDataEntryModel};
use crate::dependencies::{break_transitive, DependencyError, DependencyErrors};
use crate::dependencies::DependencyErrors;
use crate::manager::{Manager, Status as ManagerStatus};
use crate::notifications::{NotificationLevel, NotificationSubtype};
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::notifications::NotificationLevel;
use crate::s9pk::manifest::Manifest;
use crate::status::health_check::HealthCheckResult;
use crate::Error;
pub mod health_check;
// Assume docker for now
#[instrument(skip(ctx))]
pub async fn synchronize_all(ctx: &RpcContext) -> Result<(), Error> {
let mut db = ctx.db.handle();
let pkg_ids = crate::db::DatabaseModel::new()
.package_data()
.keys(&mut ctx.db.handle(), false)
.keys(&mut db, false)
.await?;
futures::stream::iter(pkg_ids)
.for_each_concurrent(None, |id| async move {
#[instrument(skip(ctx))]
async fn status(ctx: &RpcContext, id: PackageId) -> Result<(), Error> {
let mut db = ctx.db.handle();
// TODO: DRAGONS!!
// this locks all of package data to solve a deadlock issue below. As of the writing of this comment, it
// hangs during the `check` operation on /package-data/<id>. There is another daemon loop somewhere that
// is likely iterating through packages in a different order.
crate::db::DatabaseModel::new()
.package_data()
.lock(&mut db, LockType::Write)
.await;
// Without the above lock, the below check operation will deadlock
for id in pkg_ids {
if let Err(e) = async {
let (mut status, manager) = if let Some(installed) = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&id)
@@ -53,7 +38,7 @@ pub async fn synchronize_all(ctx: &RpcContext) -> Result<(), Error> {
installed.clone().status().get_mut(&mut db).await?,
ctx.managers
.get(&(
id,
id.clone(),
installed
.manifest()
.version()
@@ -62,12 +47,10 @@ pub async fn synchronize_all(ctx: &RpcContext) -> Result<(), Error> {
.to_owned(),
))
.await
.ok_or_else(|| {
Error::new(eyre!("No Manager"), crate::ErrorKind::Docker)
})?,
.ok_or_else(|| Error::new(eyre!("No Manager"), crate::ErrorKind::Docker))?,
)
} else {
return Ok(());
return Ok::<_, Error>(());
};
let res = status.main.synchronize(&manager).await?;
@@ -76,179 +59,12 @@ pub async fn synchronize_all(ctx: &RpcContext) -> Result<(), Error> {
Ok(res)
}
if let Err(e) = status(ctx, id.clone()).await {
.await
{
tracing::error!("Error syncronizing status of {}: {}", id, e);
tracing::debug!("{:?}", e);
}
})
.await;
Ok(())
}
#[instrument(skip(ctx))]
pub async fn check_all(ctx: &RpcContext) -> Result<(), Error> {
let mut db = ctx.db.handle();
// TODO: DRAGONS!!
// this locks all of package data to solve a deadlock issue below. As of the writing of this comment, it
// hangs during the `check` operation on /package-data/<id>. There is another daemon loop somewhere that
// is likely iterating through packages in a different order.
let pkg_ids = crate::db::DatabaseModel::new()
.package_data()
.keys(&mut db, true)
.await?;
let mut status_manifest = Vec::with_capacity(pkg_ids.len());
let mut installed_deps = Vec::with_capacity(pkg_ids.len());
for id in &pkg_ids {
if let Some(installed) = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|m| m.installed())
.check(&mut db)
.await?
{
let listed_deps = installed
.clone()
.manifest()
.dependencies()
.get(&mut db, false)
.await?
.to_owned()
.0
.into_iter()
.map(|x| x.0)
.collect::<BTreeSet<PackageId>>();
status_manifest.push((
installed.clone().status(),
Arc::new(installed.clone().manifest().get(&mut db, true).await?),
));
installed_deps.push((
installed.clone(),
Arc::new({
installed
.current_dependencies()
.get(&mut db, true)
.await?
.to_owned()
.into_iter()
.filter(|(id, _)| listed_deps.contains(id))
.collect::<BTreeMap<PackageId, CurrentDependencyInfo>>()
}),
));
}
}
drop(db);
#[instrument(skip(ctx, db))]
async fn main_status<Db: DbHandle>(
ctx: RpcContext,
status_model: StatusModel,
manifest: Arc<ModelData<Manifest>>,
mut db: Db,
) -> Result<MainStatus, Error> {
let mut status = status_model.get_mut(&mut db).await?;
status.main.check(&ctx, &mut db, &*manifest).await?;
let res = status.main.clone();
status.save(&mut db).await?;
Ok(res)
}
let (status_sender, mut statuses_recv) = tokio::sync::mpsc::channel(status_manifest.len() + 1);
let mut statuses = BTreeMap::new();
futures::stream::iter(
status_manifest
.into_iter()
.zip(pkg_ids.clone())
.zip(std::iter::repeat(ctx)),
)
.for_each_concurrent(None, move |(((status, manifest), id), ctx)| {
let status_sender = status_sender.clone();
async move {
match main_status(ctx.clone(), status, manifest, ctx.db.handle()).await {
Err(e) => {
tracing::error!("Error running main health check for {}: {}", id, e);
tracing::debug!("{:?}", e);
}
Ok(status) => {
status_sender.send((id, status)).await.expect("unreachable");
}
}
}
})
.await;
while let Some((id, status)) = statuses_recv.recv().await {
statuses.insert(id, status);
}
let statuses = Arc::new(statuses);
#[instrument(skip(db))]
async fn dependency_status<Db: DbHandle>(
id: &PackageId,
statuses: Arc<BTreeMap<PackageId, MainStatus>>,
model: InstalledPackageDataEntryModel,
current_deps: Arc<BTreeMap<PackageId, CurrentDependencyInfo>>,
mut db: Db,
) -> Result<(), Error> {
for (dep_id, dep_info) in current_deps.iter().filter(|(dep_id, _)| dep_id != &id) {
if let Some(err) = match statuses.get(dep_id) {
Some(MainStatus::Running { ref health, .. })
| Some(MainStatus::BackingUp {
started: Some(_),
ref health,
}) => {
let mut failures = BTreeMap::new();
for check in &dep_info.health_checks {
let res = health
.get(check)
.cloned()
.unwrap_or_else(|| HealthCheckResult::Disabled);
if !matches!(res, HealthCheckResult::Success) {
failures.insert(check.clone(), res);
}
}
if !failures.is_empty() {
Some(DependencyError::HealthChecksFailed { failures })
} else {
None
}
}
_ => Some(DependencyError::NotRunning),
} {
break_transitive(&mut db, id, dep_id, err, &mut BTreeMap::new()).await?;
} else {
let mut errs = model
.clone()
.status()
.dependency_errors()
.get_mut(&mut db)
.await?;
if matches!(
errs.get(dep_id),
Some(DependencyError::HealthChecksFailed { .. })
) {
errs.0.remove(dep_id);
errs.save(&mut db).await?;
}
}
}
Ok(())
}
futures::stream::iter(installed_deps.into_iter().zip(pkg_ids.clone()))
.for_each_concurrent(None, |((installed, deps), id)| {
let statuses = statuses.clone();
async move {
if let Err(e) =
dependency_status(&id, statuses, installed, deps, ctx.db.handle()).await
{
tracing::error!("Error running dependency health check for {}: {}", id, e);
tracing::debug!("{:?}", e);
}
}
})
.await;
Ok(())
}
@@ -316,7 +132,7 @@ impl MainStatus {
}
Ok(())
}
#[instrument(skip(ctx, db))]
#[instrument(skip(ctx, db, manifest))]
pub async fn check<Db: DbHandle>(
&mut self,
ctx: &RpcContext,
@@ -352,8 +168,8 @@ impl MainStatus {
NotificationLevel::Error,
String::from("Critical Health Check Failed"),
format!("{} was shut down because a health check required for its operation failed\n{}", manifest.title, error),
NotificationSubtype::General,
None
(),
None,
)
.await?;
should_stop = true;

View File

@@ -25,7 +25,7 @@ use tracing::instrument;
use crate::context::RpcContext;
use crate::db::model::{ServerStatus, UpdateProgress};
use crate::db::util::WithRevision;
use crate::notifications::{NotificationLevel, NotificationSubtype};
use crate::notifications::NotificationLevel;
use crate::update::latest_information::LatestInformation;
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt};
@@ -213,7 +213,7 @@ async fn maybe_do_update(ctx: RpcContext) -> Result<Option<Arc<Revision>>, Error
crate::ErrorKind::InvalidRequest,
))
}
_ => (),
ServerStatus::Running => (),
}
let mounted_boot = mount_label(Boot).await?;
@@ -259,7 +259,7 @@ async fn maybe_do_update(ctx: RpcContext) -> Result<Option<Arc<Revision>>, Error
NotificationLevel::Error,
"EmbassyOS Update Failed".to_owned(),
format!("Update was not successful because of {}", e),
NotificationSubtype::General,
(),
None,
)
.await

View File

@@ -1,21 +1,27 @@
use std::collections::BTreeMap;
use std::future::Future;
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::ops::Deref;
use std::path::Path;
use std::path::{Path, PathBuf};
use std::process::{exit, Stdio};
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use clap::ArgMatches;
use color_eyre::eyre::{self, eyre};
use digest::Digest;
use fd_lock_rs::FdLock;
use futures::future::BoxFuture;
use futures::FutureExt;
use lazy_static::lazy_static;
use patch_db::{HasModel, Model};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_json::Value;
use tokio::fs::File;
use tokio::sync::RwLock;
use tokio::sync::{Mutex, OwnedMutexGuard, RwLock};
use tokio::task::{JoinError, JoinHandle};
use crate::shutdown::Shutdown;
@@ -935,6 +941,10 @@ impl<F: FnOnce() -> T, T> GeneralGuard<F, T> {
pub fn drop(mut self) -> T {
self.0.take().unwrap()()
}
pub fn drop_without_action(mut self) {
self.0 = None;
}
}
impl<F: FnOnce() -> T, T> Drop for GeneralGuard<F, T> {
@@ -944,3 +954,142 @@ impl<F: FnOnce() -> T, T> Drop for GeneralGuard<F, T> {
}
}
}
pub async fn canonicalize(
path: impl AsRef<Path> + Send + Sync,
create_parent: bool,
) -> Result<PathBuf, Error> {
fn create_canonical_folder<'a>(
path: impl AsRef<Path> + Send + Sync + 'a,
) -> BoxFuture<'a, Result<PathBuf, Error>> {
async move {
let path = canonicalize(path, true).await?;
tokio::fs::create_dir(&path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, path.display().to_string()))?;
Ok(path)
}
.boxed()
}
let path = path.as_ref();
if tokio::fs::metadata(path).await.is_err() {
if let (Some(parent), Some(file_name)) = (path.parent(), path.file_name()) {
if create_parent && tokio::fs::metadata(parent).await.is_err() {
return Ok(create_canonical_folder(parent).await?.join(file_name));
} else {
return Ok(tokio::fs::canonicalize(parent)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, parent.display().to_string()))?
.join(file_name));
}
}
}
tokio::fs::canonicalize(&path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, path.display().to_string()))
}
pub struct FileLock(OwnedMutexGuard<()>, Option<FdLock<File>>);
impl Drop for FileLock {
fn drop(&mut self) {
if let Some(fd_lock) = self.1.take() {
tokio::task::spawn_blocking(|| fd_lock.unlock(true).map_err(|(_, e)| e).unwrap());
}
}
}
impl FileLock {
pub async fn new(path: impl AsRef<Path> + Send + Sync) -> Result<Self, Error> {
lazy_static! {
static ref INTERNAL_LOCKS: Mutex<BTreeMap<PathBuf, Arc<Mutex<()>>>> =
Mutex::new(BTreeMap::new());
}
let path = canonicalize(path.as_ref(), true).await?;
let mut internal_locks = INTERNAL_LOCKS.lock().await;
if !internal_locks.contains_key(&path) {
internal_locks.insert(path.clone(), Arc::new(Mutex::new(())));
}
let tex = internal_locks.get(&path).unwrap().clone();
drop(internal_locks);
let tex_guard = tex.lock_owned().await;
let parent = path.parent().unwrap_or(Path::new("/"));
if tokio::fs::metadata(parent).await.is_err() {
tokio::fs::create_dir_all(parent)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, parent.display().to_string()))?;
}
let f = File::create(&path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, path.display().to_string()))?;
let file_guard = tokio::task::spawn_blocking(|| {
fd_lock_rs::FdLock::lock(f, fd_lock_rs::LockType::Exclusive, true)
})
.await
.with_kind(crate::ErrorKind::Unknown)?
.with_kind(crate::ErrorKind::Filesystem)?;
Ok(FileLock(tex_guard, Some(file_guard)))
}
pub async fn unlock(mut self) -> Result<(), Error> {
if let Some(fd_lock) = self.1.take() {
tokio::task::spawn_blocking(|| fd_lock.unlock(true).map_err(|(_, e)| e))
.await
.with_kind(crate::ErrorKind::Unknown)?
.with_kind(crate::ErrorKind::Filesystem)?;
}
Ok(())
}
}
pub struct AtomicFile {
tmp_path: PathBuf,
path: PathBuf,
file: File,
}
impl AtomicFile {
pub async fn new(path: impl AsRef<Path> + Send + Sync) -> Result<Self, Error> {
let path = canonicalize(&path, true).await?;
let tmp_path = if let (Some(parent), Some(file_name)) =
(path.parent(), path.file_name().and_then(|f| f.to_str()))
{
parent.join(format!(".{}.tmp", file_name))
} else {
return Err(Error::new(
eyre!("invalid path: {}", path.display()),
crate::ErrorKind::Filesystem,
));
};
let file = File::create(&tmp_path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, tmp_path.display().to_string()))?;
Ok(Self {
tmp_path,
path,
file,
})
}
pub async fn save(mut self) -> Result<(), Error> {
use tokio::io::AsyncWriteExt;
self.file.flush().await?;
self.file.shutdown().await?;
self.file.sync_all().await?;
tokio::fs::rename(&self.tmp_path, &self.path)
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("mv {} -> {}", self.tmp_path.display(), self.path.display()),
)
})
}
}
impl std::ops::Deref for AtomicFile {
type Target = File;
fn deref(&self) -> &Self::Target {
&self.file
}
}
impl std::ops::DerefMut for AtomicFile {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.file
}
}

View File

@@ -1,59 +1,21 @@
use std::cmp::Ordering;
use async_trait::async_trait;
use lazy_static::lazy_static;
use patch_db::DbHandle;
use color_eyre::eyre::eyre;
use patch_db::json_ptr::JsonPointer;
use patch_db::{DbHandle, LockType};
use rpc_toolkit::command;
use crate::Error;
use crate::{Error, ResultExt};
// mod v0_1_0;
// mod v0_1_1;
// mod v0_1_2;
// mod v0_1_3;
// mod v0_1_4;
// mod v0_1_5;
// mod v0_2_0;
// mod v0_2_1;
// mod v0_2_2;
// mod v0_2_3;
// mod v0_2_4;
// mod v0_2_5;
// mod v0_2_6;
// mod v0_2_7;
// mod v0_2_8;
// mod v0_2_9;
mod v0_3_0;
// mod v0_2_10;
// mod v0_2_11;
// mod v0_2_12;
// pub use v0_2_12::Version as Current;
pub type Current = ();
pub type Current = v0_3_0::Version;
#[derive(serde::Serialize, serde::Deserialize)]
#[serde(untagged)]
enum Version {
V0_0_0(Wrapper<()>),
// V0_1_0(Wrapper<v0_1_0::Version>),
// V0_1_1(Wrapper<v0_1_1::Version>),
// V0_1_2(Wrapper<v0_1_2::Version>),
// V0_1_3(Wrapper<v0_1_3::Version>),
// V0_1_4(Wrapper<v0_1_4::Version>),
// V0_1_5(Wrapper<v0_1_5::Version>),
// V0_2_0(Wrapper<v0_2_0::Version>),
// V0_2_1(Wrapper<v0_2_1::Version>),
// V0_2_2(Wrapper<v0_2_2::Version>),
// V0_2_3(Wrapper<v0_2_3::Version>),
// V0_2_4(Wrapper<v0_2_4::Version>),
// V0_2_5(Wrapper<v0_2_5::Version>),
// V0_2_6(Wrapper<v0_2_6::Version>),
// V0_2_7(Wrapper<v0_2_7::Version>),
// V0_2_8(Wrapper<v0_2_8::Version>),
// V0_2_9(Wrapper<v0_2_9::Version>),
// V0_2_10(Wrapper<v0_2_10::Version>),
// V0_2_11(Wrapper<v0_2_11::Version>),
// V0_2_12(Wrapper<v0_2_12::Version>),
V0_3_0(Wrapper<v0_3_0::Version>),
Other(emver::Version),
}
@@ -64,14 +26,14 @@ where
{
type Previous: VersionT;
fn new() -> Self;
fn semver(&self) -> &'static crate::util::Version;
fn semver(&self) -> emver::Version;
async fn up<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error>;
async fn down<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error>;
async fn commit<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> {
crate::db::DatabaseModel::new()
.server_info()
.version()
.put(db, self.semver())
.put(db, &self.semver().into())
.await?;
Ok(())
@@ -81,7 +43,7 @@ where
version: &V,
db: &mut Db,
) -> Result<(), Error> {
match self.semver().cmp(version.semver()) {
match self.semver().cmp(&version.semver()) {
Ordering::Greater => self.rollback_to_unchecked(version, db).await,
Ordering::Less => version.migrate_from_unchecked(self, db).await,
Ordering::Equal => Ok(()),
@@ -96,11 +58,7 @@ where
if version.semver() != previous.semver() {
previous.migrate_from_unchecked(version, db).await?;
}
tracing::info!(
"{} -> {}",
previous.semver().as_str(),
self.semver().as_str()
);
tracing::info!("{} -> {}", previous.semver(), self.semver(),);
self.up(db).await?;
self.commit(db).await?;
Ok(())
@@ -111,11 +69,7 @@ where
db: &mut Db,
) -> Result<(), Error> {
let previous = Self::Previous::new();
tracing::info!(
"{} -> {}",
self.semver().as_str(),
previous.semver().as_str()
);
tracing::info!("{} -> {}", self.semver(), previous.semver(),);
self.down(db).await?;
previous.commit(db).await?;
if version.semver() != previous.semver() {
@@ -140,39 +94,30 @@ where
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
let v = crate::util::Version::deserialize(deserializer)?;
let version = T::new();
if &v == version.semver() {
if &*v == &version.semver() {
Ok(Wrapper(version))
} else {
Err(serde::de::Error::custom("Mismatched Version"))
}
}
}
lazy_static! {
static ref V0_0_0: crate::util::Version = emver::Version::new(0, 0, 0, 0).into();
}
#[async_trait]
impl VersionT for () {
type Previous = ();
fn new() -> Self {
()
}
fn semver(&self) -> &'static crate::util::Version {
&*V0_0_0
}
async fn up<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> {
Ok(())
}
async fn down<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> {
Ok(())
}
}
pub async fn init() -> Result<(), Error> {
todo!()
}
pub async fn self_update(requirement: emver::VersionRange) -> Result<(), Error> {
todo!()
pub async fn init<Db: DbHandle>(db: &mut Db) -> Result<(), Error> {
let ptr: JsonPointer = "/server-info/version"
.parse()
.with_kind(crate::ErrorKind::Database)?;
db.lock(ptr.clone(), LockType::Write).await;
let version: Version = db.get(&ptr).await?;
match version {
Version::V0_3_0(v) => v.0.migrate_to(&Current::new(), db).await?,
Version::Other(_) => {
return Err(Error::new(
eyre!("Cannot downgrade"),
crate::ErrorKind::InvalidRequest,
))
}
}
Ok(())
}
pub const COMMIT_HASH: &'static str =

View File

@@ -0,0 +1,21 @@
use super::*;
const V0_3_0: emver::Version = emver::Version::new(0, 3, 0, 0);
pub struct Version;
#[async_trait]
impl VersionT for Version {
type Previous = v0_3_0::Version;
fn new() -> Self {
Version
}
fn semver(&self) -> emver::Version {
V0_3_0
}
async fn up<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> {
Ok(())
}
async fn down<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> {
Ok(())
}
}

View File

@@ -14,7 +14,7 @@ use crate::util::Version;
use crate::Error;
pub const PKG_VOLUME_DIR: &'static str = "package-data/volumes";
pub const BACKUP_DIR: &'static str = "/mnt/embassy-os-backups/EmbassyBackups";
pub const BACKUP_DIR: &'static str = "/media/embassy-os/backups";
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum VolumeId<S: AsRef<str> = String> {
@@ -156,6 +156,10 @@ pub fn asset_dir<P: AsRef<Path>>(datadir: P, pkg_id: &PackageId, version: &Versi
.join(version.as_str())
}
pub fn backup_dir(pkg_id: &PackageId) -> PathBuf {
Path::new(BACKUP_DIR).join(pkg_id).join("data")
}
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[serde(tag = "type")]
#[serde(rename_all = "kebab-case")]
@@ -225,7 +229,7 @@ impl Volume {
Volume::Certificate { interface_id: _ } => {
ctx.net_controller.nginx.ssl_directory_for(pkg_id)
}
Volume::Backup { .. } => Path::new(BACKUP_DIR).join(pkg_id),
Volume::Backup { .. } => backup_dir(pkg_id),
}
}
pub fn set_readonly(&mut self) {

View File

@@ -2,6 +2,9 @@
# Update repositories, install dependencies, do some initial configurations, set hostname, enable embassy-init, and config Tor
set -e
! test -f /etc/docker/daemon.json || rm /etc/docker/daemon.json
apt update
apt install -y \
docker.io \
@@ -16,7 +19,8 @@ apt install -y \
sqlite3 \
wireless-tools \
net-tools \
ifupdown
ifupdown \
ecryptfs-utils
sed -i 's/"1"/"0"/g' /etc/apt/apt.conf.d/20auto-upgrades
sed -i 's/Restart=on-failure/Restart=always/g' /lib/systemd/system/tor@default.service
sed -i '/}/i \ \ \ \ application\/wasm \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ wasm;' /etc/nginx/mime.types
@@ -28,7 +32,7 @@ echo "iface wlan0 inet dhcp" >> /etc/network/interfaces
mkdir -p /etc/nginx/ssl
# fix to suppress docker warning, fixed in 21.xx release of docker cli: https://github.com/docker/cli/pull/2934
mkdir /root/.docker
mkdir -p /root/.docker
touch /root/.docker/config.json
docker run --privileged --rm tonistiigi/binfmt --install all

View File

@@ -16,4 +16,11 @@ export OUTPUT_DEVICE=$(sudo losetup --show -fP eos.img)
export LOOPDEV=$(sudo losetup --show -fP ubuntu.img)
./build/partitioning.sh
./build/write-image.sh
sudo e2fsck -f ${OUTPUT_DEVICE}p3
sudo resize2fs -M ${OUTPUT_DEVICE}p3
BLOCK_INFO=$(sudo dumpe2fs ${OUTPUT_DEVICE}p3)
BLOCK_COUNT=$(echo "$BLOCK_INFO" | grep "Block count:" | sed 's/Block count:\s\+//g')
BLOCK_SIZE=$(echo "$BLOCK_INFO" | grep "Block size:" | sed 's/Block size:\s\+//g')
echo "YOUR GREEN FILESYSTEM is '$[$BLOCK_COUNT*$BLOCK_SIZE]' BYTES"
echo "IF YOU ARE QUICK-FLASHING FROM MAC-OS, NOTE THIS NUMBER FOR LATER"
sudo losetup -d $OUTPUT_DEVICE

View File

@@ -2,14 +2,20 @@
set -e
function mktmpfifo () {
TMP_PATH=$(mktemp)
rm $TMP_PATH
mkfifo $TMP_PATH
echo $TMP_PATH
}
echo 'This script will only work on a card that has previously had a full image written to it.'
echo 'It will *only* flash the ext4 portion (`green` partition) of the img file onto the card.'
echo 'The product key, disk guid, and kernel data will *not* be affected.'
read -p "Continue? [y/N]" -n 1 -r
echo
if ! [[ "$REPLY" =~ ^[Yy]$ ]]; then
exit 1
else
echo
fi
if ! which pv > /dev/null; then
@@ -26,48 +32,101 @@ if ! which pv > /dev/null; then
if [[ "$REPLY" =~ ^[Yy]$ ]]; then
sudo pacman -S pv
fi
elif which brew > /dev/null; then
read -p "Install? [y/N]" -n 1 -r
echo
if [[ "$REPLY" =~ ^[Yy]$ ]]; then
brew install pv
fi
else
>&2 echo 'This script does not recognize what package manager you have available on your system.'
>&2 echo 'Please go install the utility manually if you want progress reporting.'
fi
fi
if ! test -e /dev/disk/by-label/green; then
if [[ "$(uname)" == "Darwin" ]]; then
export TARGET_PARTITION="disk$(diskutil list | grep EMBASSY | head -1 | rev | cut -b 3)s3"
if ! test -e $TARGET_PARTITION; then
>&2 echo '`green` partition not found'
exit 1
fi
export SOURCE_DEVICE="$(hdiutil attach -nomount eos.img | head -n1 | sed -E 's/([^ ]+).*$/\1/g')"
export SOURCE_PARTITION="${SOURCE_DEVICE}s3"
function detach () {
hdiutil detach $SOURCE_DEVICE
}
else
if ! test -e /dev/disk/by-label/green; then
>&2 echo '`green` partition not found'
exit 1
fi
export TARGET_PARTITION=$(readlink -f /dev/disk/by-label/green)
export SOURCE_DEVICE="$(sudo losetup --show -fP eos.img)"
export SOURCE_PARTITION="${SOURCE_DEVICE}p3"
function detach () {
sudo losetup -d ${SOURCE_DEVICE}
}
fi
export TARGET_PARTITION=$(readlink -f /dev/disk/by-label/green)
if [[ "$TARGET_PARTITION" =~ ^/dev/loop ]]; then
>&2 echo 'You are currently flashing onto a loop device.'
>&2 echo 'This is probably a mistake, and usually means you failed to detach a .img file.'
read -p "Continue anyway? [y/N]" -n 1 -r
echo
if ! [[ "$REPLY" =~ ^[Yy]$ ]]; then
exit 1
else
echo
fi
fi
export SOURCE_DEVICE=$(sudo losetup --show -fP eos.img)
sudo e2fsck -f ${SOURCE_DEVICE}p3
sudo resize2fs -M ${SOURCE_DEVICE}p3
export BLOCK_INFO=$(sudo dumpe2fs ${SOURCE_DEVICE}p3)
export BLOCK_COUNT=$(echo "$BLOCK_INFO" | grep "Block count:" | sed 's/Block count:\s\+//g')
export BLOCK_SIZE=$(echo "$BLOCK_INFO" | grep "Block size:" | sed 's/Block size:\s\+//g')
export FS_SIZE=$[$BLOCK_COUNT*$BLOCK_SIZE]
echo "Flashing $FS_SIZE bytes to $TARGET_PARTITION"
if which pv > /dev/null; then
sudo cat ${SOURCE_DEVICE}p3 | head -c $FS_SIZE | pv -s $FS_SIZE | sudo dd of=${TARGET_PARTITION} bs=1M iflag=fullblock oflag=direct 2>/dev/null
if [[ "$(uname)" == "Darwin" ]]; then
if test -z "$FS_SIZE"; then
read -p "Enter FS Size (shown during make of eos.img)" -r
export FS_SIZE=$REPLY
fi
else
sudo cat ${SOURCE_DEVICE}p3 | head -c $FS_SIZE | sudo dd of=${TARGET_PARTITION} bs=1M iflag=fullblock oflag=direct
sudo e2fsck -f ${SOURCE_PARTITION}
sudo resize2fs -M ${SOURCE_PARTITION}
export BLOCK_INFO=$(sudo dumpe2fs ${SOURCE_PARTITION})
export BLOCK_COUNT=$(echo "$BLOCK_INFO" | grep "Block count:" | sed 's/Block count:\s\+//g')
export BLOCK_SIZE=$(echo "$BLOCK_INFO" | grep "Block size:" | sed 's/Block size:\s\+//g')
export FS_SIZE=$[$BLOCK_COUNT*$BLOCK_SIZE]
fi
echo "Flashing $FS_SIZE bytes to $TARGET_PARTITION"
if [[ "$(uname)" == "Darwin" ]]; then
if which pv > /dev/null; then
sudo cat ${SOURCE_PARTITION} | head -c $FS_SIZE | pv -s $FS_SIZE | sudo dd of=${TARGET_PARTITION} bs=1m 2>/dev/null
else
sudo cat ${SOURCE_PARTITION} | head -c $FS_SIZE | sudo dd of=${TARGET_PARTITION} bs=1M iflag=fullblock oflag=direct
fi
else
if which pv > /dev/null; then
sudo cat ${SOURCE_PARTITION} | head -c $FS_SIZE | pv -s $FS_SIZE | sudo dd of=${TARGET_PARTITION} bs=1M iflag=fullblock oflag=direct 2>/dev/null
else
sudo cat ${SOURCE_PARTITION} | head -c $FS_SIZE | sudo dd of=${TARGET_PARTITION} bs=1M iflag=fullblock oflag=direct
fi
fi
echo Verifying...
export INPUT_HASH=$(sudo cat ${SOURCE_DEVICE}p3 | head -c $FS_SIZE | sha256sum)
export OUTPUT_HASH=$(sudo cat ${TARGET_PARTITION} | head -c $FS_SIZE | sha256sum)
sudo losetup -d ${SOURCE_DEVICE}
if ! [[ "$INPUT_HASH" == "$OUTPUT_HASH" ]]; then
export INPUT_HASH=$(mktemp)
export OUTPUT_HASH=$(mktemp)
if which pv > /dev/null; then
export PV_IN=$(mktmpfifo)
fi
sudo cat ${SOURCE_PARTITION} | head -c $FS_SIZE | tee -a $PV_IN | sha256sum > $INPUT_HASH &
export INPUT_CHILD=$!
sudo cat ${TARGET_PARTITION} | head -c $FS_SIZE | tee -a $PV_IN | sha256sum > $OUTPUT_HASH &
export OUTPUT_CHILD=$!
if which pv > /dev/null; then
pv -s $[$FS_SIZE*2] < $PV_IN > /dev/null &
fi
wait $INPUT_CHILD $OUTPUT_CHILD
if which pv > /dev/null; then
rm $PV_IN
fi
detach
if ! [[ "$(cat $INPUT_HASH)" == "$(cat $OUTPUT_HASH)" ]]; then
rm $INPUT_HASH $OUTPUT_HASH
>&2 echo Verification Failed
exit 1
fi
rm $INPUT_HASH $OUTPUT_HASH
echo "Verification Succeeded"

View File

@@ -21,6 +21,7 @@ sudo e2label ${OUTPUT_DEVICE}p4 blue
mkdir -p /tmp/eos-mnt
sudo mount ${OUTPUT_DEVICE}p1 /tmp/eos-mnt
sudo sed -i 's/^/usb-storage.quirks=152d:0562:u /g' /tmp/eos-mnt/cmdline.txt
sudo sed -i 's/LABEL=writable/LABEL=green/g' /tmp/eos-mnt/cmdline.txt
cat /tmp/eos-mnt/config.txt | grep -v "dtoverlay=" | sudo tee /tmp/eos-mnt/config.txt.tmp
echo "dtoverlay=pwm-2chan" | sudo tee -a /tmp/eos-mnt/config.txt.tmp

View File

@@ -5,7 +5,6 @@
"requires": true,
"packages": {
"": {
"name": "setup-wizard",
"version": "0.0.1",
"dependencies": {
"@angular/common": "^12.2.1",
@@ -15,6 +14,7 @@
"@angular/platform-browser-dynamic": "^12.2.1",
"@angular/router": "^12.2.1",
"@ionic/angular": "^5.7.0",
"@start9labs/argon2": "^0.1.0",
"@types/aes-js": "^3.1.1",
"@types/pbkdf2": "^3.1.0",
"aes-js": "^3.1.2",
@@ -2870,6 +2870,11 @@
}
}
},
"node_modules/@start9labs/argon2": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/@start9labs/argon2/-/argon2-0.1.0.tgz",
"integrity": "sha512-Ng9Ibuj0p2drQRW013AkUz6TqWysXw/9OyoEoXQZL7kfac0LrxWIDj+xvg+orqQMxcvClWgzeQY/c+IgJtcevA=="
},
"node_modules/@stencil/core": {
"version": "2.9.0",
"resolved": "https://registry.npmjs.org/@stencil/core/-/core-2.9.0.tgz",
@@ -18314,6 +18319,11 @@
}
}
},
"@start9labs/argon2": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/@start9labs/argon2/-/argon2-0.1.0.tgz",
"integrity": "sha512-Ng9Ibuj0p2drQRW013AkUz6TqWysXw/9OyoEoXQZL7kfac0LrxWIDj+xvg+orqQMxcvClWgzeQY/c+IgJtcevA=="
},
"@stencil/core": {
"version": "2.9.0",
"resolved": "https://registry.npmjs.org/@stencil/core/-/core-2.9.0.tgz",

View File

@@ -19,6 +19,7 @@
"@angular/platform-browser-dynamic": "^12.2.1",
"@angular/router": "^12.2.1",
"@ionic/angular": "^5.7.0",
"@start9labs/argon2": "^0.1.0",
"@types/aes-js": "^3.1.1",
"@types/pbkdf2": "^3.1.0",
"aes-js": "^3.1.2",

View File

@@ -38,7 +38,7 @@ export class EmbassyPage {
async getDrives () {
try {
this.storageDrives = (await this.apiService.getDrives()).filter(d => d.logicalname !== this.stateService.recoveryDrive?.logicalname)
this.storageDrives = (await this.apiService.getDrives()).filter(d => !d.partitions.map(p => p.logicalname).includes(this.stateService.recoveryPartition?.logicalname))
} catch (e) {
this.errorToastService.present(e.message)
} finally {
@@ -98,7 +98,7 @@ export class EmbassyPage {
console.error(e.details)
} finally {
loader.dismiss()
if (!!this.stateService.recoveryDrive) {
if (!!this.stateService.recoveryPartition) {
await this.navCtrl.navigateForward(`/loading`, { animationDirection: 'forward' })
} else {
await this.navCtrl.navigateForward(`/success`, { animationDirection: 'forward' })

View File

@@ -1,6 +1,7 @@
import { Component, Input } from '@angular/core'
import { LoadingController, ModalController } from '@ionic/angular'
import { ApiService, DiskInfo } from 'src/app/services/api/api.service'
import { ApiService, DiskInfo, PartitionInfo } from 'src/app/services/api/api.service'
import * as argon2 from '@start9labs/argon2'
@Component({
selector: 'app-password',
@@ -8,7 +9,7 @@ import { ApiService, DiskInfo } from 'src/app/services/api/api.service'
styleUrls: ['password.page.scss'],
})
export class PasswordPage {
@Input() recoveryDrive: DiskInfo
@Input() recoveryPartition: PartitionInfo
@Input() storageDrive: DiskInfo
pwError = ''
@@ -34,23 +35,13 @@ export class PasswordPage {
}
async verifyPw () {
if (!this.recoveryDrive) this.pwError = 'No recovery drive' // unreachable
const loader = await this.loadingCtrl.create({
message: 'Verifying Password',
})
await loader.present()
if (!this.recoveryPartition || !this.recoveryPartition['embassy-os']) this.pwError = 'No recovery drive' // unreachable
try {
const isCorrectPassword = await this.apiService.verify03XPassword(this.recoveryDrive.logicalname, this.password)
if (isCorrectPassword) {
argon2.verify( this.recoveryPartition['embassy-os']['password-hash'], this.password)
this.modalController.dismiss({ password: this.password })
} else {
this.pwError = 'Incorrect password provided'
}
} catch (e) {
this.pwError = 'Error connecting to Embassy'
} finally {
loader.dismiss()
this.pwError = 'Incorrect password provided'
}
}
@@ -65,7 +56,7 @@ export class PasswordPage {
}
validate () {
if (!!this.recoveryDrive) return this.pwError = ''
if (!!this.recoveryPartition) return this.pwError = ''
if (this.passwordVer) {
this.checkVer()

View File

@@ -1,6 +1,6 @@
import { Component, Input } from '@angular/core'
import { LoadingController, ModalController } from '@ionic/angular'
import { ApiService, DiskInfo } from 'src/app/services/api/api.service'
import { ApiService, PartitionInfo } from 'src/app/services/api/api.service'
import { HttpService } from 'src/app/services/api/http.service'
@Component({
@@ -9,7 +9,7 @@ import { HttpService } from 'src/app/services/api/http.service'
styleUrls: ['prod-key-modal.page.scss'],
})
export class ProdKeyModal {
@Input() recoveryDrive: DiskInfo
@Input() recoveryPartition: PartitionInfo
error = ''
productKey = ''
@@ -31,7 +31,7 @@ export class ProdKeyModal {
await loader.present()
try {
await this.apiService.set02XDrive(this.recoveryDrive.logicalname)
await this.apiService.set02XDrive(this.recoveryPartition.logicalname)
this.httpService.productKey = this.productKey
await this.apiService.verifyProductKey()
this.modalController.dismiss({ productKey: this.productKey })

View File

@@ -15,7 +15,7 @@
<ion-card-content class="ion-margin">
<ng-container *ngIf="!loading && !recoveryDrives.length">
<ng-container *ngIf="!loading && !recoveryPartitions.length">
<h2 color="light">No recovery drives found</h2>
<p color="light">Please connect a recovery drive to your Embassy and refresh the page.</p>
<ion-button
@@ -40,29 +40,29 @@
</ion-label>
</ion-item>
</ng-container>
<ng-container *ngIf="recoveryDrives.length">
<ion-item (click)="chooseDrive(drive)" class="ion-margin-bottom" button color="light" lines="none" *ngFor="let drive of recoveryDrives" [ngClass]="drive.logicalname === selectedDrive?.logicalname ? 'selected' : null">
<ng-container *ngIf="recoveryPartitions.length">
<ion-item (click)="choosePartition(p.partition)" class="ion-margin-bottom" button color="light" lines="none" *ngFor="let p of recoveryPartitions" [ngClass]="p.partition.logicalname === selectedPartition?.logicalname ? 'selected' : null">
<ion-icon slot="start" name="save-outline"></ion-icon>
<ion-label class="ion-text-wrap">
<h1>{{ drive.logicalname }} - {{ drive.capacity | convertBytes }}</h1>
<h2 *ngIf="drive.vendor || drive.model">
{{ drive.vendor }}
<span *ngIf="drive.vendor && drive.model"> - </span>
{{ drive.model }}
<h1>{{ p.partition.logicalname }} <span *ngIf="!!p.partition.label">-</span> {{ p.partition.label }}</h1>
<h2 *ngIf="p.vendor || p.model">
{{ p.vendor }}
<span *ngIf="p.vendor && p.model"> - </span>
{{ p.model }}
</h2>
<h2> Embassy version: {{drive['embassy-os'].version}}</h2>
<h2> Embassy version: {{p.partition['embassy-os'].version}}</h2>
</ion-label>
<ion-icon *ngIf="(drive['embassy-os'].version.startsWith('0.2') && stateService.hasProductKey) || passwords[drive.logicalname] || prodKeys[drive.logicalname]" color="success" slot="end" name="lock-open-outline"></ion-icon>
<ion-icon *ngIf="(drive['embassy-os'].version.startsWith('0.2') && !stateService.hasProductKey && !prodKeys[drive.logicalname]) || (!drive['embassy-os'].version.startsWith('0.2') && !passwords[drive.logicalname])" color="danger" slot="end" name="lock-closed-outline"></ion-icon>
<ion-icon *ngIf="(p.partition['embassy-os'].version.startsWith('0.2') && stateService.hasProductKey) || passwords[p.partition.logicalname] || prodKeys[p.partition.logicalname]" color="success" slot="end" name="lock-open-outline"></ion-icon>
<ion-icon *ngIf="(p.partition['embassy-os'].version.startsWith('0.2') && !stateService.hasProductKey && !prodKeys[p.partition.logicalname]) || (!p.partition['embassy-os'].version.startsWith('0.2') && !passwords[p.partition.logicalname])" color="danger" slot="end" name="lock-closed-outline"></ion-icon>
</ion-item>
</ng-container>
<ion-button
(click)="selectRecoveryDrive()"
(click)="selectRecoveryPartition()"
color="light"
[disabled]="!selectedDrive || (!passwords[selectedDrive.logicalname] && !selectedDrive['embassy-os'].version.startsWith('0.2'))"
[disabled]="!selectedPartition || (!passwords[selectedPartition.logicalname] && !selectedPartition['embassy-os'].version.startsWith('0.2'))"
class="claim-button"
*ngIf="recoveryDrives.length"
*ngIf="recoveryPartitions.length"
>
Next
</ion-button>

View File

@@ -1,6 +1,6 @@
import { Component } from '@angular/core'
import { ModalController, NavController } from '@ionic/angular'
import { ApiService, DiskInfo } from 'src/app/services/api/api.service'
import { ApiService, PartitionInfo } from 'src/app/services/api/api.service'
import { ErrorToastService } from 'src/app/services/error-toast.service'
import { StateService } from 'src/app/services/state.service'
import { PasswordPage } from '../password/password.page'
@@ -14,8 +14,8 @@ import { ProdKeyModal } from '../prod-key-modal/prod-key-modal.page'
export class RecoverPage {
passwords = { }
prodKeys = { }
recoveryDrives = []
selectedDrive: DiskInfo = null
recoveryPartitions: { partition: PartitionInfo, model: string, vendor: string }[] = []
selectedPartition: PartitionInfo = null
loading = true
constructor (
@@ -27,26 +27,25 @@ export class RecoverPage {
) { }
async ngOnInit () {
await this.getDrives()
await this.getPartitions()
}
async refresh () {
this.recoveryDrives = []
this.selectedDrive = null
this.recoveryPartitions = []
this.selectedPartition = null
this.loading = true
await this.getDrives()
await this.getPartitions()
}
async getDrives () {
async getPartitions () {
try {
let drives = (await this.apiService.getDrives()).filter(d => !!d['embassy-os'])
let drives = (await this.apiService.getDrives())
this.recoveryPartitions = drives.map(d => d.partitions.map(p => ({ partition: p, vendor: d.vendor, model: d.model})).filter(p => p.partition['embassy-os']?.full)).flat()
// if theres no product key, only show 0.2s
if (!this.stateService.hasProductKey) {
drives = drives.filter(d => d['embassy-os'].version.startsWith('0.2'))
this.recoveryPartitions = this.recoveryPartitions.filter(p => p.partition['embassy-os']?.version.startsWith('0.2'))
}
this.recoveryDrives = drives
} catch (e) {
this.errorToastService.present(`${e.message}: ${e.data}`)
} finally {
@@ -54,30 +53,29 @@ export class RecoverPage {
}
}
async chooseDrive (drive: DiskInfo) {
if (this.selectedDrive?.logicalname === drive.logicalname) {
this.selectedDrive = null
async choosePartition (partition: PartitionInfo) {
if (this.selectedPartition?.logicalname === partition.logicalname) {
this.selectedPartition = null
return
} else {
this.selectedDrive = drive
this.selectedPartition = partition
}
if ((drive['embassy-os'].version.startsWith('0.2') && this.stateService.hasProductKey) || this.passwords[drive.logicalname] || this.prodKeys[drive.logicalname]) return
if ((partition['embassy-os'].version.startsWith('0.2') && this.stateService.hasProductKey) || this.passwords[partition.logicalname] || this.prodKeys[partition.logicalname]) return
if (this.stateService.hasProductKey) {
const modal = await this.modalController.create({
component: PasswordPage,
componentProps: {
recoveryDrive: this.selectedDrive,
recoveryPartition: this.selectedPartition,
},
cssClass: 'alertlike-modal',
})
modal.onDidDismiss().then(async ret => {
if (!ret.data) {
this.selectedDrive = null
this.selectedPartition = null
} else if (ret.data.password) {
this.passwords[drive.logicalname] = ret.data.password
this.passwords[partition.logicalname] = ret.data.password
}
})
@@ -86,15 +84,15 @@ export class RecoverPage {
const modal = await this.modalController.create({
component: ProdKeyModal,
componentProps: {
recoveryDrive: this.selectedDrive,
recoveryPartition: this.selectedPartition,
},
cssClass: 'alertlike-modal',
})
modal.onDidDismiss().then(async ret => {
if (!ret.data) {
this.selectedDrive = null
this.selectedPartition = null
} else if (ret.data.productKey) {
this.prodKeys[drive.logicalname] = ret.data.productKey
this.prodKeys[partition.logicalname] = ret.data.productKey
}
})
@@ -102,9 +100,9 @@ export class RecoverPage {
}
}
async selectRecoveryDrive () {
this.stateService.recoveryDrive = this.selectedDrive
const pw = this.passwords[this.selectedDrive.logicalname]
async selectRecoveryPartition () {
this.stateService.recoveryPartition = this.selectedPartition
const pw = this.passwords[this.selectedPartition.logicalname]
if (pw) {
this.stateService.recoveryPassword = pw
}

View File

@@ -19,7 +19,7 @@ export interface GetStatusRes {
export interface SetupEmbassyReq {
'embassy-logicalname': string
'embassy-password': string
'recovery-drive'?: DiskInfo
'recovery-partition'?: PartitionInfo
'recovery-password'?: string
}
@@ -34,7 +34,6 @@ export interface DiskInfo {
model: string | null,
partitions: PartitionInfo[],
capacity: number,
'embassy-os': EmbassyOsDiskInfo | null,
}
export interface RecoveryStatusRes {
@@ -42,13 +41,16 @@ export interface RecoveryStatusRes {
'total-bytes': number
}
interface PartitionInfo {
export interface PartitionInfo {
logicalname: string,
label: string | null,
capacity: number,
used: number | null,
'embassy-os': EmbassyOsRecoveryInfo | null,
}
interface EmbassyOsDiskInfo {
export interface EmbassyOsRecoveryInfo {
version: string,
full: boolean, // contains full embassy backup
'password-hash': string | null, // null for 0.2.x
}

View File

@@ -18,7 +18,7 @@ export class MockApiService extends ApiService {
async getStatus () {
await pauseFor(1000)
return {
'product-key': true,
'product-key': false,
migrating: false,
}
}
@@ -36,31 +36,24 @@ export class MockApiService extends ApiService {
label: 'label 1',
capacity: 100000,
used: 200.1255312,
'embassy-os': null,
},
{
logicalname: 'sda2',
label: 'label 2',
capacity: 50000,
used: 200.1255312,
'embassy-os': null,
},
],
capacity: 150000,
'embassy-os': null,
},
{
vendor: 'Vendor',
model: 'Model',
logicalname: 'dev/sdb',
partitions: [
// {
// logicalname: 'sdb1',
// label: null,
// capacity: 1600.01234,
// used: 0.00,
// }
],
partitions: [],
capacity: 1600.01234,
'embassy-os': null,
},
{
vendor: 'Vendor',
@@ -72,12 +65,36 @@ export class MockApiService extends ApiService {
label: 'label 1',
capacity: null,
used: null,
'embassy-os': {
version: '0.3.3',
full: true,
'password-hash': 'asdfasdfasdf',
},
},
{
logicalname: 'sdc1MOCKTESTER',
label: 'label 1',
capacity: null,
used: null,
'embassy-os': {
version: '0.3.6',
full: true,
'password-hash': '$argon2d$v=19$m=1024,t=1,p=1$YXNkZmFzZGZhc2RmYXNkZg$Ceev1I901G6UwU+hY0sHrFZ56D+o+LNJ',
},
},
{
logicalname: 'sdc1',
label: 'label 1',
capacity: null,
used: null,
'embassy-os': {
version: '0.3.3',
full: false,
'password-hash': 'asdfasdfasdf',
},
},
],
capacity: 100000,
'embassy-os': {
version: '0.3.3',
},
},
{
vendor: 'Vendor',
@@ -89,12 +106,14 @@ export class MockApiService extends ApiService {
label: null,
capacity: 10000,
used: null,
'embassy-os': {
version: '0.2.7',
full: true,
'password-hash': 'asdfasdfasdf',
},
},
],
capacity: 10000,
'embassy-os': {
version: '0.2.7',
},
},
]
}

View File

@@ -1,6 +1,6 @@
import { Injectable } from '@angular/core'
import { BehaviorSubject } from 'rxjs'
import { ApiService, DiskInfo } from './api/api.service'
import { ApiService, DiskInfo, PartitionInfo } from './api/api.service'
import { ErrorToastService } from './error-toast.service'
@Injectable({
@@ -14,7 +14,7 @@ export class StateService {
storageDrive: DiskInfo
embassyPassword: string
recoveryDrive: DiskInfo
recoveryPartition: PartitionInfo
recoveryPassword: string
dataTransferProgress: { bytesTransferred: number; totalBytes: number } | null
dataProgress = 0
@@ -61,7 +61,7 @@ export class StateService {
const ret = await this.apiService.setupEmbassy({
'embassy-logicalname': this.storageDrive.logicalname,
'embassy-password': this.embassyPassword,
'recovery-drive': this.recoveryDrive,
'recovery-partition': this.recoveryPartition,
'recovery-password': this.recoveryPassword,
})
this.torAddress = ret['tor-address']

View File

@@ -212,9 +212,9 @@ checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb"
[[package]]
name = "bitflags"
version = "1.2.1"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitvec"
@@ -461,7 +461,7 @@ dependencies = [
"lazy_static",
"linear-map",
"log",
"nix 0.22.1",
"nix 0.23.0",
"pest",
"pest_derive",
"rand 0.7.3",
@@ -860,6 +860,7 @@ dependencies = [
"hex",
"hmac",
"http",
"hyper",
"hyper-ws-listener",
"indexmap",
"isocountry",
@@ -868,7 +869,7 @@ dependencies = [
"lazy_static",
"libc",
"log",
"nix 0.22.1",
"nix 0.23.0",
"num",
"openssh-keys",
"openssl",
@@ -1366,9 +1367,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]]
name = "hyper"
version = "0.14.12"
version = "0.14.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13f67199e765030fa08fe0bd581af683f0d5bc04ea09c2b1102012c5fb90e7fd"
checksum = "15d1cfb9e4f68655fa04c01f59edb405b6074a0f7118ea881e5026e4a1cd8593"
dependencies = [
"bytes 1.1.0",
"futures-channel",
@@ -1807,9 +1808,9 @@ dependencies = [
[[package]]
name = "nix"
version = "0.22.1"
version = "0.23.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7555d6c7164cc913be1ce7f95cbecdabda61eb2ccd89008524af306fb7f5031"
checksum = "f305c2c2e4c39a82f7bf0bf65fb557f9070ce06781d4f2454295cc34b1c43188"
dependencies = [
"bitflags",
"cc",
@@ -2072,14 +2073,15 @@ dependencies = [
"json-patch",
"json-ptr",
"lazy_static",
"log",
"nix 0.22.1",
"nix 0.23.0",
"patch-db-macro",
"serde",
"serde_cbor 0.11.1",
"serde_json",
"thiserror",
"tokio 1.12.0",
"tracing",
"tracing-error",
]
[[package]]
@@ -3677,9 +3679,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6"
[[package]]
name = "tracing"
version = "0.1.26"
version = "0.1.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d"
checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105"
dependencies = [
"cfg-if",
"pin-project-lite 0.2.7",
@@ -3700,9 +3702,9 @@ dependencies = [
[[package]]
name = "tracing-core"
version = "0.1.19"
version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2ca517f43f0fb96e0c3072ed5c275fe5eece87e8cb52f4a77b69226d3b1c9df8"
checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4"
dependencies = [
"lazy_static",
]

View File

@@ -18,7 +18,7 @@ itertools = "0.10.0"
lazy_static = "1.4"
linear-map = { version = "1.2", features = ["serde_impl"] }
log = "0.4.11"
nix = "0.22.0"
nix = "0.23.0"
pest = "2.1"
pest_derive = "2.1"
rand = "0.7"

View File

@@ -1,6 +1,6 @@
FROM alpine:latest
RUN apk update && apk add duplicity && apk add curl
RUN apk update && apk add duplicity curl
ADD ./target/aarch64-unknown-linux-musl/release/compat /usr/local/bin/compat
ENTRYPOINT ["compat"]

View File

@@ -1,74 +1,63 @@
use std::path::Path;
use std::{path::Path, process::Stdio};
pub fn create_backup<P: AsRef<Path>>(
mountpoint: P,
data_path: P,
app_id: &str,
pub fn create_backup(
mountpoint: impl AsRef<Path>,
data_path: impl AsRef<Path>,
) -> Result<(), anyhow::Error> {
let path = std::fs::canonicalize(mountpoint)?;
let volume_path = Path::new(embassy::VOLUMES).join(app_id);
let mountpoint = std::fs::canonicalize(mountpoint)?;
let data_path = std::fs::canonicalize(data_path)?;
let exclude = if volume_path.is_dir() {
let ignore_path = volume_path.join(".backupignore");
if ignore_path.is_file() {
std::fs::read(ignore_path)?
let ignore_path = data_path.join(".backupignore");
let exclude = if ignore_path.is_file() {
std::fs::read_to_string(ignore_path)?
} else {
Vec::new()
}
} else {
return Err(anyhow::anyhow!("Volume For {} Does Not Exist", app_id))
String::new()
};
let mut data_cmd = std::process::Command::new("duplicity");
for exclude in exclude {
for exclude in exclude.lines().map(|s| s.trim()).filter(|s| !s.is_empty()) {
if exclude.to_string().starts_with('!') {
data_cmd.arg(format!(
"--include={}",
volume_path.join(exclude.to_string().trim_start_matches('!')).display()
data_path
.join(exclude.to_string().trim_start_matches('!'))
.display()
));
} else {
data_cmd.arg(format!("--exclude={}", volume_path.join(exclude.to_string()).display()));
data_cmd.arg(format!(
"--exclude={}",
data_path.join(exclude.to_string()).display()
));
}
}
let data_res = data_cmd
.arg(volume_path)
.arg(format!("file://{}", data_path.as_ref().display().to_string()))
.arg(data_path)
.arg(format!("file://{}", mountpoint.display().to_string()))
.output();
data_res?;
Ok(())
}
pub fn restore_backup<P: AsRef<Path>>(
path: P,
data_path: P,
app_id: &str,
pub fn restore_backup(
mountpoint: impl AsRef<Path>,
data_path: impl AsRef<Path>,
) -> Result<(), anyhow::Error> {
let path = std::fs::canonicalize(path)?;
if !path.is_dir() {
anyhow::anyhow!("Backup Path Must Be Directory");
}
let metadata_path = path.join("metadata.yaml");
let volume_path = Path::new(embassy::VOLUMES).join(app_id);
let mountpoint = std::fs::canonicalize(mountpoint)?;
let data_path = std::fs::canonicalize(data_path)?;
let mut data_cmd = std::process::Command::new("duplicity");
data_cmd
let data_output = std::process::Command::new("duplicity")
.arg("--force")
.arg(format!("file://{:#?}", data_path.as_ref().display().to_string()))
.arg(&volume_path);
let data_output = data_cmd.status()?;
if !data_output.success() {
return Err(anyhow::anyhow!("duplicity error for {}", app_id))
.arg(format!("file://{}", mountpoint.display().to_string()))
.arg(&data_path)
.stderr(Stdio::piped())
.output()?;
if !data_output.status.success() {
return Err(anyhow::anyhow!(
"duplicity error: {}",
String::from_utf8(data_output.stderr).unwrap()
));
}
std::fs::copy(
metadata_path,
Path::new(embassy::VOLUMES)
.join(app_id)
.join("start9")
.join("restore.yaml"),
)?;
Ok(())
}

View File

@@ -125,11 +125,6 @@ fn inner_main() -> Result<(), anyhow::Error> {
SubCommand::with_name("duplicity")
.subcommand(
SubCommand::with_name("create")
.arg(
Arg::with_name("package-id")
.help("The `id` field from the manifest file")
.required(true),
)
.arg(
Arg::with_name("mountpoint")
.help("The backups mount point")
@@ -143,11 +138,6 @@ fn inner_main() -> Result<(), anyhow::Error> {
)
.subcommand(
SubCommand::with_name("restore")
.arg(
Arg::with_name("package-id")
.help("The `id` field from the manifest file")
.required(true),
)
.arg(
Arg::with_name("mountpoint")
.help("The backups mount point")
@@ -271,7 +261,6 @@ fn inner_main() -> Result<(), anyhow::Error> {
let res = create_backup(
sub_m.value_of("mountpoint").unwrap(),
sub_m.value_of("datapath").unwrap(),
sub_m.value_of("package-id").unwrap(),
);
match res {
Ok(r) => {
@@ -283,9 +272,8 @@ fn inner_main() -> Result<(), anyhow::Error> {
}
("restore", Some(sub_m)) => {
let res = restore_backup(
sub_m.value_of("package-id").unwrap(),
sub_m.value_of("datapath").unwrap(),
sub_m.value_of("mountpoint").unwrap(),
sub_m.value_of("datapath").unwrap(),
);
match res {
Ok(r) => {