Feature/multi platform (#1866)

* wip

* wip

* wip

* wip

* wip

* wip

* remove debian dir

* lazy env and git hash

* remove env and git hash on clean

* don't leave project dir

* use docker for native builds

* start9 rust

* correctly mount registry

* remove systemd config

* switch to /usr/bin

* disable sound for now

* wip

* change disk list

* multi-arch images

* multi-arch system images

* default aarch64

* edition 2021

* dynamic wifi interface name

* use wifi interface from config

* bugfixes

* add beep based sound

* wip

* wip

* wip

* separate out raspberry pi specific files

* fixes

* use new initramfs always

* switch journald conf to sed script

* fixes

* fix permissions

* talking about kernel modules not scripts

* fix

* fix

* switch to MBR

* install to /usr/lib

* fixes

* fixes

* fixes

* fixes

* add media config to cfg path

* fixes

* fixes

* fixes

* raspi image fixes

* fix test

* fix workflow

* sync boot partition

* gahhhhh
This commit is contained in:
Aiden McClelland
2022-10-19 23:01:23 -06:00
parent 0511680fc5
commit 6ad9a5952e
87 changed files with 1734 additions and 1162 deletions

View File

@@ -8,17 +8,15 @@ use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::config::{Config, ConfigSpec};
use crate::context::RpcContext;
use crate::id::ImageId;
use crate::procedure::docker::DockerContainer;
use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
use crate::util::Version;
use crate::volume::Volumes;
use crate::{
config::{Config, ConfigSpec},
procedure::docker::DockerContainer,
};
use crate::{Error, ResultExt};
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct Actions(pub BTreeMap<ActionId, Action>);

View File

@@ -14,17 +14,18 @@ use tokio::io::AsyncWriteExt;
use tracing::instrument;
use self::target::PackageBackupInfo;
use crate::context::RpcContext;
use crate::dependencies::reconfigure_dependents_with_live_pointers;
use crate::id::ImageId;
use crate::install::PKG_ARCHIVE_DIR;
use crate::net::interface::{InterfaceId, Interfaces};
use crate::procedure::docker::DockerContainer;
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::util::serde::IoFormat;
use crate::util::Version;
use crate::version::{Current, VersionT};
use crate::volume::{backup_dir, Volume, VolumeId, Volumes, BACKUP_DIR};
use crate::{context::RpcContext, procedure::docker::DockerContainer};
use crate::{Error, ErrorKind, ResultExt};
pub mod backup_bulk;

View File

@@ -236,7 +236,7 @@ pub async fn recover_full_embassy(
os_backup.tor_key.public().get_onion_address(),
os_backup.root_ca_cert,
async move {
let rpc_ctx = RpcContext::init(ctx.config_path.as_ref(), disk_guid).await?;
let rpc_ctx = RpcContext::init(ctx.config_path.clone(), disk_guid).await?;
let mut db = rpc_ctx.db.handle();
let ids = backup_guard

View File

@@ -139,8 +139,10 @@ pub async fn list(
#[context] ctx: RpcContext,
) -> Result<BTreeMap<BackupTargetId, BackupTarget>, Error> {
let mut sql_handle = ctx.secret_store.acquire().await?;
let (disks_res, cifs) =
tokio::try_join!(crate::disk::util::list(), cifs::list(&mut sql_handle),)?;
let (disks_res, cifs) = tokio::try_join!(
crate::disk::util::list(&ctx.os_partitions),
cifs::list(&mut sql_handle),
)?;
Ok(disks_res
.into_iter()
.flat_map(|mut disk| {

View File

@@ -1,4 +1,4 @@
use std::path::Path;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
@@ -27,8 +27,11 @@ fn status_fn(_: i32) -> StatusCode {
}
#[instrument]
async fn setup_or_init(cfg_path: Option<&str>) -> Result<(), Error> {
if tokio::fs::metadata("/embassy-os/disk.guid").await.is_err() {
async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await
.is_err()
{
#[cfg(feature = "avahi")]
let _mdns = MdnsController::init();
tokio::fs::write(
@@ -68,7 +71,7 @@ async fn setup_or_init(cfg_path: Option<&str>) -> Result<(), Error> {
.with_kind(embassy::ErrorKind::Network)?;
} else {
let cfg = RpcContextConfig::load(cfg_path).await?;
let guid_string = tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
let guid_string = tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?;
let guid = guid_string.trim();
let requires_reboot = embassy::disk::main::import(
@@ -119,7 +122,7 @@ async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
}
#[instrument]
async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
if tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {
tokio::fs::remove_file(STANDBY_MODE_PATH).await?;
Command::new("sync").invoke(ErrorKind::Filesystem).await?;
@@ -129,10 +132,10 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
embassy::sound::BEP.play().await?;
run_script_if_exists("/embassy-os/preinit.sh").await;
run_script_if_exists("/media/embassy/config/preinit.sh").await;
let res = if let Err(e) = setup_or_init(cfg_path).await {
async {
let res = if let Err(e) = setup_or_init(cfg_path.clone()).await {
async move {
tracing::error!("{}", e.source);
tracing::debug!("{}", e.source);
embassy::sound::BEETHOVEN.play().await?;
@@ -156,9 +159,12 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
.await?;
let ctx = DiagnosticContext::init(
cfg_path,
if tokio::fs::metadata("/embassy-os/disk.guid").await.is_ok() {
if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await
.is_ok()
{
Some(Arc::new(
tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?
.trim()
.to_owned(),
@@ -200,7 +206,7 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
Ok(None)
};
run_script_if_exists("/embassy-os/postinit.sh").await;
run_script_if_exists("/media/embassy/config/postinit.sh").await;
res
}
@@ -217,7 +223,7 @@ fn main() {
EmbassyLogger::init();
let cfg_path = matches.value_of("config");
let cfg_path = matches.value_of("config").map(|p| Path::new(p).to_owned());
let res = {
let rt = tokio::runtime::Builder::new_multi_thread()
.enable_all()

View File

@@ -1,3 +1,4 @@
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
@@ -39,12 +40,12 @@ fn err_to_500(e: Error) -> Response<Body> {
}
#[instrument]
async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
let (rpc_ctx, shutdown) = {
let rpc_ctx = RpcContext::init(
cfg_path,
Arc::new(
tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?
.trim()
.to_owned(),
@@ -292,7 +293,7 @@ fn main() {
EmbassyLogger::init();
let cfg_path = matches.value_of("config");
let cfg_path = matches.value_of("config").map(|p| Path::new(p).to_owned());
let res = {
let rt = tokio::runtime::Builder::new_multi_thread()
@@ -300,7 +301,7 @@ fn main() {
.build()
.expect("failed to initialize runtime");
rt.block_on(async {
match inner_main(cfg_path).await {
match inner_main(cfg_path.clone()).await {
Ok(a) => Ok(a),
Err(e) => {
(|| async {
@@ -327,9 +328,12 @@ fn main() {
.await?;
let ctx = DiagnosticContext::init(
cfg_path,
if tokio::fs::metadata("/embassy-os/disk.guid").await.is_ok() {
if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await
.is_ok()
{
Some(Arc::new(
tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?
.trim()
.to_owned(),

View File

@@ -7,14 +7,15 @@ use serde::{Deserialize, Serialize};
use tracing::instrument;
use super::{Config, ConfigSpec};
use crate::context::RpcContext;
use crate::dependencies::Dependencies;
use crate::id::ImageId;
use crate::procedure::docker::DockerContainer;
use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::status::health_check::HealthCheckId;
use crate::util::Version;
use crate::volume::Volumes;
use crate::{context::RpcContext, procedure::docker::DockerContainer};
use crate::{Error, ResultExt};
#[derive(Debug, Deserialize, Serialize, HasModel)]

View File

@@ -13,6 +13,7 @@ use rpc_toolkit::command;
use serde_json::Value;
use tracing::instrument;
use crate::context::RpcContext;
use crate::db::model::{CurrentDependencies, CurrentDependencyInfo, CurrentDependents};
use crate::dependencies::{
add_dependent_to_current_dependents_lists, break_transitive, heal_all_dependents_transitive,
@@ -20,11 +21,11 @@ use crate::dependencies::{
DependencyErrors, DependencyReceipt, TaggedDependencyError, TryHealReceipts,
};
use crate::install::cleanup::{remove_from_current_dependents_lists, UpdateDependencyReceipts};
use crate::procedure::docker::DockerContainer;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::util::display_none;
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
use crate::Error;
use crate::{context::RpcContext, procedure::docker::DockerContainer};
pub mod action;
pub mod spec;

View File

@@ -22,11 +22,12 @@ use sqlx::PgPool;
use super::util::{self, CharSet, NumRange, UniqueBy, STATIC_NULL};
use super::{Config, MatchError, NoMatchWithPath, TimeoutError, TypeOf};
use crate::config::ConfigurationError;
use crate::context::RpcContext;
use crate::net::interface::InterfaceId;
use crate::procedure::docker::DockerContainer;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::Error;
use crate::{config::ConfigurationError, procedure::docker::DockerContainer};
// Config Value Specifications
#[async_trait]

View File

@@ -16,13 +16,13 @@ use rpc_toolkit::Context;
use serde::Deserialize;
use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool;
use tokio::fs::File;
use tokio::process::Command;
use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
use tracing::instrument;
use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation};
use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry};
use crate::disk::OsPartitionInfo;
use crate::hostname::HostNameReceipt;
use crate::init::{init_postgres, pgloader};
use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts};
@@ -35,13 +35,16 @@ use crate::notifications::NotificationManager;
use crate::setup::password_hash;
use crate::shutdown::Shutdown;
use crate::status::{MainStatus, Status};
use crate::util::io::from_yaml_async_reader;
use crate::util::{AsyncFileExt, Invoke};
use crate::util::config::load_config_from_paths;
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt};
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct RpcContextConfig {
pub wifi_interface: Option<String>,
pub ethernet_interface: String,
pub os_partitions: OsPartitionInfo,
pub migration_batch_rows: Option<usize>,
pub migration_prefetch_rows: Option<usize>,
pub bind_rpc: Option<SocketAddr>,
@@ -55,19 +58,20 @@ pub struct RpcContextConfig {
pub log_server: Option<Url>,
}
impl RpcContextConfig {
pub async fn load<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> {
let cfg_path = path
.as_ref()
.map(|p| p.as_ref())
.unwrap_or(Path::new(crate::util::config::CONFIG_PATH));
if let Some(f) = File::maybe_open(cfg_path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?
{
from_yaml_async_reader(f).await
} else {
Ok(Self::default())
}
pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
tokio::task::spawn_blocking(move || {
load_config_from_paths(
path.as_ref()
.into_iter()
.map(|p| p.as_ref())
.chain(std::iter::once(Path::new(
"/media/embassy/config/config.yaml",
)))
.chain(std::iter::once(Path::new(crate::util::config::CONFIG_PATH))),
)
})
.await
.unwrap()
}
pub fn datadir(&self) -> &Path {
self.datadir
@@ -116,6 +120,9 @@ impl RpcContextConfig {
pub struct RpcContextSeed {
is_closed: AtomicBool,
pub os_partitions: OsPartitionInfo,
pub wifi_interface: Option<String>,
pub ethernet_interface: String,
pub bind_rpc: SocketAddr,
pub bind_ws: SocketAddr,
pub bind_static: SocketAddr,
@@ -134,7 +141,7 @@ pub struct RpcContextSeed {
pub notification_manager: NotificationManager,
pub open_authed_websockets: Mutex<BTreeMap<HashSessionToken, Vec<oneshot::Sender<()>>>>,
pub rpc_stream_continuations: Mutex<BTreeMap<RequestGuid, RpcContinuation>>,
pub wifi_manager: Arc<RwLock<WpaCli>>,
pub wifi_manager: Option<Arc<RwLock<WpaCli>>>,
}
pub struct RpcCleanReceipts {
@@ -209,7 +216,7 @@ impl RpcSetNginxReceipts {
pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext {
#[instrument(skip(cfg_path))]
pub async fn init<P: AsRef<Path>>(
pub async fn init<P: AsRef<Path> + Send + 'static>(
cfg_path: Option<P>,
disk_guid: Arc<String>,
) -> Result<Self, Error> {
@@ -248,10 +255,13 @@ impl RpcContext {
tracing::info!("Initialized Notification Manager");
let seed = Arc::new(RpcContextSeed {
is_closed: AtomicBool::new(false),
datadir: base.datadir().to_path_buf(),
os_partitions: base.os_partitions,
wifi_interface: base.wifi_interface.clone(),
ethernet_interface: base.ethernet_interface,
bind_rpc: base.bind_rpc.unwrap_or(([127, 0, 0, 1], 5959).into()),
bind_ws: base.bind_ws.unwrap_or(([127, 0, 0, 1], 5960).into()),
bind_static: base.bind_static.unwrap_or(([127, 0, 0, 1], 5961).into()),
datadir: base.datadir().to_path_buf(),
disk_guid,
db,
secret_store,
@@ -266,7 +276,9 @@ impl RpcContext {
notification_manager,
open_authed_websockets: Mutex::new(BTreeMap::new()),
rpc_stream_continuations: Mutex::new(BTreeMap::new()),
wifi_manager: Arc::new(RwLock::new(WpaCli::init("wlan0".to_string()))),
wifi_manager: base
.wifi_interface
.map(|i| Arc::new(RwLock::new(WpaCli::init(i)))),
});
let res = Self(seed);

View File

@@ -11,18 +11,17 @@ use rpc_toolkit::Context;
use serde::{Deserialize, Serialize};
use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool;
use tokio::fs::File;
use tokio::sync::broadcast::Sender;
use tokio::sync::RwLock;
use tracing::instrument;
use url::Host;
use crate::db::model::Database;
use crate::disk::OsPartitionInfo;
use crate::init::{init_postgres, pgloader};
use crate::net::tor::os_key;
use crate::setup::{password_hash, RecoveryStatus};
use crate::util::io::from_yaml_async_reader;
use crate::util::AsyncFileExt;
use crate::util::config::load_config_from_paths;
use crate::{Error, ResultExt};
#[derive(Clone, Serialize, Deserialize)]
@@ -36,6 +35,7 @@ pub struct SetupResult {
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct SetupContextConfig {
pub os_partitions: OsPartitionInfo,
pub migration_batch_rows: Option<usize>,
pub migration_prefetch_rows: Option<usize>,
pub bind_rpc: Option<SocketAddr>,
@@ -43,19 +43,20 @@ pub struct SetupContextConfig {
}
impl SetupContextConfig {
#[instrument(skip(path))]
pub async fn load<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> {
let cfg_path = path
.as_ref()
.map(|p| p.as_ref())
.unwrap_or(Path::new(crate::util::config::CONFIG_PATH));
if let Some(f) = File::maybe_open(cfg_path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?
{
from_yaml_async_reader(f).await
} else {
Ok(Self::default())
}
pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
tokio::task::spawn_blocking(move || {
load_config_from_paths(
path.as_ref()
.into_iter()
.map(|p| p.as_ref())
.chain(std::iter::once(Path::new(
"/media/embassy/config/config.yaml",
)))
.chain(std::iter::once(Path::new(crate::util::config::CONFIG_PATH))),
)
})
.await
.unwrap()
}
pub fn datadir(&self) -> &Path {
self.datadir
@@ -65,6 +66,7 @@ impl SetupContextConfig {
}
pub struct SetupContextSeed {
pub os_partitions: OsPartitionInfo,
pub config_path: Option<PathBuf>,
pub migration_batch_rows: usize,
pub migration_prefetch_rows: usize,
@@ -90,11 +92,12 @@ impl AsRef<Jwk> for SetupContextSeed {
pub struct SetupContext(Arc<SetupContextSeed>);
impl SetupContext {
#[instrument(skip(path))]
pub async fn init<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> {
let cfg = SetupContextConfig::load(path.as_ref()).await?;
pub async fn init<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
let cfg = SetupContextConfig::load(path.as_ref().map(|p| p.as_ref().to_owned())).await?;
let (shutdown, _) = tokio::sync::broadcast::channel(1);
let datadir = cfg.datadir().to_owned();
Ok(Self(Arc::new(SetupContextSeed {
os_partitions: cfg.os_partitions,
config_path: path.as_ref().map(|p| p.as_ref().to_owned()),
migration_batch_rows: cfg.migration_batch_rows.unwrap_or(25000),
migration_prefetch_rows: cfg.migration_prefetch_rows.unwrap_or(100_000),

View File

@@ -14,10 +14,12 @@ use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::config::action::{ConfigActions, ConfigRes};
use crate::config::spec::PackagePointerSpec;
use crate::config::{not_found, Config, ConfigReceipts, ConfigSpec};
use crate::context::RpcContext;
use crate::db::model::{CurrentDependencies, CurrentDependents, InstalledPackageDataEntry};
use crate::procedure::docker::DockerContainer;
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::status::health_check::{HealthCheckId, HealthCheckResult};
@@ -26,10 +28,6 @@ use crate::util::serde::display_serializable;
use crate::util::{display_none, Version};
use crate::volume::Volumes;
use crate::Error;
use crate::{
config::action::{ConfigActions, ConfigRes},
procedure::docker::DockerContainer,
};
#[command(subcommands(configure))]
pub fn dependency() -> Result<(), Error> {

View File

@@ -58,7 +58,7 @@ pub fn disk() -> Result<(), Error> {
#[command(rename = "forget", display(display_none))]
pub async fn forget_disk() -> Result<(), Error> {
let disk_guid = Path::new("/embassy-os/disk.guid");
let disk_guid = Path::new("/media/embassy/config/disk.guid");
if tokio::fs::metadata(disk_guid).await.is_ok() {
tokio::fs::remove_file(disk_guid).await?;
}

View File

@@ -63,7 +63,7 @@ fn backup_existing_undo_file<'a>(path: &'a Path) -> BoxFuture<'a, Result<(), Err
pub async fn e2fsck_aggressive(
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
let undo_path = Path::new("/embassy-os")
let undo_path = Path::new("/media/embassy/config")
.join(
logicalname
.as_ref()

View File

@@ -1,6 +1,10 @@
use std::path::{Path, PathBuf};
use clap::ArgMatches;
use rpc_toolkit::command;
use serde::Deserialize;
use crate::context::RpcContext;
use crate::disk::util::DiskInfo;
use crate::util::display_none;
use crate::util::serde::{display_serializable, IoFormat};
@@ -12,7 +16,19 @@ pub mod mount;
pub mod util;
pub const BOOT_RW_PATH: &str = "/media/boot-rw";
pub const REPAIR_DISK_PATH: &str = "/embassy-os/repair-disk";
pub const REPAIR_DISK_PATH: &str = "/media/embassy/config/repair-disk";
// Logical names of the partitions that make up the OS install itself.
// Deserialized (kebab-case) from the embassy config file; used by disk
// listing to exclude the OS's own partitions from user-facing results.
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct OsPartitionInfo {
pub boot: PathBuf,
pub root: PathBuf,
}
impl OsPartitionInfo {
// Returns true if `logicalname` is exactly the boot or root partition path.
// Note: this is a literal path comparison — no canonicalization is performed.
pub fn contains(&self, logicalname: impl AsRef<Path>) -> bool {
&*self.boot == logicalname.as_ref() || &*self.root == logicalname.as_ref()
}
}
#[command(subcommands(list, repair))]
pub fn disk() -> Result<(), Error> {
@@ -75,11 +91,12 @@ fn display_disk_info(info: Vec<DiskInfo>, matches: &ArgMatches) {
#[command(display(display_disk_info))]
pub async fn list(
#[context] ctx: RpcContext,
#[allow(unused_variables)]
#[arg]
format: Option<IoFormat>,
) -> Result<Vec<DiskInfo>, Error> {
crate::disk::util::list().await
crate::disk::util::list(&ctx.os_partitions).await
}
#[command(display(display_none))]

View File

@@ -0,0 +1,54 @@
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use async_trait::async_trait;
use digest::generic_array::GenericArray;
use digest::{Digest, OutputSizeUser};
use sha2::Sha256;
use super::{FileSystem, MountType, ReadOnly};
use crate::disk::mount::util::bind;
use crate::{Error, ResultExt};
/// A bind-mount filesystem source: re-mounts an existing directory
/// (`src_dir`) at another mountpoint, optionally read-only.
pub struct Bind<SrcDir: AsRef<Path>> {
// Directory to bind-mount; generic so callers may pass &Path, PathBuf, etc.
src_dir: SrcDir,
}
impl<SrcDir: AsRef<Path>> Bind<SrcDir> {
/// Wraps `src_dir` as a bind-mount source. No I/O occurs until `mount`.
pub fn new(src_dir: SrcDir) -> Self {
Self { src_dir }
}
}
#[async_trait]
impl<SrcDir: AsRef<Path> + Send + Sync> FileSystem for Bind<SrcDir> {
/// Bind-mounts `src_dir` at `mountpoint` via the `bind` helper;
/// a `ReadOnly` `mount_type` requests a read-only bind mount.
async fn mount<P: AsRef<Path> + Send + Sync>(
&self,
mountpoint: P,
mount_type: MountType,
) -> Result<(), Error> {
bind(
self.src_dir.as_ref(),
mountpoint,
// third argument is the read-only flag
matches!(mount_type, ReadOnly),
)
.await
}
/// Stable identity for this mount source: SHA-256 over the literal tag
/// "Bind" followed by the canonicalized (symlink-resolved) source path
/// bytes, so two `Bind`s of the same real directory hash identically.
/// Fails with `ErrorKind::Filesystem` context if canonicalization fails
/// (e.g. the source directory does not exist).
async fn source_hash(
&self,
) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
let mut sha = Sha256::new();
sha.update("Bind");
sha.update(
tokio::fs::canonicalize(self.src_dir.as_ref())
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
self.src_dir.as_ref().display().to_string(),
)
})?
.as_os_str()
.as_bytes(),
);
Ok(sha.finalize())
}
}

View File

@@ -0,0 +1,52 @@
use std::path::Path;
use async_trait::async_trait;
use digest::generic_array::GenericArray;
use digest::{Digest, OutputSizeUser};
use reqwest::Url;
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use super::{FileSystem, MountType};
use crate::util::Invoke;
use crate::Error;
/// Mounts the single file at `url` under `mountpoint` using the external
/// `httpdirfs` FUSE binary (with its local cache enabled).
/// Creates `mountpoint` (and parents) first; errors from the spawned
/// process surface as `ErrorKind::Filesystem`.
/// NOTE(review): requires `httpdirfs` to be present on PATH — presumably
/// guaranteed by the OS image; confirm.
pub async fn mount_httpdirfs(url: &Url, mountpoint: impl AsRef<Path>) -> Result<(), Error> {
tokio::fs::create_dir_all(mountpoint.as_ref()).await?;
let mut cmd = tokio::process::Command::new("httpdirfs");
cmd.arg("--cache")
.arg("--single-file-mode")
.arg(url.as_str())
.arg(mountpoint.as_ref());
cmd.invoke(crate::ErrorKind::Filesystem).await?;
Ok(())
}
/// A `FileSystem` backed by a remote HTTP URL, mounted via `httpdirfs`.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct HttpDirFS {
// Remote location to mount; also serves as the mount's identity (see source_hash).
url: Url,
}
impl HttpDirFS {
/// Wraps `url` as a mountable HTTP filesystem. No I/O occurs until `mount`.
pub fn new(url: Url) -> Self {
HttpDirFS { url }
}
}
#[async_trait]
impl FileSystem for HttpDirFS {
/// Mounts the URL at `mountpoint`. `mount_type` is ignored: an HTTP
/// mount is inherently read-only regardless of what is requested.
async fn mount<P: AsRef<Path> + Send + Sync>(
&self,
mountpoint: P,
_mount_type: MountType,
) -> Result<(), Error> {
mount_httpdirfs(&self.url, mountpoint).await
}
/// Stable identity for this mount source: SHA-256 over the literal tag
/// "HttpDirFS" followed by the URL string. Unlike `Bind::source_hash`,
/// this is pure computation and cannot fail at runtime.
async fn source_hash(
&self,
) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
let mut sha = Sha256::new();
sha.update("HttpDirFS");
sha.update(self.url.as_str());
Ok(sha.finalize())
}
}

View File

@@ -7,9 +7,11 @@ use sha2::Sha256;
use crate::Error;
pub mod bind;
pub mod block_dev;
pub mod cifs;
pub mod ecryptfs;
pub mod httpdirfs;
pub mod label;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]

View File

@@ -11,7 +11,7 @@ use super::util::unmount;
use crate::util::Invoke;
use crate::Error;
pub const TMP_MOUNTPOINT: &'static str = "/media/embassy-os/tmp";
pub const TMP_MOUNTPOINT: &'static str = "/media/embassy/tmp";
#[async_trait::async_trait]
pub trait GenericMountGuard: AsRef<Path> + std::fmt::Debug + Send + Sync + 'static {

View File

@@ -19,6 +19,7 @@ use tracing::instrument;
use super::mount::filesystem::block_dev::BlockDev;
use super::mount::filesystem::ReadOnly;
use super::mount::guard::TmpMountGuard;
use crate::disk::OsPartitionInfo;
use crate::util::io::from_yaml_async_reader;
use crate::util::serde::IoFormat;
use crate::util::{Invoke, Version};
@@ -232,7 +233,11 @@ pub async fn recovery_info(
}
#[instrument]
pub async fn list() -> Result<Vec<DiskInfo>, Error> {
pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
struct DiskIndex {
parts: IndexSet<PathBuf>,
internal: bool,
}
let disk_guids = pvscan().await?;
let disks = tokio_stream::wrappers::ReadDirStream::new(
tokio::fs::read_dir(DISK_PATH)
@@ -245,127 +250,157 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
crate::ErrorKind::Filesystem,
)
})
.try_fold(BTreeMap::new(), |mut disks, dir_entry| async move {
if let Some(disk_path) = dir_entry.path().file_name().and_then(|s| s.to_str()) {
let (disk_path, part_path) = if let Some(end) = PARTITION_REGEX.find(disk_path) {
(
disk_path.strip_suffix(end.as_str()).unwrap_or_default(),
Some(disk_path),
)
} else {
(disk_path, None)
};
let disk_path = Path::new(DISK_PATH).join(disk_path);
let disk = tokio::fs::canonicalize(&disk_path).await.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
disk_path.display().to_string(),
)
})?;
if &*disk == Path::new("/dev/mmcblk0") {
return Ok(disks);
}
if !disks.contains_key(&disk) {
disks.insert(disk.clone(), IndexSet::new());
}
if let Some(part_path) = part_path {
let part_path = Path::new(DISK_PATH).join(part_path);
let part = tokio::fs::canonicalize(&part_path).await.with_ctx(|_| {
.try_fold(
BTreeMap::<PathBuf, DiskIndex>::new(),
|mut disks, dir_entry| async move {
if let Some(disk_path) = dir_entry.path().file_name().and_then(|s| s.to_str()) {
let (disk_path, part_path) = if let Some(end) = PARTITION_REGEX.find(disk_path) {
(
disk_path.strip_suffix(end.as_str()).unwrap_or_default(),
Some(disk_path),
)
} else {
(disk_path, None)
};
let disk_path = Path::new(DISK_PATH).join(disk_path);
let disk = tokio::fs::canonicalize(&disk_path).await.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
part_path.display().to_string(),
disk_path.display().to_string(),
)
})?;
disks.get_mut(&disk).unwrap().insert(part);
let part = if let Some(part_path) = part_path {
let part_path = Path::new(DISK_PATH).join(part_path);
let part = tokio::fs::canonicalize(&part_path).await.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
part_path.display().to_string(),
)
})?;
Some(part)
} else {
None
};
if !disks.contains_key(&disk) {
disks.insert(
disk.clone(),
DiskIndex {
parts: IndexSet::new(),
internal: false,
},
);
}
if let Some(part) = part {
if os.contains(&part) {
disks.get_mut(&disk).unwrap().internal = true;
} else {
disks.get_mut(&disk).unwrap().parts.insert(part);
}
}
}
}
Ok(disks)
})
Ok(disks)
},
)
.await?;
let mut res = Vec::with_capacity(disks.len());
for (disk, parts) in disks {
let mut guid: Option<String> = None;
let mut partitions = Vec::with_capacity(parts.len());
let vendor = get_vendor(&disk)
.await
.map_err(|e| tracing::warn!("Could not get vendor of {}: {}", disk.display(), e.source))
.unwrap_or_default();
let model = get_model(&disk)
.await
.map_err(|e| tracing::warn!("Could not get model of {}: {}", disk.display(), e.source))
.unwrap_or_default();
let capacity = get_capacity(&disk)
.await
.map_err(|e| {
tracing::warn!("Could not get capacity of {}: {}", disk.display(), e.source)
})
.unwrap_or_default();
if let Some(g) = disk_guids.get(&disk) {
guid = g.clone();
} else {
for part in parts {
let mut embassy_os = None;
let label = get_label(&part).await?;
let capacity = get_capacity(&part)
.await
.map_err(|e| {
tracing::warn!("Could not get capacity of {}: {}", part.display(), e.source)
})
.unwrap_or_default();
let mut used = None;
match TmpMountGuard::mount(&BlockDev::new(&part), ReadOnly).await {
Err(e) => tracing::warn!("Could not collect usage information: {}", e.source),
Ok(mount_guard) => {
used = get_used(&mount_guard)
.await
.map_err(|e| {
tracing::warn!(
"Could not get usage of {}: {}",
part.display(),
e.source
)
})
.ok();
if let Some(recovery_info) = match recovery_info(&mount_guard).await {
Ok(a) => a,
Err(e) => {
tracing::error!(
"Error fetching unencrypted backup metadata: {}",
e
);
None
}
} {
embassy_os = Some(recovery_info)
}
mount_guard.unmount().await?;
}
for (disk, index) in disks {
if index.internal {
for part in index.parts {
let mut disk_info = disk_info(disk.clone()).await;
disk_info.logicalname = part;
if let Some(g) = disk_guids.get(&disk_info.logicalname) {
disk_info.guid = g.clone();
} else {
disk_info.partitions = vec![part_info(disk_info.logicalname.clone()).await];
}
partitions.push(PartitionInfo {
logicalname: part,
label,
capacity,
used,
embassy_os,
});
res.push(disk_info);
}
} else {
let mut disk_info = disk_info(disk).await;
disk_info.partitions = Vec::with_capacity(index.parts.len());
if let Some(g) = disk_guids.get(&disk_info.logicalname) {
disk_info.guid = g.clone();
} else {
for part in index.parts {
disk_info.partitions.push(part_info(part).await);
}
}
res.push(disk_info);
}
res.push(DiskInfo {
logicalname: disk,
vendor,
model,
partitions,
capacity,
guid,
})
}
Ok(res)
}
/// Builds a `DiskInfo` skeleton for the block device `disk`.
/// Infallible by design: each probe (vendor/model/capacity) that fails is
/// logged as a warning and replaced with its `Default` value, so a flaky
/// sysfs read never aborts the whole disk listing.
/// `partitions` and `guid` are left empty here — the caller fills them in.
async fn disk_info(disk: PathBuf) -> DiskInfo {
let vendor = get_vendor(&disk)
.await
.map_err(|e| tracing::warn!("Could not get vendor of {}: {}", disk.display(), e.source))
.unwrap_or_default();
let model = get_model(&disk)
.await
.map_err(|e| tracing::warn!("Could not get model of {}: {}", disk.display(), e.source))
.unwrap_or_default();
let capacity = get_capacity(&disk)
.await
.map_err(|e| tracing::warn!("Could not get capacity of {}: {}", disk.display(), e.source))
.unwrap_or_default();
DiskInfo {
logicalname: disk,
vendor,
model,
partitions: Vec::new(),
capacity,
guid: None,
}
}
/// Builds a `PartitionInfo` for partition `part`, best-effort.
/// Label and capacity probes that fail are logged and defaulted.
/// The partition is then temporarily mounted read-only to collect usage
/// (`used`) and any embassy-os recovery metadata (`embassy_os`); if the
/// mount itself fails, both remain `None` and only a warning is logged.
/// Unmount errors are logged rather than propagated, so this function
/// never fails.
async fn part_info(part: PathBuf) -> PartitionInfo {
let mut embassy_os = None;
let label = get_label(&part)
.await
.map_err(|e| tracing::warn!("Could not get label of {}: {}", part.display(), e.source))
.unwrap_or_default();
let capacity = get_capacity(&part)
.await
.map_err(|e| tracing::warn!("Could not get capacity of {}: {}", part.display(), e.source))
.unwrap_or_default();
let mut used = None;
// Mount read-only under a temporary guard just long enough to inspect it.
match TmpMountGuard::mount(&BlockDev::new(&part), ReadOnly).await {
Err(e) => tracing::warn!("Could not collect usage information: {}", e.source),
Ok(mount_guard) => {
used = get_used(&mount_guard)
.await
.map_err(|e| {
tracing::warn!("Could not get usage of {}: {}", part.display(), e.source)
})
.ok();
// recovery_info failures are downgraded to a log line — a partition
// without readable backup metadata is still reported.
if let Some(recovery_info) = match recovery_info(&mount_guard).await {
Ok(a) => a,
Err(e) => {
tracing::error!("Error fetching unencrypted backup metadata: {}", e);
None
}
} {
embassy_os = Some(recovery_info)
}
// Explicit unmount so a failure is visible; the guard would otherwise
// clean up silently on drop.
if let Err(e) = mount_guard.unmount().await {
tracing::error!("Error unmounting partition {}: {}", part.display(), e);
}
}
}
PartitionInfo {
logicalname: part,
label,
capacity,
used,
embassy_os,
}
}
fn parse_pvscan_output(pvscan_output: &str) -> BTreeMap<PathBuf, Option<String>> {
fn parse_line(line: &str) -> IResult<&str, (&str, Option<&str>)> {
let pv_parse = preceded(

View File

@@ -70,7 +70,7 @@ pub enum ErrorKind {
TLSInit = 61,
HttpRange = 62,
ContentLength = 63,
BytesError = 64
BytesError = 64,
}
impl ErrorKind {
pub fn as_str(&self) -> &'static str {
@@ -139,7 +139,7 @@ impl ErrorKind {
TLSInit => "TLS Backend Initialize Error",
HttpRange => "No Support for Web Server HTTP Ranges",
ContentLength => "Request has no content length header",
BytesError => "Could not get the bytes for this request"
BytesError => "Could not get the bytes for this request",
}
}
}
@@ -244,8 +244,6 @@ impl From<openssl::error::ErrorStack> for Error {
fn from(e: openssl::error::ErrorStack) -> Self {
Error::new(eyre!("OpenSSL ERROR:\n{}", e), ErrorKind::OpenSsl)
}
}
impl From<Error> for RpcError {
fn from(e: Error) -> Self {

View File

@@ -17,8 +17,8 @@ use crate::util::Invoke;
use crate::version::VersionT;
use crate::Error;
pub const SYSTEM_REBUILD_PATH: &str = "/embassy-os/system-rebuild";
pub const STANDBY_MODE_PATH: &str = "/embassy-os/standby";
pub const SYSTEM_REBUILD_PATH: &str = "/media/embassy/config/system-rebuild";
pub const STANDBY_MODE_PATH: &str = "/media/embassy/config/standby";
pub async fn check_time_is_synchronized() -> Result<bool, Error> {
Ok(String::from_utf8(
@@ -309,7 +309,7 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
tracing::info!("Created Docker Network");
tracing::info!("Loading System Docker Images");
crate::install::load_images("/var/lib/embassy/system-images").await?;
crate::install::load_images("/usr/lib/embassy/system-images").await?;
tracing::info!("Loaded System Docker Images");
tracing::info!("Loading Package Docker Images");
@@ -332,12 +332,15 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
crate::ssh::sync_keys_from_db(&secret_store, "/home/start9/.ssh/authorized_keys").await?;
tracing::info!("Synced SSH Keys");
crate::net::wifi::synchronize_wpa_supplicant_conf(
&cfg.datadir().join("main"),
&receipts.last_wifi_region.get(&mut handle).await?,
)
.await?;
tracing::info!("Synchronized wpa_supplicant.conf");
if let Some(wifi_interface) = &cfg.wifi_interface {
crate::net::wifi::synchronize_wpa_supplicant_conf(
&cfg.datadir().join("main"),
wifi_interface,
&receipts.last_wifi_region.get(&mut handle).await?,
)
.await?;
tracing::info!("Synchronized WiFi");
}
receipts
.status_info
.set(

View File

@@ -1,8 +1,6 @@
use std::{
collections::HashMap,
path::{Path, PathBuf},
sync::Arc,
};
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use bollard::image::ListImagesOptions;
use color_eyre::Report;

View File

@@ -896,7 +896,7 @@ impl InstallS9Receipts {
}
#[instrument(skip(ctx, rdr))]
pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
ctx: &RpcContext,
pkg_id: &PackageId,
version: &Version,

View File

@@ -113,6 +113,7 @@ impl InstallProgress {
}
#[pin_project::pin_project]
#[derive(Debug)]
pub struct InstallProgressTracker<RW> {
#[pin]
inner: RW,

View File

@@ -22,17 +22,17 @@ use tokio::task::JoinHandle;
use torut::onion::TorSecretKeyV3;
use tracing::instrument;
use crate::context::RpcContext;
use crate::manager::sync::synchronizer;
use crate::net::interface::InterfaceId;
use crate::net::GeneratedCertificateMountPoint;
use crate::notifications::NotificationLevel;
use crate::procedure::docker::DockerProcedure;
use crate::procedure::docker::{DockerContainer, DockerInject, DockerProcedure};
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::status::MainStatus;
use crate::util::{Container, NonDetachingJoinHandle, Version};
use crate::Error;
use crate::{context::RpcContext, procedure::docker::DockerContainer};
use crate::{manager::sync::synchronizer, procedure::docker::DockerInject};
pub mod health;
mod sync;

View File

@@ -59,12 +59,10 @@ pub fn db<M: Metadata>(ctx: RpcContext) -> DynMiddleware<M> {
.append("X-Patch-Updates", HeaderValue::from_str(&a)?),
Err(e) => res.headers.append(
"X-Patch-Error",
HeaderValue::from_str(
&base64::encode_config(
&e.to_string(),
base64::URL_SAFE,
),
)?,
HeaderValue::from_str(&base64::encode_config(
&e.to_string(),
base64::URL_SAFE,
))?,
),
};
}

View File

@@ -8,12 +8,13 @@ use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::context::RpcContext;
use crate::id::ImageId;
use crate::procedure::docker::DockerContainer;
use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::util::Version;
use crate::volume::Volumes;
use crate::{context::RpcContext, procedure::docker::DockerContainer};
use crate::{Error, ResultExt};
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]

View File

@@ -18,6 +18,19 @@ use crate::util::serde::{display_serializable, IoFormat};
use crate::util::{display_none, Invoke};
use crate::{Error, ErrorKind};
type WifiManager = Arc<RwLock<WpaCli>>;
pub fn wifi_manager(ctx: &RpcContext) -> Result<&WifiManager, Error> {
if let Some(wifi_manager) = ctx.wifi_manager.as_ref() {
Ok(wifi_manager)
} else {
Err(Error::new(
color_eyre::eyre::eyre!("No WiFi interface available"),
ErrorKind::Wifi,
))
}
}
#[command(subcommands(add, connect, delete, get, country, available))]
pub async fn wifi() -> Result<(), Error> {
Ok(())
@@ -42,6 +55,7 @@ pub async fn add(
#[arg] priority: isize,
#[arg] connect: bool,
) -> Result<(), Error> {
let wifi_manager = wifi_manager(&ctx)?;
if !ssid.is_ascii() {
return Err(Error::new(
color_eyre::eyre::eyre!("SSID may not have special characters"),
@@ -56,7 +70,7 @@ pub async fn add(
}
async fn add_procedure(
db: impl DbHandle,
wifi_manager: Arc<RwLock<WpaCli>>,
wifi_manager: WifiManager,
ssid: &Ssid,
password: &Psk,
priority: isize,
@@ -71,7 +85,7 @@ pub async fn add(
}
if let Err(err) = add_procedure(
&mut ctx.db.handle(),
ctx.wifi_manager.clone(),
wifi_manager.clone(),
&Ssid(ssid.clone()),
&Psk(password.clone()),
priority,
@@ -91,6 +105,7 @@ pub async fn add(
#[command(display(display_none))]
#[instrument(skip(ctx))]
pub async fn connect(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<(), Error> {
let wifi_manager = wifi_manager(&ctx)?;
if !ssid.is_ascii() {
return Err(Error::new(
color_eyre::eyre::eyre!("SSID may not have special characters"),
@@ -99,7 +114,7 @@ pub async fn connect(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<
}
async fn connect_procedure(
mut db: impl DbHandle,
wifi_manager: Arc<RwLock<WpaCli>>,
wifi_manager: WifiManager,
ssid: &Ssid,
) -> Result<(), Error> {
let wpa_supplicant = wifi_manager.read().await;
@@ -125,7 +140,7 @@ pub async fn connect(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<
if let Err(err) = connect_procedure(
&mut ctx.db.handle(),
ctx.wifi_manager.clone(),
wifi_manager.clone(),
&Ssid(ssid.clone()),
)
.await
@@ -142,20 +157,21 @@ pub async fn connect(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<
#[command(display(display_none))]
#[instrument(skip(ctx))]
pub async fn delete(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<(), Error> {
let wifi_manager = wifi_manager(&ctx)?;
if !ssid.is_ascii() {
return Err(Error::new(
color_eyre::eyre::eyre!("SSID may not have special characters"),
ErrorKind::Wifi,
));
}
let wpa_supplicant = ctx.wifi_manager.read().await;
let wpa_supplicant = wifi_manager.read().await;
let current = wpa_supplicant.get_current_network().await?;
drop(wpa_supplicant);
let mut wpa_supplicant = ctx.wifi_manager.write().await;
let mut wpa_supplicant = wifi_manager.write().await;
let ssid = Ssid(ssid);
let is_current_being_removed = matches!(current, Some(current) if current == ssid);
let is_current_removed_and_no_hardwire =
is_current_being_removed && !interface_connected("eth0").await?;
is_current_being_removed && !interface_connected(&ctx.ethernet_interface).await?;
if is_current_removed_and_no_hardwire {
return Err(Error::new(color_eyre::eyre::eyre!("Forbidden: Deleting this Network would make your Embassy Unreachable. Either connect to ethernet or connect to a different WiFi network to remedy this."), ErrorKind::Wifi));
}
@@ -284,12 +300,13 @@ pub async fn get(
#[arg(long = "format")]
format: Option<IoFormat>,
) -> Result<WiFiInfo, Error> {
let wpa_supplicant = ctx.wifi_manager.read().await;
let wifi_manager = wifi_manager(&ctx)?;
let wpa_supplicant = wifi_manager.read().await;
let (list_networks, current_res, country_res, ethernet_res, signal_strengths) = tokio::join!(
wpa_supplicant.list_networks_low(),
wpa_supplicant.get_current_network(),
wpa_supplicant.get_country_low(),
interface_connected("eth0"), // TODO: pull from config
interface_connected(&ctx.ethernet_interface),
wpa_supplicant.list_wifi_low()
);
let signal_strengths = signal_strengths?;
@@ -337,7 +354,8 @@ pub async fn get_available(
#[arg(long = "format")]
format: Option<IoFormat>,
) -> Result<Vec<WifiListOut>, Error> {
let wpa_supplicant = ctx.wifi_manager.read().await;
let wifi_manager = wifi_manager(&ctx)?;
let wpa_supplicant = wifi_manager.read().await;
let (wifi_list, network_list) = tokio::join!(
wpa_supplicant.list_wifi_low(),
wpa_supplicant.list_networks_low()
@@ -365,13 +383,14 @@ pub async fn set_country(
#[context] ctx: RpcContext,
#[arg(parse(country_code_parse))] country: CountryCode,
) -> Result<(), Error> {
if !interface_connected("eth0").await? {
let wifi_manager = wifi_manager(&ctx)?;
if !interface_connected(&ctx.ethernet_interface).await? {
return Err(Error::new(
color_eyre::eyre::eyre!("Won't change country without hardwire connection"),
crate::ErrorKind::Wifi,
));
}
let mut wpa_supplicant = ctx.wifi_manager.write().await;
let mut wpa_supplicant = wifi_manager.write().await;
wpa_supplicant.set_country_low(country.alpha2()).await?;
for (network_id, _wifi_info) in wpa_supplicant.list_networks_low().await? {
wpa_supplicant.remove_network_low(network_id).await?;
@@ -776,6 +795,7 @@ pub fn country_code_parse(code: &str, _matches: &ArgMatches) -> Result<CountryCo
#[instrument(skip(main_datadir))]
pub async fn synchronize_wpa_supplicant_conf<P: AsRef<Path>>(
main_datadir: P,
wifi_iface: &str,
last_country_code: &Option<CountryCode>,
) -> Result<(), Error> {
let persistent = main_datadir.as_ref().join("system-connections");
@@ -797,7 +817,7 @@ pub async fn synchronize_wpa_supplicant_conf<P: AsRef<Path>>(
.invoke(ErrorKind::Wifi)
.await?;
Command::new("ifconfig")
.arg("wlan0")
.arg(wifi_iface)
.arg("up")
.invoke(ErrorKind::Wifi)
.await?;

View File

@@ -0,0 +1,95 @@
use std::borrow::Cow;
use std::collections::BTreeSet;
use std::io::SeekFrom;
use std::path::Path;
use color_eyre::eyre::eyre;
use futures::{FutureExt, TryStreamExt};
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt};
use tokio_tar::{Archive, Entry};
use crate::util::io::from_cbor_async_reader;
use crate::{Error, ErrorKind, ARCH};
/// Manifest describing which CPU architectures a multi-arch docker-images
/// archive contains; serialized as the `multiarch.cbor` entry inside that
/// archive (see `DockerReader::new`).
#[derive(Default, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct DockerMultiArch {
    // Architecture to fall back on when the host arch is not in `available`.
    pub default: String,
    // Every architecture for which an `<arch>.tar` image exists in the archive.
    pub available: BTreeSet<String>,
}
/// Reader over the docker-images payload of an s9pk: either the raw section
/// (legacy single-arch package) or a single `<arch>.tar` entry selected out of
/// a multi-arch wrapper tar archive.
#[pin_project::pin_project(project = DockerReaderProject)]
#[derive(Debug)]
pub enum DockerReader<R: AsyncRead + Unpin> {
    /// The entire underlying stream is one docker image tarball.
    SingleArch(#[pin] R),
    /// One `<arch>.tar` entry inside a multi-arch wrapper archive.
    MultiArch(#[pin] Entry<Archive<R>>),
}
impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> DockerReader<R> {
    /// Builds a `DockerReader` over an s9pk docker-images section.
    ///
    /// Pass 1: scan the tar stream for a `multiarch.cbor` entry. If present,
    /// decode it as [`DockerMultiArch`] and pick an architecture — the host
    /// `ARCH` when listed in `available`, otherwise the manifest's `default`.
    /// The reader is then rewound to the start and, on pass 2, the entry named
    /// `<arch>.tar` is located and returned as `MultiArch`. When there is no
    /// `multiarch.cbor`, the whole stream is treated as a single-arch image.
    ///
    /// # Errors
    /// Returns `ErrorKind::ParseS9pk` when a multiarch manifest is present but
    /// the selected architecture's tarball entry is missing; I/O and CBOR
    /// decode errors are propagated as-is.
    pub async fn new(mut rdr: R) -> Result<Self, Error> {
        // Pass 1: find and decode the optional multiarch manifest.
        let arch = if let Some(multiarch) = tokio_tar::Archive::new(&mut rdr)
            .entries()?
            .try_filter_map(|e| {
                async move {
                    Ok(if &*e.path()? == Path::new("multiarch.cbor") {
                        Some(e)
                    } else {
                        None
                    })
                }
                .boxed()
            })
            .try_next()
            .await?
        {
            let multiarch: DockerMultiArch = from_cbor_async_reader(multiarch).await?;
            // Prefer the host architecture; otherwise use the archive default.
            // Cow avoids cloning the static ARCH string in the common case.
            Some(if multiarch.available.contains(&**ARCH) {
                Cow::Borrowed(&**ARCH)
            } else {
                Cow::Owned(multiarch.default)
            })
        } else {
            None
        };
        // Rewind so the second scan (or single-arch consumer) sees the whole
        // section from the beginning.
        rdr.seek(SeekFrom::Start(0)).await?;
        if let Some(arch) = arch {
            // Pass 2: locate the tarball for the chosen architecture.
            if let Some(image) = tokio_tar::Archive::new(rdr)
                .entries()?
                .try_filter_map(|e| {
                    let arch = arch.clone();
                    async move {
                        Ok(if &*e.path()? == Path::new(&format!("{}.tar", arch)) {
                            Some(e)
                        } else {
                            None
                        })
                    }
                    .boxed()
                })
                .try_next()
                .await?
            {
                Ok(Self::MultiArch(image))
            } else {
                // Manifest promised this arch (or its default) but the entry
                // is absent — the package is malformed.
                Err(Error::new(
                    eyre!("Docker image section does not contain tarball for architecture"),
                    ErrorKind::ParseS9pk,
                ))
            }
        } else {
            // No multiarch manifest: legacy single-arch image payload.
            Ok(Self::SingleArch(rdr))
        }
    }
}
impl<R: AsyncRead + Unpin + Send + Sync> AsyncRead for DockerReader<R> {
    // Delegates reads to whichever variant is active via pin projection:
    // the raw reader for single-arch images, or the selected tar entry for
    // multi-arch images.
    fn poll_read(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &mut tokio::io::ReadBuf<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        match self.project() {
            DockerReaderProject::SingleArch(r) => r.poll_read(cx, buf),
            DockerReaderProject::MultiArch(r) => r.poll_read(cx, buf),
        }
    }
}

View File

@@ -6,18 +6,19 @@ use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use url::Url;
use crate::action::Actions;
use crate::backup::BackupActions;
use crate::config::action::ConfigActions;
use crate::dependencies::Dependencies;
use crate::migration::Migrations;
use crate::net::interface::Interfaces;
use crate::procedure::docker::DockerContainer;
use crate::procedure::PackageProcedure;
use crate::status::health_check::HealthChecks;
use crate::util::Version;
use crate::version::{Current, VersionT};
use crate::volume::Volumes;
use crate::Error;
use crate::{action::Actions, procedure::docker::DockerContainer};
fn current_version() -> Version {
Current::new().semver().into()
@@ -143,7 +144,7 @@ impl Assets {
self.docker_images
.as_ref()
.map(|a| a.as_path())
.unwrap_or(Path::new("image.tar"))
.unwrap_or(Path::new("docker-images"))
}
pub fn assets_path(&self) -> &Path {
self.assets

View File

@@ -1,22 +1,27 @@
use std::ffi::OsStr;
use std::path::PathBuf;
use color_eyre::eyre::eyre;
use futures::TryStreamExt;
use imbl::OrdMap;
use patch_db::{LockReceipt, LockType};
use rpc_toolkit::command;
use serde_json::Value;
use tokio::io::AsyncRead;
use tracing::instrument;
use crate::context::SdkContext;
use crate::s9pk::builder::S9pkPacker;
use crate::s9pk::docker::DockerMultiArch;
use crate::s9pk::manifest::Manifest;
use crate::s9pk::reader::S9pkReader;
use crate::util::display_none;
use crate::util::io::BufferedWriteReader;
use crate::util::serde::IoFormat;
use crate::volume::Volume;
use crate::{context::SdkContext, procedure::docker::DockerContainer};
use crate::{Error, ErrorKind, ResultExt};
pub mod builder;
pub mod docker;
pub mod header;
pub mod manifest;
pub mod reader;
@@ -94,33 +99,73 @@ pub async fn pack(#[context] ctx: SdkContext, #[arg] path: Option<PathBuf>) -> R
)
})?,
)
.docker_images(
File::open(path.join(manifest.assets.docker_images_path()))
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
manifest.assets.docker_images_path().display().to_string(),
)
})?,
)
.docker_images({
let docker_images_path = path.join(manifest.assets.docker_images_path());
let res: Box<dyn AsyncRead + Unpin + Send + Sync> = if tokio::fs::metadata(&docker_images_path).await?.is_dir() {
let tars: Vec<_> = tokio_stream::wrappers::ReadDirStream::new(tokio::fs::read_dir(&docker_images_path).await?).try_collect().await?;
let mut arch_info = DockerMultiArch::default();
for tar in &tars {
if tar.path().extension() == Some(OsStr::new("tar")) {
arch_info.available.insert(tar.path().file_stem().unwrap_or_default().to_str().unwrap_or_default().to_owned());
}
}
if arch_info.available.contains("aarch64") {
arch_info.default = "aarch64".to_owned();
} else {
arch_info.default = arch_info.available.iter().next().cloned().unwrap_or_default();
}
let arch_info_cbor = IoFormat::Cbor.to_vec(&arch_info)?;
Box::new(BufferedWriteReader::new(|w| async move {
let mut docker_images = tokio_tar::Builder::new(w);
let mut multiarch_header = tokio_tar::Header::new_gnu();
multiarch_header.set_path("multiarch.cbor")?;
multiarch_header.set_size(arch_info_cbor.len() as u64);
multiarch_header.set_cksum();
docker_images.append(&multiarch_header, std::io::Cursor::new(arch_info_cbor)).await?;
for tar in tars
{
docker_images
.append_path_with_name(
tar.path(),
tar.file_name(),
)
.await?;
}
Ok::<_, std::io::Error>(())
}, 1024 * 1024))
} else {
Box::new(File::open(docker_images_path)
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
manifest.assets.docker_images_path().display().to_string(),
)
})?)
};
res
})
.assets({
let mut assets = tokio_tar::Builder::new(Vec::new()); // TODO: Ideally stream this? best not to buffer in memory
let asset_volumes = manifest
.volumes
.iter()
.filter(|(_, v)| matches!(v, &&Volume::Assets {})).map(|(id, _)| id.clone()).collect::<Vec<_>>();
let assets_path = manifest.assets.assets_path().to_owned();
let path = path.clone();
for (asset_volume, _) in manifest
.volumes
.iter()
.filter(|(_, v)| matches!(v, &&Volume::Assets {}))
{
assets
.append_dir_all(
asset_volume,
path.join(manifest.assets.assets_path()).join(asset_volume),
)
.await?;
}
std::io::Cursor::new(assets.into_inner().await?)
BufferedWriteReader::new(|w| async move {
let mut assets = tokio_tar::Builder::new(w);
for asset_volume in asset_volumes
{
assets
.append_dir_all(
&asset_volume,
path.join(&assets_path).join(&asset_volume),
)
.await?;
}
Ok::<_, std::io::Error>(())
}, 1024 * 1024)
})
.scripts({
let script_path = path.join(manifest.assets.scripts_path()).join("embassy.js");

View File

@@ -1,5 +1,6 @@
use std::collections::BTreeSet;
use std::io::SeekFrom;
use std::ops::Range;
use std::path::Path;
use std::pin::Pin;
use std::str::FromStr;
@@ -11,44 +12,74 @@ use ed25519_dalek::PublicKey;
use futures::TryStreamExt;
use sha2_old::{Digest, Sha512};
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, ReadBuf, Take};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, ReadBuf};
use tracing::instrument;
use super::header::{FileSection, Header, TableOfContents};
use super::manifest::{Manifest, PackageId};
use super::SIG_CONTEXT;
use crate::id::ImageId;
use crate::install::progress::InstallProgressTracker;
use crate::s9pk::docker::DockerReader;
use crate::util::Version;
use crate::{id::ImageId, procedure::docker::DockerContainer};
use crate::{Error, ResultExt};
#[pin_project::pin_project]
pub struct ReadHandle<'a, R: AsyncRead + AsyncSeek + Unpin = File> {
#[derive(Debug)]
pub struct ReadHandle<'a, R = File> {
pos: &'a mut u64,
range: Range<u64>,
#[pin]
rdr: Take<&'a mut R>,
rdr: &'a mut R,
}
impl<'a, R: AsyncRead + AsyncSeek + Unpin> ReadHandle<'a, R> {
impl<'a, R: AsyncRead + Unpin> ReadHandle<'a, R> {
pub async fn to_vec(mut self) -> std::io::Result<Vec<u8>> {
let mut buf = vec![0; self.rdr.limit() as usize];
let mut buf = vec![0; (self.range.end - self.range.start) as usize];
self.read_exact(&mut buf).await?;
Ok(buf)
}
}
impl<'a, R: AsyncRead + AsyncSeek + Unpin> AsyncRead for ReadHandle<'a, R> {
impl<'a, R: AsyncRead + Unpin> AsyncRead for ReadHandle<'a, R> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
let start = buf.filled().len();
let this = self.project();
let pos = this.pos;
let res = AsyncRead::poll_read(this.rdr, cx, buf);
**pos += (buf.filled().len() - start) as u64;
let start = buf.filled().len();
let mut take_buf = buf.take(this.range.end.saturating_sub(**this.pos) as usize);
let res = AsyncRead::poll_read(this.rdr, cx, &mut take_buf);
let n = take_buf.filled().len();
unsafe { buf.assume_init(start + n) };
buf.advance(n);
**this.pos += n as u64;
res
}
}
impl<'a, R: AsyncSeek + Unpin> AsyncSeek for ReadHandle<'a, R> {
fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> std::io::Result<()> {
let this = self.project();
AsyncSeek::start_seek(
this.rdr,
match position {
SeekFrom::Current(n) => SeekFrom::Current(n),
SeekFrom::End(n) => SeekFrom::Start((this.range.end as i64 + n) as u64),
SeekFrom::Start(n) => SeekFrom::Start(this.range.start + n),
},
)
}
fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<u64>> {
let this = self.project();
match AsyncSeek::poll_complete(this.rdr, cx) {
Poll::Ready(Ok(n)) => {
let res = n.saturating_sub(this.range.start);
**this.pos = this.range.start + res;
Poll::Ready(Ok(res))
}
a => a,
}
}
}
#[derive(Debug)]
pub struct ImageTag {
@@ -110,7 +141,7 @@ impl FromStr for ImageTag {
}
}
pub struct S9pkReader<R: AsyncRead + AsyncSeek + Unpin = File> {
pub struct S9pkReader<R: AsyncRead + AsyncSeek + Unpin + Send + Sync = File> {
hash: Option<Output<Sha512>>,
hash_string: Option<String>,
developer_key: PublicKey,
@@ -128,12 +159,12 @@ impl S9pkReader {
Self::from_reader(rdr, check_sig).await
}
}
impl<R: AsyncRead + AsyncSeek + Unpin> S9pkReader<InstallProgressTracker<R>> {
impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> S9pkReader<InstallProgressTracker<R>> {
pub fn validated(&mut self) {
self.rdr.validated()
}
}
impl<R: AsyncRead + AsyncSeek + Unpin> S9pkReader<R> {
impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> S9pkReader<R> {
#[instrument(skip(self))]
pub async fn validate(&mut self) -> Result<(), Error> {
if self.toc.icon.length > 102_400 {
@@ -309,8 +340,9 @@ impl<R: AsyncRead + AsyncSeek + Unpin> S9pkReader<R> {
self.pos = section.position;
}
Ok(ReadHandle {
range: self.pos..(self.pos + section.length),
pos: &mut self.pos,
rdr: (&mut self.rdr).take(section.length),
rdr: &mut self.rdr,
})
}
@@ -336,8 +368,8 @@ impl<R: AsyncRead + AsyncSeek + Unpin> S9pkReader<R> {
Ok(self.read_handle(self.toc.icon).await?)
}
pub async fn docker_images<'a>(&'a mut self) -> Result<ReadHandle<'a, R>, Error> {
Ok(self.read_handle(self.toc.docker_images).await?)
pub async fn docker_images<'a>(&'a mut self) -> Result<DockerReader<ReadHandle<'a, R>>, Error> {
DockerReader::new(self.read_handle(self.toc.docker_images).await?).await
}
pub async fn assets<'a>(&'a mut self) -> Result<ReadHandle<'a, R>, Error> {

View File

@@ -88,8 +88,8 @@ pub fn disk() -> Result<(), Error> {
}
#[command(rename = "list", rpc_only, metadata(authenticated = false))]
pub async fn list_disks() -> Result<Vec<DiskInfo>, Error> {
crate::disk::list(None).await
pub async fn list_disks(#[context] ctx: SetupContext) -> Result<Vec<DiskInfo>, Error> {
crate::disk::util::list(&ctx.os_partitions).await
}
#[command(rpc_only)]
@@ -135,7 +135,7 @@ pub async fn attach(
ErrorKind::DiskManagement,
));
}
init(&RpcContextConfig::load(ctx.config_path.as_ref()).await?).await?;
init(&RpcContextConfig::load(ctx.config_path.clone()).await?).await?;
let secrets = ctx.secret_store().await?;
let db = ctx.db(&secrets).await?;
let mut secrets_handle = secrets.acquire().await?;
@@ -331,7 +331,7 @@ pub async fn complete(#[context] ctx: SetupContext) -> Result<SetupResult, Error
si.lan_address()
.put(&mut db, &hostname.lan_address().parse().unwrap())
.await?;
let mut guid_file = File::create("/embassy-os/disk.guid").await?;
let mut guid_file = File::create("/media/embassy/config/disk.guid").await?;
guid_file.write_all(guid.as_bytes()).await?;
guid_file.sync_all().await?;
ctx.shutdown.send(()).expect("failed to shutdown");
@@ -378,7 +378,7 @@ pub async fn execute_inner(
recovery_password,
)
.await?;
let db = init(&RpcContextConfig::load(ctx.config_path.as_ref()).await?)
let db = init(&RpcContextConfig::load(ctx.config_path.clone()).await?)
.await?
.db;
let hostname = {
@@ -416,7 +416,7 @@ pub async fn execute_inner(
res
} else {
let (tor_addr, root_ca) = fresh_setup(&ctx, &embassy_password).await?;
let db = init(&RpcContextConfig::load(ctx.config_path.as_ref()).await?)
let db = init(&RpcContextConfig::load(ctx.config_path.clone()).await?)
.await?
.db;
let mut handle = db.handle();

View File

@@ -4,9 +4,10 @@ use std::time::{Duration, Instant};
use divrem::DivRem;
use proptest_derive::Arbitrary;
use tokio::process::Command;
use tracing::instrument;
use crate::util::FileLock;
use crate::util::{FileLock, Invoke};
use crate::{Error, ErrorKind, ResultExt};
lazy_static::lazy_static! {
@@ -22,31 +23,49 @@ lazy_static::lazy_static! {
pub const SOUND_LOCK_FILE: &'static str = "/etc/embassy/sound.lock";
struct SoundInterface(Option<FileLock>);
/// Handle on the sound hardware, held under an exclusive file lock.
struct SoundInterface {
    // True when the `beep` utility was found on PATH at lease time; playback
    // then shells out to `beep` instead of driving the PWM sysfs files.
    use_beep: bool,
    // Exclusive lock on SOUND_LOCK_FILE; taken in `lease`, released in
    // `close` or on drop.
    guard: Option<FileLock>,
}
impl SoundInterface {
#[instrument]
pub async fn lease() -> Result<Self, Error> {
let guard = FileLock::new(SOUND_LOCK_FILE, true).await?;
tokio::fs::write(&*EXPORT_FILE, "0")
if Command::new("which")
.arg("beep")
.invoke(ErrorKind::NotFound)
.await
.or_else(|e| {
if e.raw_os_error() == Some(16) {
Ok(())
} else {
Err(e)
}
})
.with_ctx(|_| (ErrorKind::SoundError, EXPORT_FILE.to_string_lossy()))?;
let instant = Instant::now();
while tokio::fs::metadata(&*PERIOD_FILE).await.is_err()
&& instant.elapsed() < Duration::from_secs(1)
.is_ok()
{
tokio::time::sleep(Duration::from_millis(1)).await;
Ok(SoundInterface {
use_beep: true,
guard: Some(guard),
})
} else {
tokio::fs::write(&*EXPORT_FILE, "0")
.await
.or_else(|e| {
if e.raw_os_error() == Some(16) {
Ok(())
} else {
Err(e)
}
})
.with_ctx(|_| (ErrorKind::SoundError, EXPORT_FILE.to_string_lossy()))?;
let instant = Instant::now();
while tokio::fs::metadata(&*PERIOD_FILE).await.is_err()
&& instant.elapsed() < Duration::from_secs(1)
{
tokio::time::sleep(Duration::from_millis(1)).await;
}
Ok(SoundInterface {
use_beep: false,
guard: Some(guard),
})
}
Ok(SoundInterface(Some(guard)))
}
#[instrument(skip(self))]
pub async fn play(&mut self, note: &Note) -> Result<(), Error> {
async fn play_pwm(&mut self, note: &Note) -> Result<(), Error> {
let curr_period = tokio::fs::read_to_string(&*PERIOD_FILE)
.await
.with_ctx(|_| (ErrorKind::SoundError, PERIOD_FILE.to_string_lossy()))?;
@@ -71,41 +90,50 @@ impl SoundInterface {
Ok(())
}
#[instrument(skip(self))]
pub async fn play_for_time_slice(
&mut self,
tempo_qpm: u16,
note: &Note,
time_slice: &TimeSlice,
) -> Result<(), Error> {
if let Err(e) = async {
self.play(note).await?;
tokio::time::sleep(time_slice.to_duration(tempo_qpm) * 19 / 20).await;
self.stop().await?;
tokio::time::sleep(time_slice.to_duration(tempo_qpm) / 20).await;
Ok::<_, Error>(())
}
.await
{
// we could catch this error and propagate but I'd much prefer the original error bubble up
let _mute = self.stop().await;
Err(e)
} else {
Ok(())
}
}
#[instrument(skip(self))]
pub async fn stop(&mut self) -> Result<(), Error> {
async fn stop_pwm(&mut self) -> Result<(), Error> {
tokio::fs::write(&*SWITCH_FILE, "0")
.await
.with_ctx(|_| (ErrorKind::SoundError, SWITCH_FILE.to_string_lossy()))
}
#[instrument(skip(self))]
pub async fn close(mut self) -> Result<(), Error> {
if let Some(lock) = self.0.take() {
if let Some(lock) = self.guard.take() {
lock.unlock().await?;
}
Ok(())
}
#[instrument(skip(self))]
pub async fn play_for_time_slice(
&mut self,
tempo_qpm: u16,
note: &Note,
time_slice: &TimeSlice,
) -> Result<(), Error> {
if self.use_beep {
Command::new("beep")
.arg("-f")
.arg(note.frequency().to_string())
.arg("-l")
.arg(time_slice.to_duration(tempo_qpm).as_millis().to_string())
.invoke(ErrorKind::SoundError)
.await?;
} else {
if let Err(e) = async {
self.play_pwm(note).await?;
tokio::time::sleep(time_slice.to_duration(tempo_qpm) * 19 / 20).await;
self.stop_pwm().await?;
tokio::time::sleep(time_slice.to_duration(tempo_qpm) / 20).await;
Ok::<_, Error>(())
}
.await
{
// we could catch this error and propagate but I'd much prefer the original error bubble up
let _mute = self.stop_pwm().await;
return Err(e);
}
}
Ok(())
}
}
pub struct Song<Notes> {
@@ -139,7 +167,7 @@ where
impl Drop for SoundInterface {
fn drop(&mut self) {
let guard = self.0.take();
let guard = self.guard.take();
tokio::spawn(async move {
if let Err(e) = tokio::fs::write(&*UNEXPORT_FILE, "0").await {
tracing::error!("Failed to Unexport Sound Interface: {}", e);

View File

@@ -5,13 +5,14 @@ pub use models::HealthCheckId;
use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::context::RpcContext;
use crate::id::ImageId;
use crate::procedure::docker::DockerContainer;
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::util::serde::Duration;
use crate::util::Version;
use crate::volume::Volumes;
use crate::{context::RpcContext, procedure::docker::DockerContainer};
use crate::{Error, ResultExt};
#[derive(Clone, Debug, Deserialize, Serialize)]

View File

@@ -1,38 +1,31 @@
use std::future::Future;
use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
use clap::ArgMatches;
use color_eyre::eyre::{eyre, Result};
use digest::Digest;
use emver::Version;
use futures::Stream;
use lazy_static::lazy_static;
use patch_db::{DbHandle, LockType, Revision};
use regex::Regex;
use reqwest::Url;
use rpc_toolkit::command;
use sha2::Sha256;
use tokio::io::AsyncWriteExt;
use tokio::pin;
use tokio::process::Command;
use tokio::time::Instant;
use tokio_stream::StreamExt;
use tracing::instrument;
use crate::context::RpcContext;
use crate::db::model::UpdateProgress;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::{FileSystem, ReadWrite};
use crate::disk::mount::guard::TmpMountGuard;
use crate::disk::BOOT_RW_PATH;
use crate::disk::mount::filesystem::httpdirfs::HttpDirFS;
use crate::disk::mount::filesystem::ReadOnly;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::{MountGuard, TmpMountGuard};
use crate::notifications::NotificationLevel;
use crate::sound::{
CIRCLE_OF_5THS_SHORT, UPDATE_FAILED_1, UPDATE_FAILED_2, UPDATE_FAILED_3, UPDATE_FAILED_4,
};
use crate::update::latest_information::LatestInformation;
use crate::util::rsync::{Rsync, RsyncOptions};
use crate::util::Invoke;
use crate::version::{Current, VersionT};
use crate::{Error, ErrorKind, ResultExt};
@@ -84,48 +77,6 @@ fn display_update_result(status: UpdateResult, _: &ArgMatches) {
}
}
const HEADER_KEY: &str = "x-eos-hash";
#[derive(Debug, Clone, Copy)]
pub enum WritableDrives {
Green,
Blue,
}
impl WritableDrives {
pub fn label(&self) -> &'static str {
match self {
Self::Green => "green",
Self::Blue => "blue",
}
}
pub fn block_dev(&self) -> &'static Path {
Path::new(match self {
Self::Green => "/dev/mmcblk0p3",
Self::Blue => "/dev/mmcblk0p4",
})
}
pub fn part_uuid(&self) -> &'static str {
match self {
Self::Green => "cb15ae4d-03",
Self::Blue => "cb15ae4d-04",
}
}
pub fn as_fs(&self) -> impl FileSystem {
BlockDev::new(self.block_dev())
}
}
/// This will be where we are going to be putting the new update
#[derive(Debug, Clone, Copy)]
pub struct NewLabel(pub WritableDrives);
/// This is our current label where the os is running
pub struct CurrentLabel(pub WritableDrives);
lazy_static! {
static ref PARSE_COLOR: Regex = Regex::new("LABEL=(\\w+)[ \t]+/").unwrap();
}
#[instrument(skip(ctx))]
async fn maybe_do_update(
ctx: RpcContext,
@@ -172,26 +123,41 @@ async fn maybe_do_update(
return Ok(None);
}
let (new_label, _current_label) = query_mounted_label().await?;
let (size, download) = download_file(
ctx.db.handle(),
&EosUrl {
base: marketplace_url,
version: latest_version.clone(),
},
new_label,
// mount httpdirfs
// losetup remote fs
// BEGIN TASK
// rsync fs
// validate (hash) fs
// kernel update?
// swap selected fs
let new_block_dev = TmpMountGuard::mount(
&HttpDirFS::new(
EosUrl {
base: marketplace_url,
version: latest_version,
}
.to_string()
.parse()?,
),
ReadOnly,
)
.await?;
let new_fs = TmpMountGuard::mount(
&BlockDev::new(new_block_dev.as_ref().join("eos.img")),
ReadOnly,
)
.await?;
status.update_progress = Some(UpdateProgress {
size,
size: Some(100),
downloaded: 0,
});
status.save(&mut tx).await?;
let rev = tx.commit().await?;
tokio::spawn(async move {
let res = do_update(ctx.clone(), new_fs, new_block_dev).await;
let mut db = ctx.db.handle();
let res = do_update(download, new_label).await;
let mut status = crate::db::DatabaseModel::new()
.server_info()
.status_info()
@@ -245,49 +211,44 @@ async fn maybe_do_update(
Ok(rev)
}
#[instrument(skip(download))]
#[instrument(skip(ctx, new_fs, new_block_dev))]
async fn do_update(
download: impl Future<Output = Result<(), Error>>,
new_label: NewLabel,
ctx: RpcContext,
new_fs: TmpMountGuard,
new_block_dev: TmpMountGuard,
) -> Result<(), Error> {
download.await?;
copy_machine_id(new_label).await?;
copy_ssh_host_keys(new_label).await?;
swap_boot_label(new_label).await?;
let mut rsync = Rsync::new(
new_fs.as_ref().join(""),
"/media/embassy/next",
Default::default(),
)?;
while let Some(progress) = rsync.progress.next().await {
crate::db::DatabaseModel::new()
.server_info()
.status_info()
.update_progress()
.put(
&mut ctx.db.handle(),
&UpdateProgress {
size: Some(100),
downloaded: (100.0 * progress) as u64,
},
)
.await?;
}
rsync.wait().await?;
new_fs.unmount().await?;
new_block_dev.unmount().await?;
copy_fstab().await?;
copy_machine_id().await?;
copy_ssh_host_keys().await?;
sync_boot().await?;
swap_boot_label().await?;
Ok(())
}
#[instrument]
pub async fn query_mounted_label() -> Result<(NewLabel, CurrentLabel), Error> {
let output = tokio::fs::read_to_string("/etc/fstab")
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, "/etc/fstab"))?;
match &PARSE_COLOR.captures(&output).ok_or_else(|| {
Error::new(
eyre!("Can't find pattern in {}", output),
crate::ErrorKind::Filesystem,
)
})?[1]
{
x if x == WritableDrives::Green.label() => Ok((
NewLabel(WritableDrives::Blue),
CurrentLabel(WritableDrives::Green),
)),
x if x == WritableDrives::Blue.label() => Ok((
NewLabel(WritableDrives::Green),
CurrentLabel(WritableDrives::Blue),
)),
e => {
return Err(Error::new(
eyre!("Could not find a mounted resource for {}", e),
crate::ErrorKind::Filesystem,
))
}
}
}
#[derive(Debug)]
struct EosUrl {
base: Url,
@@ -306,187 +267,84 @@ impl std::fmt::Display for EosUrl {
}
}
/// Begins downloading the OS image at `eos_url` onto the block device named
/// by `new_label`.
///
/// Returns the expected download size (taken from the `content-length`
/// header, if present) together with a future that performs the download and
/// verifies the written bytes against the hash advertised in the response
/// headers.
#[instrument(skip(db))]
async fn download_file<'a, Db: DbHandle + 'a>(
    mut db: Db,
    eos_url: &EosUrl,
    new_label: NewLabel,
) -> Result<(Option<u64>, impl Future<Output = Result<(), Error>> + 'a), Error> {
    let download_request = reqwest::get(eos_url.to_string())
        .await
        .with_kind(ErrorKind::Network)?;
    // Size is advisory (used for progress reporting) and absent when the
    // server sends no content-length header.
    let size = download_request
        .headers()
        .get("content-length")
        .and_then(|a| a.to_str().ok())
        .map(|l| l.parse())
        .transpose()?;
    Ok((size, async move {
        // The server publishes the expected digest in a custom header
        // (HEADER_KEY); a missing header is treated as a network error.
        let hash_from_header: String = download_request
            .headers()
            .get(HEADER_KEY)
            .ok_or_else(|| Error::new(eyre!("No {} in headers", HEADER_KEY), ErrorKind::Network))?
            .to_str()
            .with_kind(ErrorKind::InvalidRequest)?
            .to_owned();
        let stream_download = download_request.bytes_stream();
        let file_sum = write_stream_to_label(&mut db, size, stream_download, new_label).await?;
        check_download(&hash_from_header, file_sum).await?;
        Ok(())
    }))
}
/// Streams the downloaded image bytes directly onto the block device behind
/// `file`, periodically recording progress in the database, and returns the
/// SHA-256 digest of everything written.
#[instrument(skip(db, stream_download))]
async fn write_stream_to_label<Db: DbHandle>(
    db: &mut Db,
    size: Option<u64>,
    stream_download: impl Stream<Item = Result<rpc_toolkit::hyper::body::Bytes, reqwest::Error>>,
    file: NewLabel,
) -> Result<Vec<u8>, Error> {
    let block_dev = file.0.block_dev();
    // The image is written straight to the raw partition device, not into a
    // mounted filesystem.
    let mut file = tokio::fs::OpenOptions::new()
        .write(true)
        .open(&block_dev)
        .await
        .with_kind(ErrorKind::Filesystem)?;
    let mut hasher = Sha256::new();
    pin!(stream_download);
    let mut downloaded = 0;
    let mut last_progress_update = Instant::now();
    while let Some(item) = stream_download
        .next()
        .await
        .transpose()
        .with_kind(ErrorKind::Network)?
    {
        file.write_all(&item)
            .await
            .with_kind(ErrorKind::Filesystem)?;
        downloaded += item.len() as u64;
        // Throttle db writes to at most one progress update per second.
        if last_progress_update.elapsed() > Duration::from_secs(1) {
            last_progress_update = Instant::now();
            crate::db::DatabaseModel::new()
                .server_info()
                .status_info()
                .update_progress()
                .put(db, &UpdateProgress { size, downloaded })
                .await?;
        }
        hasher.update(item);
    }
    // Flush application buffers, shut down the writer, and fsync so every
    // byte is durably on the device before the handle is dropped.
    file.flush().await.with_kind(ErrorKind::Filesystem)?;
    file.shutdown().await.with_kind(ErrorKind::Filesystem)?;
    file.sync_all().await.with_kind(ErrorKind::Filesystem)?;
    drop(file);
    Ok(hasher.finalize().to_vec())
}
#[instrument]
async fn check_download(hash_from_header: &str, file_digest: Vec<u8>) -> Result<(), Error> {
if hex::decode(hash_from_header).with_kind(ErrorKind::Network)? != file_digest {
return Err(Error::new(
eyre!("Hash sum does not match source"),
ErrorKind::Network,
));
}
async fn copy_fstab() -> Result<(), Error> {
tokio::fs::copy("/etc/fstab", "/media/embassy/next/etc/fstab").await?;
Ok(())
}
async fn copy_machine_id(new_label: NewLabel) -> Result<(), Error> {
let new_guard = TmpMountGuard::mount(&new_label.0.as_fs(), ReadWrite).await?;
tokio::fs::copy("/etc/machine-id", new_guard.as_ref().join("etc/machine-id")).await?;
new_guard.unmount().await?;
async fn copy_machine_id() -> Result<(), Error> {
tokio::fs::copy("/etc/machine-id", "/media/embassy/next/etc/machine-id").await?;
Ok(())
}
async fn copy_ssh_host_keys(new_label: NewLabel) -> Result<(), Error> {
let new_guard = TmpMountGuard::mount(&new_label.0.as_fs(), ReadWrite).await?;
async fn copy_ssh_host_keys() -> Result<(), Error> {
tokio::fs::copy(
"/etc/ssh/ssh_host_rsa_key",
new_guard.as_ref().join("etc/ssh/ssh_host_rsa_key"),
"/media/embassy/next/etc/ssh/ssh_host_rsa_key",
)
.await?;
tokio::fs::copy(
"/etc/ssh/ssh_host_rsa_key.pub",
new_guard.as_ref().join("etc/ssh/ssh_host_rsa_key.pub"),
"/media/embassy/next/etc/ssh/ssh_host_rsa_key.pub",
)
.await?;
tokio::fs::copy(
"/etc/ssh/ssh_host_ecdsa_key",
new_guard.as_ref().join("etc/ssh/ssh_host_ecdsa_key"),
"/media/embassy/next/etc/ssh/ssh_host_ecdsa_key",
)
.await?;
tokio::fs::copy(
"/etc/ssh/ssh_host_ecdsa_key.pub",
new_guard.as_ref().join("etc/ssh/ssh_host_ecdsa_key.pub"),
"/media/embassy/next/etc/ssh/ssh_host_ecdsa_key.pub",
)
.await?;
tokio::fs::copy(
"/etc/ssh/ssh_host_ed25519_key",
new_guard.as_ref().join("etc/ssh/ssh_host_ed25519_key"),
"/media/embassy/next/etc/ssh/ssh_host_ed25519_key",
)
.await?;
tokio::fs::copy(
"/etc/ssh/ssh_host_ed25519_key.pub",
new_guard.as_ref().join("etc/ssh/ssh_host_ed25519_key.pub"),
"/media/embassy/next/etc/ssh/ssh_host_ed25519_key.pub",
)
.await?;
new_guard.unmount().await?;
Ok(())
}
/// Synchronizes the new root's `/boot` content into the live boot partition
/// and regenerates the bootloader configuration from inside the new root.
async fn sync_boot() -> Result<(), Error> {
    // Copy only files that don't already exist under /boot (no deletes, no
    // overwrites), so the currently-booted kernel/config is not clobbered.
    Rsync::new(
        "/media/embassy/next/boot/",
        "/boot",
        RsyncOptions {
            delete: false,
            force: false,
            ignore_existing: true,
        },
    )?
    .wait()
    .await?;
    // Bind-mount the pseudo-filesystems and /boot into the new root so that
    // `update-grub` run in the chroot sees the real system state.
    let dev_mnt =
        MountGuard::mount(&Bind::new("/dev"), "/media/embassy/next/dev", ReadWrite).await?;
    let sys_mnt =
        MountGuard::mount(&Bind::new("/sys"), "/media/embassy/next/sys", ReadWrite).await?;
    let proc_mnt =
        MountGuard::mount(&Bind::new("/proc"), "/media/embassy/next/proc", ReadWrite).await?;
    let boot_mnt =
        MountGuard::mount(&Bind::new("/boot"), "/media/embassy/next/boot", ReadWrite).await?;
    Command::new("chroot")
        .arg("/media/embassy/next")
        .arg("update-grub")
        .invoke(ErrorKind::MigrationFailed)
        .await?;
    // Unmount in reverse order of mounting.
    boot_mnt.unmount().await?;
    proc_mnt.unmount().await?;
    sys_mnt.unmount().await?;
    dev_mnt.unmount().await?;
    Ok(())
}
#[instrument]
async fn swap_boot_label(new_label: NewLabel) -> Result<(), Error> {
let block_dev = new_label.0.block_dev();
Command::new("e2label")
.arg(block_dev)
.arg(new_label.0.label())
.invoke(crate::ErrorKind::BlockDevice)
.await?;
let mounted = TmpMountGuard::mount(&new_label.0.as_fs(), ReadWrite).await?;
Command::new("sed")
.arg("-i")
.arg(&format!(
"s/LABEL=\\(blue\\|green\\)/LABEL={}/g",
new_label.0.label()
))
.arg(mounted.as_ref().join("etc/fstab"))
.invoke(crate::ErrorKind::Filesystem)
.await?;
mounted.unmount().await?;
Command::new("sed")
.arg("-i")
.arg(&format!(
"s/PARTUUID=cb15ae4d-\\(03\\|04\\)/PARTUUID={}/g",
new_label.0.part_uuid()
))
.arg(Path::new(BOOT_RW_PATH).join("cmdline.txt.orig"))
.invoke(crate::ErrorKind::Filesystem)
.await?;
Command::new("sed")
.arg("-i")
.arg(&format!(
"s/PARTUUID=cb15ae4d-\\(03\\|04\\)/PARTUUID={}/g",
new_label.0.part_uuid()
))
.arg(Path::new(BOOT_RW_PATH).join("cmdline.txt"))
.invoke(crate::ErrorKind::Filesystem)
.await?;
UPDATED.store(true, Ordering::SeqCst);
async fn swap_boot_label() -> Result<(), Error> {
tokio::fs::write("/media/embassy/config/upgrade", b"").await?;
Ok(())
}
/// Regression test: `PARSE_COLOR` must pick the root partition label
/// ("blue" or "green") out of a real `/etc/fstab` captured from a device.
#[test]
fn test_capture() {
    let output = r#"
LABEL=blue / ext4 discard,errors=remount-ro 0 1
LABEL=system-boot /media/boot-rw vfat defaults 0 1
/media/boot-rw /boot none defaults,bind,ro 0 0
LABEL=EMBASSY /embassy-os vfat defaults 0 1
# a swapfile is not a swap partition, no line here
# use dphys-swapfile swap[on|off] for that
"#;
    assert_eq!(&PARSE_COLOR.captures(&output).unwrap()[1], "blue");
}

View File

@@ -1,14 +1,13 @@
use std::cmp::min;
use std::convert::TryFrom;
use std::fmt::Display;
use std::future::Future;
use std::io::Error as StdIOError;
use std::pin::Pin;
use std::task::{Context, Poll};
use color_eyre::eyre::eyre;
use futures::future::BoxFuture;
use futures::stream::BoxStream;
use futures::{FutureExt, StreamExt};
use futures::{FutureExt, Stream};
use http::header::{ACCEPT_RANGES, CONTENT_LENGTH, RANGE};
use hyper::body::Bytes;
use pin_project::pin_project;
@@ -30,9 +29,27 @@ pub struct HttpReader {
enum ReadInProgress {
None,
InProgress(
BoxFuture<'static, Result<BoxStream<'static, Result<Bytes, reqwest::Error>>, Error>>,
Pin<
Box<
dyn Future<
Output = Result<
Pin<
Box<
dyn Stream<Item = Result<Bytes, reqwest::Error>>
+ Send
+ Sync
+ 'static,
>,
>,
Error,
>,
> + Send
+ Sync
+ 'static,
>,
>,
),
Complete(BoxStream<'static, Result<Bytes, reqwest::Error>>),
Complete(Pin<Box<dyn Stream<Item = Result<Bytes, reqwest::Error>> + Send + Sync + 'static>>),
}
impl ReadInProgress {
fn take(&mut self) -> Self {
@@ -62,6 +79,7 @@ impl Display for RangeUnit {
impl HttpReader {
pub async fn new(http_url: Url) -> Result<Self, Error> {
let http_client = Client::builder()
// .proxy(reqwest::Proxy::all("socks5h://127.0.0.1:9050").unwrap())
.build()
.with_kind(crate::ErrorKind::TLSInit)?;
@@ -141,11 +159,14 @@ impl HttpReader {
start: usize,
len: usize,
total_bytes: usize,
) -> Result<BoxStream<'static, Result<Bytes, reqwest::Error>>, Error> {
) -> Result<
Pin<Box<dyn Stream<Item = Result<Bytes, reqwest::Error>> + Send + Sync + 'static>>,
Error,
> {
let end = min(start + len, total_bytes) - 1;
if start > end {
return Ok(futures::stream::empty().boxed());
return Ok(Box::pin(futures::stream::empty()));
}
let data_range = format!("{}={}-{} ", range_unit.unwrap_or_default(), start, end);
@@ -159,7 +180,7 @@ impl HttpReader {
.error_for_status()
.with_kind(crate::ErrorKind::Network)?;
Ok(data_resp.bytes_stream().boxed())
Ok(Box::pin(data_resp.bytes_stream()))
}
}
@@ -170,7 +191,9 @@ impl AsyncRead for HttpReader {
buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
fn poll_complete(
body: &mut BoxStream<'static, Result<Bytes, reqwest::Error>>,
body: &mut Pin<
Box<dyn Stream<Item = Result<Bytes, reqwest::Error>> + Send + Sync + 'static>,
>,
cx: &mut Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<Option<std::io::Result<usize>>> {
@@ -220,15 +243,14 @@ impl AsyncRead for HttpReader {
continue;
}
},
ReadInProgress::None => HttpReader::get_range(
ReadInProgress::None => Box::pin(HttpReader::get_range(
*this.range_unit,
this.http_client.clone(),
this.http_url.clone(),
*this.cursor_pos,
buf.remaining(),
*this.total_bytes,
)
.boxed(),
)),
ReadInProgress::InProgress(fut) => fut,
};
@@ -339,19 +361,20 @@ async fn main_test() {
}
/// End-to-end smoke test: stream an s9pk over the network via `HttpReader`
/// and parse its manifest. `#[ignore]`d by default because it hits a live
/// (onion) endpoint.
///
/// NOTE(review): this block contained both the old (`hello-world`) and new
/// (`ghost`) URL/assert lines interleaved (merge residue); kept the new
/// version.
#[tokio::test]
#[ignore]
async fn s9pk_test() {
    use tokio::io::BufReader;
    let http_url = Url::parse("http://qhc6ac47cytstejcepk2ia3ipadzjhlkc5qsktsbl4e7u2krfmfuaqqd.onion/content/files/2022/09/ghost.s9pk").unwrap();
    println!("Getting this resource: {}", http_url);
    let test_reader =
        BufReader::with_capacity(1024 * 1024, HttpReader::new(http_url).await.unwrap());
    let mut s9pk = crate::s9pk::reader::S9pkReader::from_reader(test_reader, false)
        .await
        .unwrap();
    let manifest = s9pk.manifest().await.unwrap();
    assert_eq!(&**manifest.id, "ghost");
}

View File

@@ -1,11 +1,19 @@
use std::future::Future;
use std::path::Path;
use std::task::Poll;
use futures::future::BoxFuture;
use futures::{FutureExt, TryStreamExt};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf};
use futures::future::{BoxFuture, Fuse};
use futures::{AsyncSeek, FutureExt, TryStreamExt};
use helpers::NonDetachingJoinHandle;
use tokio::io::{
duplex, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, DuplexStream, ReadBuf, WriteHalf,
};
use crate::ResultExt;
/// Marker trait for readers that can also seek; the blanket impl covers
/// every type implementing both `AsyncRead` and `AsyncSeek`.
pub trait AsyncReadSeek: AsyncRead + AsyncSeek {}
impl<T: AsyncRead + AsyncSeek> AsyncReadSeek for T {}
#[derive(Clone, Debug)]
pub struct AsyncCompat<T>(pub T);
impl<T> futures::io::AsyncRead for AsyncCompat<T>
@@ -246,3 +254,72 @@ pub fn response_to_reader(response: reqwest::Response) -> impl AsyncRead + Unpin
)
}))
}
/// An `AsyncRead` adapter over a writer-driven producer: `write_fn` is
/// spawned with the write half of an in-memory duplex pipe, and everything
/// it writes becomes readable from this struct, buffered up to
/// `max_buf_size` bytes.
#[pin_project::pin_project]
pub struct BufferedWriteReader {
    // Producer task handle, fused so completion is observed only once.
    #[pin]
    hdl: Fuse<NonDetachingJoinHandle<Result<(), std::io::Error>>>,
    // Read side of the duplex pipe fed by the producer.
    #[pin]
    rdr: DuplexStream,
}
impl BufferedWriteReader {
    /// Spawns `write_fn` with the write half of a duplex pipe of capacity
    /// `max_buf_size`; the returned reader yields the written bytes.
    pub fn new<
        W: FnOnce(WriteHalf<DuplexStream>) -> Fut,
        Fut: Future<Output = Result<(), std::io::Error>> + Send + Sync + 'static,
    >(
        write_fn: W,
        max_buf_size: usize,
    ) -> Self {
        let (w, rdr) = duplex(max_buf_size);
        // Split the writer end and drop its (unused) read half so the
        // producer only ever holds a WriteHalf.
        let (_, w) = tokio::io::split(w);
        BufferedWriteReader {
            hdl: NonDetachingJoinHandle::from(tokio::spawn(write_fn(w))).fuse(),
            rdr,
        }
    }
}
impl AsyncRead for BufferedWriteReader {
    fn poll_read(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        let this = self.project();
        // Poll the pipe first, then check the producer: a producer error (or
        // a task-level failure, mapped to BrokenPipe) takes precedence over
        // this poll's read result.
        // NOTE(review): bytes placed into `buf` by this same poll are
        // effectively discarded when the producer reports an error here —
        // confirm that is the intended behavior.
        let res = this.rdr.poll_read(cx, buf);
        match this.hdl.poll(cx) {
            Poll::Ready(Ok(Err(e))) => return Poll::Ready(Err(e)),
            Poll::Ready(Err(e)) => {
                return Poll::Ready(Err(std::io::Error::new(std::io::ErrorKind::BrokenPipe, e)))
            }
            _ => res,
        }
    }
}
/// An `AsyncRead` adapter that substitutes every occurrence of one byte with
/// another in the data read from `inner` (e.g. turning `\r` into `\n`).
#[pin_project::pin_project]
pub struct ByteReplacementReader<R> {
    /// Byte to search for.
    pub replace: u8,
    /// Byte written in its place.
    pub with: u8,
    #[pin]
    pub inner: R,
}
impl<R: AsyncRead> AsyncRead for ByteReplacementReader<R> {
    fn poll_read(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        let this = self.project();
        match this.inner.poll_read(cx, buf) {
            Poll::Ready(Ok(())) => {
                // Rewrite in place over the filled portion of the buffer.
                // Iterating `filled_mut()` once avoids the index loop's
                // repeated `filled()`/`filled_mut()` borrows and bounds
                // checks; semantics are unchanged.
                for b in buf.filled_mut() {
                    if *b == *this.replace {
                        *b = *this.with;
                    }
                }
                Poll::Ready(Ok(()))
            }
            a => a,
        }
    }
}

View File

@@ -24,9 +24,10 @@ use tracing::instrument;
use crate::shutdown::Shutdown;
use crate::{Error, ErrorKind, ResultExt as _};
pub mod config;
pub mod io;
pub mod http_reader;
pub mod io;
pub mod logger;
pub mod rsync;
pub mod serde;
#[derive(Clone, Copy, Debug)]

105
backend/src/util/rsync.rs Normal file
View File

@@ -0,0 +1,105 @@
use color_eyre::eyre::eyre;
use std::path::Path;
use helpers::NonDetachingJoinHandle;
use tokio::io::{AsyncBufReadExt, AsyncReadExt, BufReader};
use tokio::process::{Child, Command};
use tokio::sync::watch;
use tokio_stream::wrappers::WatchStream;
use crate::util::io::ByteReplacementReader;
use crate::{Error, ErrorKind};
/// Flags controlling how the `rsync` invocation is built.
pub struct RsyncOptions {
    /// Pass `--delete`: remove files at the destination that are absent from
    /// the source.
    pub delete: bool,
    /// Pass `--force`: delete directories even when non-empty.
    pub force: bool,
    /// Pass `--ignore-existing`: never overwrite files already present at
    /// the destination.
    pub ignore_existing: bool,
}
impl Default for RsyncOptions {
    /// Mirror-style defaults: delete and force on, ignore-existing off.
    fn default() -> Self {
        RsyncOptions {
            delete: true,
            force: true,
            ignore_existing: false,
        }
    }
}
/// Handle to a spawned `rsync` process together with the background tasks
/// that track its progress output and collect its stderr.
pub struct Rsync {
    /// The running rsync child process.
    pub command: Child,
    // Parses `--info=progress2` output from stdout and publishes fractions
    // to `progress`; held only so the task isn't detached.
    _progress_task: NonDetachingJoinHandle<Result<(), Error>>,
    // Buffers stderr so `wait` can log it (success) or report it (failure).
    stderr: NonDetachingJoinHandle<Result<String, Error>>,
    /// Stream of progress fractions in `0.0..=1.0`.
    pub progress: WatchStream<f64>,
}
impl Rsync {
    /// Spawns `rsync -a --info=progress2 <src> <dst>` with the given options
    /// and begins tracking it.
    ///
    /// A background task parses rsync's progress output (stdout) into a
    /// `0.0..=1.0` fraction published on `progress`; a second task buffers
    /// stderr so it can be reported by [`Rsync::wait`].
    pub fn new(
        src: impl AsRef<Path>,
        dst: impl AsRef<Path>,
        options: RsyncOptions,
    ) -> Result<Self, Error> {
        let mut cmd = Command::new("rsync");
        if options.delete {
            cmd.arg("--delete");
        }
        if options.force {
            cmd.arg("--force");
        }
        if options.ignore_existing {
            cmd.arg("--ignore-existing");
        }
        let mut command = cmd
            .arg("-a")
            .arg("--info=progress2")
            .arg(src.as_ref())
            .arg(dst.as_ref())
            .kill_on_drop(true)
            .stdout(std::process::Stdio::piped())
            // BUGFIX: stderr must also be piped — without this,
            // `command.stderr` below is `None` (stderr is inherited by
            // default) and `.take().unwrap()` panics.
            .stderr(std::process::Stdio::piped())
            .spawn()?;
        let cmd_stdout = command.stdout.take().unwrap();
        let mut cmd_stderr = command.stderr.take().unwrap();
        let (send, recv) = watch::channel(0.0);
        let stderr = tokio::spawn(async move {
            let mut res = String::new();
            cmd_stderr.read_to_string(&mut res).await?;
            Ok(res)
        })
        .into();
        let progress_task = tokio::spawn(async move {
            // rsync rewrites its progress line in place using `\r`; convert
            // those to `\n` so the output can be consumed line-by-line.
            let mut lines = BufReader::new(ByteReplacementReader {
                replace: b'\r',
                with: b'\n',
                inner: cmd_stdout,
            })
            .lines();
            while let Some(line) = lines.next_line().await? {
                // Progress lines contain a column like "42%".
                if let Some(percentage) = line
                    .split_ascii_whitespace()
                    .find_map(|col| col.strip_suffix("%"))
                {
                    // The receiver may be gone if the Rsync handle was
                    // dropped; that's not an error for the parser task
                    // (previously `.unwrap()`, which could panic here).
                    let _ = send.send(percentage.parse::<f64>()? / 100.0);
                }
            }
            Ok(())
        })
        .into();
        Ok(Rsync {
            command,
            _progress_task: progress_task,
            stderr,
            progress: WatchStream::new(recv),
        })
    }

    /// Waits for rsync to exit. Returns `Ok(())` on success (logging any
    /// stderr output), otherwise a `Filesystem` error carrying rsync's
    /// stderr.
    pub async fn wait(mut self) -> Result<(), Error> {
        let status = self.command.wait().await?;
        let stderr = self.stderr.await.unwrap()?;
        if status.success() {
            tracing::info!("rsync: {}", stderr);
        } else {
            return Err(Error::new(
                eyre!("rsync error: {}", stderr),
                ErrorKind::Filesystem,
            ));
        }
        Ok(())
    }
}

View File

@@ -5,7 +5,6 @@ use tokio::process::Command;
use super::*;
use crate::disk::BOOT_RW_PATH;
use crate::update::query_mounted_label;
use crate::util::Invoke;
const V0_3_0_1: emver::Version = emver::Version::new(0, 3, 0, 1);
@@ -25,16 +24,6 @@ impl VersionT for Version {
&*v0_3_0::V0_3_0_COMPAT
}
async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> {
let (_, current) = query_mounted_label().await?;
Command::new("sed")
.arg("-i")
.arg(&format!(
"s/PARTUUID=cb15ae4d-\\(03\\|04\\)/PARTUUID={}/g",
current.0.part_uuid()
))
.arg(Path::new(BOOT_RW_PATH).join("cmdline.txt.orig"))
.invoke(crate::ErrorKind::Filesystem)
.await?;
Ok(())
}
async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> {

View File

@@ -1,9 +1,8 @@
use emver::VersionRange;
use crate::hostname::{generate_id, sync_hostname};
use super::v0_3_0::V0_3_0_COMPAT;
use super::*;
use crate::hostname::{generate_id, sync_hostname};
const V0_3_2: emver::Version = emver::Version::new(0, 3, 2, 0);

View File

@@ -16,7 +16,7 @@ use crate::util::Version;
use crate::{Error, ResultExt};
pub const PKG_VOLUME_DIR: &str = "package-data/volumes";
pub const BACKUP_DIR: &str = "/media/embassy-os/backups";
pub const BACKUP_DIR: &str = "/media/embassy/backups";
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct Volumes(BTreeMap<VolumeId, Volume>);