Feat/combine uis (#2633)

* wip

* restructure backend for new ui structure

* new patchdb bootstrap, single websocket api, local storage migration, more

* update db websocket

* init apis

* update patch-db

* setup progress

* feat: implement state service, alert and routing

Signed-off-by: waterplea <alexander@inkin.ru>

* update setup wizard for new types

* feat: add init page

Signed-off-by: waterplea <alexander@inkin.ru>

* chore: refactor message, patch-db source stream and connection service

Signed-off-by: waterplea <alexander@inkin.ru>

* fix method not found on state

* fix backend bugs

* fix compat assets

* address comments

* remove unneeded styling

* cleaner progress

* bugfixes

* fix init logs

* fix progress reporting

* fix navigation by getting state after init

* remove patch dependency from live api

* fix caching

* re-add patchDB to live api

* fix metrics values

* send close frame

* add bootId and fix polling

---------

Signed-off-by: waterplea <alexander@inkin.ru>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
Co-authored-by: waterplea <alexander@inkin.ru>
This commit is contained in:
Matt Hill
2024-06-19 13:51:44 -06:00
committed by GitHub
parent e92d4ff147
commit da3720c7a9
147 changed files with 3939 additions and 2637 deletions

View File

@@ -7,7 +7,7 @@ PLATFORM := $(shell if [ -f ./PLATFORM.txt ]; then cat ./PLATFORM.txt; else echo
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g'; fi)
IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo iso; fi)
BINS := core/target/$(ARCH)-unknown-linux-musl/release/startbox core/target/$(ARCH)-unknown-linux-musl/release/containerbox
WEB_UIS := web/dist/raw/ui web/dist/raw/setup-wizard web/dist/raw/diagnostic-ui web/dist/raw/install-wizard
WEB_UIS := web/dist/raw/ui web/dist/raw/setup-wizard web/dist/raw/install-wizard
FIRMWARE_ROMS := ./firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
BUILD_SRC := $(shell git ls-files build) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
DEBIAN_SRC := $(shell git ls-files debian/)
@@ -20,7 +20,6 @@ CORE_SRC := $(shell git ls-files core) $(shell git ls-files --recurse-submodules
WEB_SHARED_SRC := $(shell git ls-files web/projects/shared) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules/.package-lock.json web/config.json patch-db/client/dist web/patchdb-ui-seed.json
WEB_UI_SRC := $(shell git ls-files web/projects/ui)
WEB_SETUP_WIZARD_SRC := $(shell git ls-files web/projects/setup-wizard)
WEB_DIAGNOSTIC_UI_SRC := $(shell git ls-files web/projects/diagnostic-ui)
WEB_INSTALL_WIZARD_SRC := $(shell git ls-files web/projects/install-wizard)
PATCH_DB_CLIENT_SRC := $(shell git ls-files --recurse-submodules patch-db/client)
GZIP_BIN := $(shell which pigz || which gzip)
@@ -244,10 +243,6 @@ web/dist/raw/setup-wizard: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC)
npm --prefix web run build:setup
touch web/dist/raw/setup-wizard
web/dist/raw/diagnostic-ui: $(WEB_DIAGNOSTIC_UI_SRC) $(WEB_SHARED_SRC)
npm --prefix web run build:dui
touch web/dist/raw/diagnostic-ui
web/dist/raw/install-wizard: $(WEB_INSTALL_WIZARD_SRC) $(WEB_SHARED_SRC)
npm --prefix web run build:install-wiz
touch web/dist/raw/install-wizard

718
core/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -59,6 +59,7 @@ async-stream = "0.3.5"
async-trait = "0.1.74"
axum = { version = "0.7.3", features = ["ws"] }
axum-server = "0.6.0"
barrage = "0.2.3"
backhand = "0.18.0"
base32 = "0.4.0"
base64 = "0.21.4"
@@ -102,7 +103,7 @@ id-pool = { version = "0.2.2", default-features = false, features = [
] }
imbl = "2.0.2"
imbl-value = { git = "https://github.com/Start9Labs/imbl-value.git" }
include_dir = "0.7.3"
include_dir = { version = "0.7.3", features = ["metadata"] }
indexmap = { version = "2.0.2", features = ["serde"] }
indicatif = { version = "0.17.7", features = ["tokio"] }
integer-encoding = { version = "4.0.0", features = ["tokio_async"] }
@@ -178,6 +179,7 @@ tokio-util = { version = "0.7.9", features = ["io"] }
torut = { git = "https://github.com/Start9Labs/torut.git", branch = "update/dependencies", features = [
"serialize",
] }
tower-service = "0.3.2"
tracing = "0.1.39"
tracing-error = "0.2.0"
tracing-futures = "0.2.5"

View File

@@ -4,25 +4,25 @@ use std::sync::Arc;
use clap::Parser;
use futures::{stream, StreamExt};
use models::PackageId;
use openssl::x509::X509;
use patch_db::json_ptr::ROOT;
use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3;
use tokio::sync::Mutex;
use tracing::instrument;
use ts_rs::TS;
use super::target::BackupTargetId;
use crate::backup::os::OsBackup;
use crate::context::setup::SetupResult;
use crate::context::{RpcContext, SetupContext};
use crate::db::model::Database;
use crate::disk::mount::backup::BackupMountGuard;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
use crate::hostname::Hostname;
use crate::init::init;
use crate::init::{init, InitResult};
use crate::prelude::*;
use crate::s9pk::S9pk;
use crate::service::service_map::DownloadInstallFuture;
use crate::setup::SetupExecuteProgress;
use crate::util::serde::IoFormat;
#[derive(Deserialize, Serialize, Parser, TS)]
@@ -67,14 +67,21 @@ pub async fn restore_packages_rpc(
Ok(())
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn recover_full_embassy(
ctx: SetupContext,
ctx: &SetupContext,
disk_guid: Arc<String>,
start_os_password: String,
recovery_source: TmpMountGuard,
recovery_password: Option<String>,
) -> Result<(Arc<String>, Hostname, OnionAddressV3, X509), Error> {
SetupExecuteProgress {
init_phases,
restore_phase,
rpc_ctx_phases,
}: SetupExecuteProgress,
) -> Result<(SetupResult, RpcContext), Error> {
let mut restore_phase = restore_phase.or_not_found("restore progress")?;
let backup_guard = BackupMountGuard::mount(
recovery_source,
recovery_password.as_deref().unwrap_or_default(),
@@ -99,10 +106,17 @@ pub async fn recover_full_embassy(
db.put(&ROOT, &Database::init(&os_backup.account)?).await?;
drop(db);
init(&ctx.config).await?;
let InitResult { net_ctrl } = init(&ctx.config, init_phases).await?;
let rpc_ctx = RpcContext::init(&ctx.config, disk_guid.clone()).await?;
let rpc_ctx = RpcContext::init(
&ctx.config,
disk_guid.clone(),
Some(net_ctrl),
rpc_ctx_phases,
)
.await?;
restore_phase.start();
let ids: Vec<_> = backup_guard
.metadata
.package_backups
@@ -110,26 +124,26 @@ pub async fn recover_full_embassy(
.cloned()
.collect();
let tasks = restore_packages(&rpc_ctx, backup_guard, ids).await?;
restore_phase.set_total(tasks.len() as u64);
let restore_phase = Arc::new(Mutex::new(restore_phase));
stream::iter(tasks)
.for_each_concurrent(5, |(id, res)| async move {
match async { res.await?.await }.await {
Ok(_) => (),
Err(err) => {
tracing::error!("Error restoring package {}: {}", id, err);
tracing::debug!("{:?}", err);
.for_each_concurrent(5, |(id, res)| {
let restore_phase = restore_phase.clone();
async move {
match async { res.await?.await }.await {
Ok(_) => (),
Err(err) => {
tracing::error!("Error restoring package {}: {}", id, err);
tracing::debug!("{:?}", err);
}
}
*restore_phase.lock().await += 1;
}
})
.await;
restore_phase.lock().await.complete();
rpc_ctx.shutdown().await?;
Ok((
disk_guid,
os_backup.account.hostname,
os_backup.account.tor_key.public().get_onion_address(),
os_backup.account.root_ca_cert,
))
Ok(((&os_backup.account).try_into()?, rpc_ctx))
}
#[instrument(skip(ctx, backup_guard))]

View File

@@ -14,7 +14,8 @@ use crate::util::logger::EmbassyLogger;
async fn inner_main(config: &RegistryConfig) -> Result<(), Error> {
let server = async {
let ctx = RegistryContext::init(config).await?;
let server = WebServer::registry(ctx.listen, ctx.clone());
let mut server = WebServer::new(ctx.listen);
server.serve_registry(ctx.clone());
let mut shutdown_recv = ctx.shutdown.subscribe();

View File

@@ -1,47 +1,56 @@
use std::net::{Ipv6Addr, SocketAddr};
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use helpers::NonDetachingJoinHandle;
use tokio::process::Command;
use tracing::instrument;
use crate::context::config::ServerConfig;
use crate::context::{DiagnosticContext, InstallContext, SetupContext};
use crate::disk::fsck::{RepairStrategy, RequiresReboot};
use crate::context::rpc::InitRpcContextPhases;
use crate::context::{DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext};
use crate::disk::fsck::RepairStrategy;
use crate::disk::main::DEFAULT_PASSWORD;
use crate::disk::REPAIR_DISK_PATH;
use crate::firmware::update_firmware;
use crate::init::STANDBY_MODE_PATH;
use crate::firmware::{check_for_firmware_update, update_firmware};
use crate::init::{InitPhases, InitResult, STANDBY_MODE_PATH};
use crate::net::web_server::WebServer;
use crate::prelude::*;
use crate::progress::FullProgressTracker;
use crate::shutdown::Shutdown;
use crate::sound::{BEP, CHIME};
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt, PLATFORM};
use crate::PLATFORM;
#[instrument(skip_all)]
async fn setup_or_init(config: &ServerConfig) -> Result<Option<Shutdown>, Error> {
let song = NonDetachingJoinHandle::from(tokio::spawn(async {
loop {
BEP.play().await.unwrap();
BEP.play().await.unwrap();
tokio::time::sleep(Duration::from_secs(30)).await;
}
}));
async fn setup_or_init(
server: &mut WebServer,
config: &ServerConfig,
) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> {
if let Some(firmware) = check_for_firmware_update()
.await
.map_err(|e| {
tracing::warn!("Error checking for firmware update: {e}");
tracing::debug!("{e:?}");
})
.ok()
.and_then(|a| a)
{
let init_ctx = InitContext::init(config).await?;
let handle = &init_ctx.progress;
let mut update_phase = handle.add_phase("Updating Firmware".into(), Some(10));
let mut reboot_phase = handle.add_phase("Rebooting".into(), Some(1));
match update_firmware().await {
Ok(RequiresReboot(true)) => {
return Ok(Some(Shutdown {
export_args: None,
restart: true,
}))
}
Err(e) => {
server.serve_init(init_ctx);
update_phase.start();
if let Err(e) = update_firmware(firmware).await {
tracing::warn!("Error performing firmware update: {e}");
tracing::debug!("{e:?}");
} else {
update_phase.complete();
reboot_phase.start();
return Ok(Err(Shutdown {
export_args: None,
restart: true,
}));
}
_ => (),
}
Command::new("ln")
@@ -84,14 +93,7 @@ async fn setup_or_init(config: &ServerConfig) -> Result<Option<Shutdown>, Error>
let ctx = InstallContext::init().await?;
let server = WebServer::install(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)?;
drop(song);
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
server.serve_install(ctx.clone());
ctx.shutdown
.subscribe()
@@ -99,33 +101,23 @@ async fn setup_or_init(config: &ServerConfig) -> Result<Option<Shutdown>, Error>
.await
.expect("context dropped");
server.shutdown().await;
return Ok(Err(Shutdown {
export_args: None,
restart: true,
}));
}
Command::new("reboot")
.invoke(crate::ErrorKind::Unknown)
.await?;
} else if tokio::fs::metadata("/media/startos/config/disk.guid")
if tokio::fs::metadata("/media/startos/config/disk.guid")
.await
.is_err()
{
let ctx = SetupContext::init(config)?;
let server = WebServer::setup(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)?;
drop(song);
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
server.serve_setup(ctx.clone());
let mut shutdown = ctx.shutdown.subscribe();
shutdown.recv().await.expect("context dropped");
server.shutdown().await;
drop(shutdown);
tokio::task::yield_now().await;
if let Err(e) = Command::new("killall")
.arg("firefox-esr")
@@ -135,19 +127,40 @@ async fn setup_or_init(config: &ServerConfig) -> Result<Option<Shutdown>, Error>
tracing::error!("Failed to kill kiosk: {}", e);
tracing::debug!("{:?}", e);
}
Ok(Ok(match ctx.result.get() {
Some(Ok((_, rpc_ctx))) => (rpc_ctx.clone(), ctx.progress.clone()),
Some(Err(e)) => return Err(e.clone_output()),
None => {
return Err(Error::new(
eyre!("Setup mode exited before setup completed"),
ErrorKind::Unknown,
))
}
}))
} else {
let init_ctx = InitContext::init(config).await?;
let handle = init_ctx.progress.clone();
let mut disk_phase = handle.add_phase("Opening data drive".into(), Some(10));
let init_phases = InitPhases::new(&handle);
let rpc_ctx_phases = InitRpcContextPhases::new(&handle);
server.serve_init(init_ctx);
disk_phase.start();
let guid_string = tokio::fs::read_to_string("/media/startos/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?;
let guid = guid_string.trim();
let disk_guid = Arc::new(String::from(guid_string.trim()));
let requires_reboot = crate::disk::main::import(
guid,
&**disk_guid,
config.datadir(),
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
RepairStrategy::Aggressive
} else {
RepairStrategy::Preen
},
if guid.ends_with("_UNENC") {
if disk_guid.ends_with("_UNENC") {
None
} else {
Some(DEFAULT_PASSWORD)
@@ -159,40 +172,31 @@ async fn setup_or_init(config: &ServerConfig) -> Result<Option<Shutdown>, Error>
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
}
if requires_reboot.0 {
crate::disk::main::export(guid, config.datadir()).await?;
Command::new("reboot")
.invoke(crate::ErrorKind::Unknown)
.await?;
}
disk_phase.complete();
tracing::info!("Loaded Disk");
crate::init::init(config).await?;
drop(song);
}
Ok(None)
}
async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
let script = path.as_ref();
if script.exists() {
match Command::new("/bin/bash").arg(script).spawn() {
Ok(mut c) => {
if let Err(e) = c.wait().await {
tracing::error!("Error Running {}: {}", script.display(), e);
tracing::debug!("{:?}", e);
}
}
Err(e) => {
tracing::error!("Error Running {}: {}", script.display(), e);
tracing::debug!("{:?}", e);
}
if requires_reboot.0 {
let mut reboot_phase = handle.add_phase("Rebooting".into(), Some(1));
reboot_phase.start();
return Ok(Err(Shutdown {
export_args: Some((disk_guid, config.datadir().to_owned())),
restart: true,
}));
}
let InitResult { net_ctrl } = crate::init::init(config, init_phases).await?;
let rpc_ctx = RpcContext::init(config, disk_guid, Some(net_ctrl), rpc_ctx_phases).await?;
Ok(Ok((rpc_ctx, handle)))
}
}
#[instrument(skip_all)]
async fn inner_main(config: &ServerConfig) -> Result<Option<Shutdown>, Error> {
pub async fn main(
server: &mut WebServer,
config: &ServerConfig,
) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> {
if &*PLATFORM == "raspberrypi" && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {
tokio::fs::remove_file(STANDBY_MODE_PATH).await?;
Command::new("sync").invoke(ErrorKind::Filesystem).await?;
@@ -200,16 +204,11 @@ async fn inner_main(config: &ServerConfig) -> Result<Option<Shutdown>, Error> {
futures::future::pending::<()>().await;
}
crate::sound::BEP.play().await?;
run_script_if_exists("/media/startos/config/preinit.sh").await;
let res = match setup_or_init(config).await {
let res = match setup_or_init(server, config).await {
Err(e) => {
async move {
tracing::error!("{}", e.source);
tracing::debug!("{}", e.source);
crate::sound::BEETHOVEN.play().await?;
tracing::error!("{e}");
tracing::debug!("{e:?}");
let ctx = DiagnosticContext::init(
config,
@@ -229,44 +228,16 @@ async fn inner_main(config: &ServerConfig) -> Result<Option<Shutdown>, Error> {
e,
)?;
let server = WebServer::diagnostic(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)?;
server.serve_diagnostic(ctx.clone());
let shutdown = ctx.shutdown.subscribe().recv().await.unwrap();
server.shutdown().await;
Ok(shutdown)
Ok(Err(shutdown))
}
.await
}
Ok(s) => Ok(s),
};
run_script_if_exists("/media/startos/config/postinit.sh").await;
res
}
pub fn main(config: &ServerConfig) {
let res = {
let rt = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.expect("failed to initialize runtime");
rt.block_on(inner_main(config))
};
match res {
Ok(Some(shutdown)) => shutdown.execute(),
Ok(None) => (),
Err(e) => {
eprintln!("{}", e.source);
tracing::debug!("{:?}", e.source);
drop(e.source);
std::process::exit(e.kind as i32)
}
}
}

View File

@@ -1,6 +1,5 @@
use std::ffi::OsString;
use std::net::{Ipv6Addr, SocketAddr};
use std::path::Path;
use std::sync::Arc;
use clap::Parser;
@@ -10,7 +9,8 @@ use tokio::signal::unix::signal;
use tracing::instrument;
use crate::context::config::ServerConfig;
use crate::context::{DiagnosticContext, RpcContext};
use crate::context::rpc::InitRpcContextPhases;
use crate::context::{DiagnosticContext, InitContext, RpcContext};
use crate::net::web_server::WebServer;
use crate::shutdown::Shutdown;
use crate::system::launch_metrics_task;
@@ -18,9 +18,31 @@ use crate::util::logger::EmbassyLogger;
use crate::{Error, ErrorKind, ResultExt};
#[instrument(skip_all)]
async fn inner_main(config: &ServerConfig) -> Result<Option<Shutdown>, Error> {
let (rpc_ctx, server, shutdown) = async {
let rpc_ctx = RpcContext::init(
async fn inner_main(
server: &mut WebServer,
config: &ServerConfig,
) -> Result<Option<Shutdown>, Error> {
let rpc_ctx = if !tokio::fs::metadata("/run/startos/initialized")
.await
.is_ok()
{
let (ctx, handle) = match super::start_init::main(server, &config).await? {
Err(s) => return Ok(Some(s)),
Ok(ctx) => ctx,
};
tokio::fs::write("/run/startos/initialized", "").await?;
server.serve_main(ctx.clone());
handle.complete();
ctx
} else {
let init_ctx = InitContext::init(config).await?;
let handle = init_ctx.progress.clone();
let rpc_ctx_phases = InitRpcContextPhases::new(&handle);
server.serve_init(init_ctx);
let ctx = RpcContext::init(
config,
Arc::new(
tokio::fs::read_to_string("/media/startos/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
@@ -28,13 +50,19 @@ async fn inner_main(config: &ServerConfig) -> Result<Option<Shutdown>, Error> {
.trim()
.to_owned(),
),
None,
rpc_ctx_phases,
)
.await?;
server.serve_main(ctx.clone());
handle.complete();
ctx
};
let (rpc_ctx, shutdown) = async {
crate::hostname::sync_hostname(&rpc_ctx.account.read().await.hostname).await?;
let server = WebServer::main(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
rpc_ctx.clone(),
)?;
let mut shutdown_recv = rpc_ctx.shutdown.subscribe();
@@ -74,8 +102,6 @@ async fn inner_main(config: &ServerConfig) -> Result<Option<Shutdown>, Error> {
.await
});
crate::sound::CHIME.play().await?;
metrics_task
.map_err(|e| {
Error::new(
@@ -93,10 +119,9 @@ async fn inner_main(config: &ServerConfig) -> Result<Option<Shutdown>, Error> {
sig_handler.abort();
Ok::<_, Error>((rpc_ctx, server, shutdown))
Ok::<_, Error>((rpc_ctx, shutdown))
}
.await?;
server.shutdown().await;
rpc_ctx.shutdown().await?;
tracing::info!("RPC Context is dropped");
@@ -109,24 +134,22 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
let config = ServerConfig::parse_from(args).load().unwrap();
if !Path::new("/run/embassy/initialized").exists() {
super::start_init::main(&config);
std::fs::write("/run/embassy/initialized", "").unwrap();
}
let res = {
let rt = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.expect("failed to initialize runtime");
rt.block_on(async {
match inner_main(&config).await {
Ok(a) => Ok(a),
let mut server = WebServer::new(SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80));
match inner_main(&mut server, &config).await {
Ok(a) => {
server.shutdown().await;
Ok(a)
}
Err(e) => {
async {
tracing::error!("{}", e.source);
tracing::debug!("{:?}", e.source);
crate::sound::BEETHOVEN.play().await?;
tracing::error!("{e}");
tracing::debug!("{e:?}");
let ctx = DiagnosticContext::init(
&config,
if tokio::fs::metadata("/media/startos/config/disk.guid")
@@ -145,10 +168,7 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
e,
)?;
let server = WebServer::diagnostic(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)?;
server.serve_diagnostic(ctx.clone());
let mut shutdown = ctx.shutdown.subscribe();
@@ -157,7 +177,7 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
server.shutdown().await;
Ok::<_, Error>(shutdown)
Ok::<_, Error>(Some(shutdown))
}
.await
}

View File

@@ -18,7 +18,7 @@ use tracing::instrument;
use super::setup::CURRENT_SECRET;
use crate::context::config::{local_config_path, ClientConfig};
use crate::context::{DiagnosticContext, InstallContext, RpcContext, SetupContext};
use crate::context::{DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext};
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::prelude::*;
use crate::rpc_continuations::Guid;
@@ -271,6 +271,11 @@ impl CallRemote<DiagnosticContext> for CliContext {
call_remote_http(&self.client, self.rpc_url.clone(), method, params).await
}
}
impl CallRemote<InitContext> for CliContext {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
call_remote_http(&self.client, self.rpc_url.clone(), method, params).await
}
}
impl CallRemote<SetupContext> for CliContext {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
call_remote_http(&self.client, self.rpc_url.clone(), method, params).await

View File

@@ -14,7 +14,7 @@ use crate::Error;
pub struct DiagnosticContextSeed {
pub datadir: PathBuf,
pub shutdown: Sender<Option<Shutdown>>,
pub shutdown: Sender<Shutdown>,
pub error: Arc<RpcError>,
pub disk_guid: Option<Arc<String>>,
pub rpc_continuations: RpcContinuations,

View File

@@ -0,0 +1,47 @@
use std::ops::Deref;
use std::sync::Arc;
use rpc_toolkit::Context;
use tokio::sync::broadcast::Sender;
use tracing::instrument;
use crate::context::config::ServerConfig;
use crate::progress::FullProgressTracker;
use crate::rpc_continuations::RpcContinuations;
use crate::Error;
pub struct InitContextSeed {
pub config: ServerConfig,
pub progress: FullProgressTracker,
pub shutdown: Sender<()>,
pub rpc_continuations: RpcContinuations,
}
#[derive(Clone)]
pub struct InitContext(Arc<InitContextSeed>);
impl InitContext {
#[instrument(skip_all)]
pub async fn init(cfg: &ServerConfig) -> Result<Self, Error> {
let (shutdown, _) = tokio::sync::broadcast::channel(1);
Ok(Self(Arc::new(InitContextSeed {
config: cfg.clone(),
progress: FullProgressTracker::new(),
shutdown,
rpc_continuations: RpcContinuations::new(),
})))
}
}
impl AsRef<RpcContinuations> for InitContext {
fn as_ref(&self) -> &RpcContinuations {
&self.rpc_continuations
}
}
impl Context for InitContext {}
impl Deref for InitContext {
type Target = InitContextSeed;
fn deref(&self) -> &Self::Target {
&*self.0
}
}

View File

@@ -6,11 +6,13 @@ use tokio::sync::broadcast::Sender;
use tracing::instrument;
use crate::net::utils::find_eth_iface;
use crate::rpc_continuations::RpcContinuations;
use crate::Error;
pub struct InstallContextSeed {
pub ethernet_interface: String,
pub shutdown: Sender<()>,
pub rpc_continuations: RpcContinuations,
}
#[derive(Clone)]
@@ -22,10 +24,17 @@ impl InstallContext {
Ok(Self(Arc::new(InstallContextSeed {
ethernet_interface: find_eth_iface().await?,
shutdown,
rpc_continuations: RpcContinuations::new(),
})))
}
}
impl AsRef<RpcContinuations> for InstallContext {
fn as_ref(&self) -> &RpcContinuations {
&self.rpc_continuations
}
}
impl Context for InstallContext {}
impl Deref for InstallContext {
type Target = InstallContextSeed;

View File

@@ -1,12 +1,14 @@
pub mod cli;
pub mod config;
pub mod diagnostic;
pub mod init;
pub mod install;
pub mod rpc;
pub mod setup;
pub use cli::CliContext;
pub use diagnostic::DiagnosticContext;
pub use init::InitContext;
pub use install::InstallContext;
pub use rpc::RpcContext;
pub use setup::SetupContext;

View File

@@ -6,11 +6,12 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
use imbl_value::InternedString;
use josekit::jwk::Jwk;
use reqwest::{Client, Proxy};
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{CallRemote, Context, Empty};
use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
use tokio::sync::{broadcast, Mutex, RwLock};
use tokio::time::Instant;
use tracing::instrument;
@@ -22,12 +23,12 @@ use crate::dependencies::compute_dependency_config_errs;
use crate::disk::OsPartitionInfo;
use crate::init::check_time_is_synchronized;
use crate::lxc::{ContainerId, LxcContainer, LxcManager};
use crate::middleware::auth::HashSessionToken;
use crate::net::net_controller::NetController;
use crate::net::net_controller::{NetController, PreInitNetController};
use crate::net::utils::{find_eth_iface, find_wifi_iface};
use crate::net::wifi::WpaCli;
use crate::prelude::*;
use crate::rpc_continuations::RpcContinuations;
use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle};
use crate::rpc_continuations::{OpenAuthedContinuations, RpcContinuations};
use crate::service::ServiceMap;
use crate::shutdown::Shutdown;
use crate::system::get_mem_info;
@@ -49,7 +50,7 @@ pub struct RpcContextSeed {
pub shutdown: broadcast::Sender<Option<Shutdown>>,
pub tor_socks: SocketAddr,
pub lxc_manager: Arc<LxcManager>,
pub open_authed_websockets: Mutex<BTreeMap<HashSessionToken, Vec<oneshot::Sender<()>>>>,
pub open_authed_continuations: OpenAuthedContinuations<InternedString>,
pub rpc_continuations: RpcContinuations,
pub wifi_manager: Option<Arc<RwLock<WpaCli>>>,
pub current_secret: Arc<Jwk>,
@@ -68,45 +69,103 @@ pub struct Hardware {
pub ram: u64,
}
pub struct InitRpcContextPhases {
load_db: PhaseProgressTrackerHandle,
init_net_ctrl: PhaseProgressTrackerHandle,
read_device_info: PhaseProgressTrackerHandle,
cleanup_init: CleanupInitPhases,
}
impl InitRpcContextPhases {
pub fn new(handle: &FullProgressTracker) -> Self {
Self {
load_db: handle.add_phase("Loading database".into(), Some(5)),
init_net_ctrl: handle.add_phase("Initializing network".into(), Some(1)),
read_device_info: handle.add_phase("Reading device information".into(), Some(1)),
cleanup_init: CleanupInitPhases::new(handle),
}
}
}
pub struct CleanupInitPhases {
init_services: PhaseProgressTrackerHandle,
check_dependencies: PhaseProgressTrackerHandle,
}
impl CleanupInitPhases {
pub fn new(handle: &FullProgressTracker) -> Self {
Self {
init_services: handle.add_phase("Initializing services".into(), Some(10)),
check_dependencies: handle.add_phase("Checking dependencies".into(), Some(1)),
}
}
}
#[derive(Clone)]
pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext {
#[instrument(skip_all)]
pub async fn init(config: &ServerConfig, disk_guid: Arc<String>) -> Result<Self, Error> {
tracing::info!("Loaded Config");
pub async fn init(
config: &ServerConfig,
disk_guid: Arc<String>,
net_ctrl: Option<PreInitNetController>,
InitRpcContextPhases {
mut load_db,
mut init_net_ctrl,
mut read_device_info,
cleanup_init,
}: InitRpcContextPhases,
) -> Result<Self, Error> {
let tor_proxy = config.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(127, 0, 0, 1),
9050,
)));
let (shutdown, _) = tokio::sync::broadcast::channel(1);
let db = TypedPatchDb::<Database>::load(config.db().await?).await?;
load_db.start();
let db = if let Some(net_ctrl) = &net_ctrl {
net_ctrl.db.clone()
} else {
TypedPatchDb::<Database>::load(config.db().await?).await?
};
let peek = db.peek().await;
let account = AccountInfo::load(&peek)?;
load_db.complete();
tracing::info!("Opened PatchDB");
init_net_ctrl.start();
let net_controller = Arc::new(
NetController::init(
db.clone(),
config
.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
tor_proxy,
if let Some(net_ctrl) = net_ctrl {
net_ctrl
} else {
PreInitNetController::init(
db.clone(),
config
.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
tor_proxy,
&account.hostname,
account.tor_key.clone(),
)
.await?
},
config
.dns_bind
.as_deref()
.unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
&account.hostname,
account.tor_key.clone(),
)
.await?,
);
init_net_ctrl.complete();
tracing::info!("Initialized Net Controller");
let services = ServiceMap::default();
let metrics_cache = RwLock::<Option<crate::system::Metrics>>::new(None);
tracing::info!("Initialized Notification Manager");
let tor_proxy_url = format!("socks5h://{tor_proxy}");
read_device_info.start();
let devices = lshw().await?;
let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
read_device_info.complete();
if !db
.peek()
@@ -163,7 +222,7 @@ impl RpcContext {
shutdown,
tor_socks: tor_proxy,
lxc_manager: Arc::new(LxcManager::new()),
open_authed_websockets: Mutex::new(BTreeMap::new()),
open_authed_continuations: OpenAuthedContinuations::new(),
rpc_continuations: RpcContinuations::new(),
wifi_manager: wifi_interface
.clone()
@@ -196,7 +255,7 @@ impl RpcContext {
});
let res = Self(seed.clone());
res.cleanup_and_initialize().await?;
res.cleanup_and_initialize(cleanup_init).await?;
tracing::info!("Cleaned up transient states");
Ok(res)
}
@@ -210,11 +269,18 @@ impl RpcContext {
Ok(())
}
#[instrument(skip(self))]
pub async fn cleanup_and_initialize(&self) -> Result<(), Error> {
self.services.init(&self).await?;
#[instrument(skip_all)]
pub async fn cleanup_and_initialize(
&self,
CleanupInitPhases {
init_services,
mut check_dependencies,
}: CleanupInitPhases,
) -> Result<(), Error> {
self.services.init(&self, init_services).await?;
tracing::info!("Initialized Package Managers");
check_dependencies.start();
let mut updated_current_dependents = BTreeMap::new();
let peek = self.db.peek().await;
for (package_id, package) in peek.as_public().as_package_data().as_entries()?.into_iter() {
@@ -238,6 +304,7 @@ impl RpcContext {
Ok(())
})
.await?;
check_dependencies.complete();
Ok(())
}
@@ -274,6 +341,11 @@ impl AsRef<RpcContinuations> for RpcContext {
&self.rpc_continuations
}
}
impl AsRef<OpenAuthedContinuations<InternedString>> for RpcContext {
fn as_ref(&self) -> &OpenAuthedContinuations<InternedString> {
&self.open_authed_continuations
}
}
impl Context for RpcContext {}
impl Deref for RpcContext {
type Target = RpcContextSeed;

View File

@@ -1,23 +1,31 @@
use std::ops::Deref;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use futures::{Future, StreamExt};
use helpers::NonDetachingJoinHandle;
use josekit::jwk::Jwk;
use patch_db::PatchDb;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::Context;
use serde::{Deserialize, Serialize};
use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool;
use tokio::sync::broadcast::Sender;
use tokio::sync::RwLock;
use tokio::sync::OnceCell;
use tracing::instrument;
use ts_rs::TS;
use crate::account::AccountInfo;
use crate::context::config::ServerConfig;
use crate::context::RpcContext;
use crate::disk::OsPartitionInfo;
use crate::init::init_postgres;
use crate::prelude::*;
use crate::setup::SetupStatus;
use crate::progress::FullProgressTracker;
use crate::rpc_continuations::{Guid, RpcContinuation, RpcContinuations};
use crate::setup::SetupProgress;
use crate::util::net::WebSocketExt;
lazy_static::lazy_static! {
pub static ref CURRENT_SECRET: Jwk = Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).unwrap_or_else(|e| {
@@ -27,30 +35,35 @@ lazy_static::lazy_static! {
});
}
#[derive(Clone, Serialize, Deserialize)]
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct SetupResult {
pub tor_address: String,
pub lan_address: String,
pub root_ca: String,
}
impl TryFrom<&AccountInfo> for SetupResult {
type Error = Error;
fn try_from(value: &AccountInfo) -> Result<Self, Self::Error> {
Ok(Self {
tor_address: format!("https://{}", value.tor_key.public().get_onion_address()),
lan_address: value.hostname.lan_address(),
root_ca: String::from_utf8(value.root_ca_cert.to_pem()?)?,
})
}
}
pub struct SetupContextSeed {
pub config: ServerConfig,
pub os_partitions: OsPartitionInfo,
pub disable_encryption: bool,
pub progress: FullProgressTracker,
pub task: OnceCell<NonDetachingJoinHandle<()>>,
pub result: OnceCell<Result<(SetupResult, RpcContext), Error>>,
pub shutdown: Sender<()>,
pub datadir: PathBuf,
pub selected_v2_drive: RwLock<Option<PathBuf>>,
pub cached_product_key: RwLock<Option<Arc<String>>>,
pub setup_status: RwLock<Option<Result<SetupStatus, RpcError>>>,
pub setup_result: RwLock<Option<(Arc<String>, SetupResult)>>,
}
impl AsRef<Jwk> for SetupContextSeed {
fn as_ref(&self) -> &Jwk {
&*CURRENT_SECRET
}
pub rpc_continuations: RpcContinuations,
}
#[derive(Clone)]
@@ -69,12 +82,12 @@ impl SetupContext {
)
})?,
disable_encryption: config.disable_encryption.unwrap_or(false),
progress: FullProgressTracker::new(),
task: OnceCell::new(),
result: OnceCell::new(),
shutdown,
datadir,
selected_v2_drive: RwLock::new(None),
cached_product_key: RwLock::new(None),
setup_status: RwLock::new(None),
setup_result: RwLock::new(None),
rpc_continuations: RpcContinuations::new(),
})))
}
#[instrument(skip_all)]
@@ -97,6 +110,104 @@ impl SetupContext {
.with_kind(crate::ErrorKind::Database)?;
Ok(secret_store)
}
    /// Spawn the one-shot setup task produced by `f`, publishing its outcome
    /// into `self.result` and completing the progress tracker when done.
    ///
    /// Returns `InvalidRequest` if setup is already complete or already in
    /// progress — the `task` slot is a `OnceCell` and can only be set once.
    pub fn run_setup<F, Fut>(&self, f: F) -> Result<(), Error>
    where
        F: FnOnce() -> Fut + Send + 'static,
        Fut: Future<Output = Result<(SetupResult, RpcContext), Error>> + Send,
    {
        let local_ctx = self.clone();
        self.task
            .set(
                tokio::spawn(async move {
                    // `get_or_init` publishes the result exactly once; any
                    // later reader of `result` observes the same value.
                    local_ctx
                        .result
                        .get_or_init(|| async {
                            match f().await {
                                Ok(res) => {
                                    tracing::info!("Setup complete!");
                                    Ok(res)
                                }
                                Err(e) => {
                                    tracing::error!("Setup failed: {e}");
                                    tracing::debug!("{e:?}");
                                    Err(e)
                                }
                            }
                        })
                        .await;
                    // Wake progress watchers (websocket streams) regardless
                    // of whether setup succeeded or failed.
                    local_ctx.progress.complete();
                })
                .into(),
            )
            .map_err(|_| {
                // `set` failing means the task slot was already occupied;
                // distinguish "finished" from "still running" for the caller.
                if self.result.initialized() {
                    Error::new(eyre!("Setup already complete"), ErrorKind::InvalidRequest)
                } else {
                    Error::new(
                        eyre!("Setup already in progress"),
                        ErrorKind::InvalidRequest,
                    )
                }
            })?;
        Ok(())
    }
    /// Snapshot the current setup progress and register a websocket
    /// continuation that streams subsequent progress updates to the client.
    pub async fn progress(&self) -> SetupProgress {
        use axum::extract::ws;
        let guid = Guid::new();
        let progress_tracker = self.progress.clone();
        let progress = progress_tracker.snapshot();
        self.rpc_continuations
            .add(
                guid.clone(),
                RpcContinuation::ws(
                    |mut ws| async move {
                        if let Err(e) = async {
                            // Poll the tracker roughly every 100ms and push
                            // each snapshot to the client as a JSON text frame.
                            let mut stream =
                                progress_tracker.stream(Some(Duration::from_millis(100)));
                            while let Some(progress) = stream.next().await {
                                ws.send(ws::Message::Text(
                                    serde_json::to_string(&progress)
                                        .with_kind(ErrorKind::Serialization)?,
                                ))
                                .await
                                .with_kind(ErrorKind::Network)?;
                                // Stop streaming once overall progress reports done.
                                if progress.overall.is_complete() {
                                    break;
                                }
                            }
                            // Graceful close frame so the client sees normal
                            // completion rather than an abrupt disconnect.
                            ws.normal_close("complete").await?;
                            Ok::<_, Error>(())
                        }
                        .await
                        {
                            tracing::error!("Error in setup progress websocket: {e}");
                            tracing::debug!("{e:?}");
                        }
                    },
                    // The continuation expires if no client attaches within 30s.
                    Duration::from_secs(30),
                ),
            )
            .await;
        SetupProgress { progress, guid }
    }
}
/// Expose the lazily-generated process-wide `CURRENT_SECRET` key so generic
/// middleware can obtain the setup JWK from the context.
impl AsRef<Jwk> for SetupContext {
    fn as_ref(&self) -> &Jwk {
        &*CURRENT_SECRET
    }
}
/// Allow generic continuation plumbing to reach this context's continuations.
impl AsRef<RpcContinuations> for SetupContext {
    fn as_ref(&self) -> &RpcContinuations {
        &self.rpc_continuations
    }
}
/// Marker impl: `SetupContext` is usable as an rpc-toolkit handler context.
impl Context for SetupContext {}

View File

@@ -3,175 +3,40 @@ pub mod prelude;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use axum::extract::ws::{self, WebSocket};
use axum::extract::WebSocketUpgrade;
use axum::response::Response;
use axum::extract::ws;
use clap::Parser;
use futures::{FutureExt, StreamExt};
use http::header::COOKIE;
use http::HeaderMap;
use imbl_value::InternedString;
use itertools::Itertools;
use patch_db::json_ptr::{JsonPointer, ROOT};
use patch_db::{Dump, Revision};
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{from_fn_async, Context, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio::sync::oneshot;
use tracing::instrument;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::middleware::auth::{HasValidSession, HashSessionToken};
use crate::prelude::*;
use crate::rpc_continuations::{Guid, RpcContinuation};
use crate::util::net::WebSocketExt;
use crate::util::serde::{apply_expr, HandlerExtSerde};
lazy_static::lazy_static! {
static ref PUBLIC: JsonPointer = "/public".parse().unwrap();
}
/// Entry point for an accepted patch-db websocket: send the initial dump,
/// then forward revisions — or, without a valid session, close immediately
/// with an UNAUTHORIZED frame.
#[instrument(skip_all)]
async fn ws_handler(
    ctx: RpcContext,
    session: Option<(HasValidSession, HashSessionToken)>,
    mut stream: WebSocket,
) -> Result<(), Error> {
    // Dump and subscribe in one call so no revision published between the
    // two operations is lost.
    let (dump, sub) = ctx.db.dump_and_sub(PUBLIC.clone()).await;
    if let Some((session, token)) = session {
        // Register for a kill signal so this socket is closed when the
        // session is terminated (e.g. logout from another client).
        let kill = subscribe_to_session_kill(&ctx, token).await;
        send_dump(session.clone(), &mut stream, dump).await?;
        deal_with_messages(session, kill, sub, stream).await?;
    } else {
        // No valid session: send an explicit close frame so the client can
        // distinguish an auth failure from a network error.
        stream
            .send(ws::Message::Close(Some(ws::CloseFrame {
                code: ws::close_code::ERROR,
                reason: "UNAUTHORIZED".into(),
            })))
            .await
            .with_kind(ErrorKind::Network)?;
        drop(stream);
    }
    Ok(())
}
/// Register a kill-channel for `token`'s websocket so it can be closed when
/// the session is terminated.
///
/// Returns the receiver half; the sender is appended to the session's list
/// in `ctx.open_authed_websockets`. Uses the map entry API to avoid the
/// original double lookup (`contains_key` followed by `insert`/`get_mut`).
async fn subscribe_to_session_kill(
    ctx: &RpcContext,
    token: HashSessionToken,
) -> oneshot::Receiver<()> {
    let (send, recv) = oneshot::channel();
    ctx.open_authed_websockets
        .lock()
        .await
        .entry(token)
        .or_insert_with(Vec::new)
        .push(send);
    recv
}
/// Main loop for an authenticated patch-db websocket: forwards revisions to
/// the client, honors the session-kill signal, drains incoming frames, and
/// pings periodically to keep the connection alive.
#[instrument(skip_all)]
async fn deal_with_messages(
    _has_valid_authentication: HasValidSession,
    mut kill: oneshot::Receiver<()>,
    mut sub: patch_db::Subscriber,
    mut stream: WebSocket,
) -> Result<(), Error> {
    let mut timer = tokio::time::interval(tokio::time::Duration::from_secs(5));
    loop {
        futures::select! {
            // Session terminated elsewhere (e.g. logout): close with an
            // UNAUTHORIZED frame so the client knows to re-authenticate.
            _ = (&mut kill).fuse() => {
                tracing::info!("Closing WebSocket: Reason: Session Terminated");
                stream
                    .send(ws::Message::Close(Some(ws::CloseFrame {
                        code: ws::close_code::ERROR,
                        reason: "UNAUTHORIZED".into(),
                    }))).await
                    .with_kind(ErrorKind::Network)?;
                drop(stream);
                return Ok(())
            }
            // Forward each patch-db revision to the client as JSON text.
            new_rev = sub.recv().fuse() => {
                let rev = new_rev.expect("UNREACHABLE: patch-db is dropped");
                stream
                    .send(ws::Message::Text(serde_json::to_string(&rev).with_kind(ErrorKind::Serialization)?))
                    .await
                    .with_kind(ErrorKind::Network)?;
            }
            // Drain incoming frames; `None` means the client hung up.
            // Other frames (pings/pongs/text) are intentionally ignored.
            message = stream.next().fuse() => {
                let message = message.transpose().with_kind(ErrorKind::Network)?;
                match message {
                    None => {
                        tracing::info!("Closing WebSocket: Stream Finished");
                        return Ok(())
                    }
                    _ => (),
                }
            }
            // Periodic ping (every 5s) keeps intermediaries and the UI
            // connection alive.
            _ = timer.tick().fuse() => {
                stream
                    .send(ws::Message::Ping(vec![]))
                    .await
                    .with_kind(crate::ErrorKind::Network)?;
            }
        }
    }
}
/// Send the initial full database dump to a freshly-authenticated websocket
/// client as a single JSON text frame.
async fn send_dump(
    _has_valid_authentication: HasValidSession,
    stream: &mut WebSocket,
    dump: Dump,
) -> Result<(), Error> {
    let payload = serde_json::to_string(&dump).with_kind(ErrorKind::Serialization)?;
    stream
        .send(ws::Message::Text(payload))
        .await
        .with_kind(ErrorKind::Network)?;
    Ok(())
}
/// HTTP handler that upgrades the request to the patch-db subscription
/// websocket.
///
/// Authentication is attempted from the request cookies here, but failures
/// are tolerated: the upgrade always succeeds and an unauthenticated socket
/// is rejected over the wire by `ws_handler` instead.
pub async fn subscribe(
    ctx: RpcContext,
    headers: HeaderMap,
    ws: WebSocketUpgrade,
) -> Result<Response, Error> {
    let session = match async {
        let token = HashSessionToken::from_header(headers.get(COOKIE))?;
        let session = HasValidSession::from_header(headers.get(COOKIE), &ctx).await?;
        Ok::<_, Error>((session, token))
    }
    .await
    {
        Ok(a) => Some(a),
        Err(e) => {
            // Authorization failures are expected for logged-out clients;
            // only log unexpected error kinds.
            if e.kind != ErrorKind::Authorization {
                tracing::error!("Error Authenticating Websocket: {}", e);
                tracing::debug!("{:?}", e);
            }
            None
        }
    };
    Ok(ws.on_upgrade(|ws| async move {
        match ws_handler(ctx, session, ws).await {
            Ok(()) => (),
            Err(e) => {
                tracing::error!("WebSocket Closed: {}", e);
                tracing::debug!("{:?}", e);
            }
        }
    }))
}
pub fn db<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand("dump", from_fn_async(cli_dump).with_display_serializable())
.subcommand("dump", from_fn_async(dump).no_cli())
.subcommand(
"subscribe",
from_fn_async(subscribe)
.with_metadata("get_session", Value::Bool(true))
.no_cli(),
)
.subcommand("put", put::<C>())
.subcommand("apply", from_fn_async(cli_apply).no_display())
.subcommand("apply", from_fn_async(apply).no_cli())
@@ -215,7 +80,13 @@ async fn cli_dump(
context
.call_remote::<RpcContext>(
&method,
imbl_value::json!({ "includePrivate":include_private }),
imbl_value::json!({
"pointer": if include_private {
AsRef::<str>::as_ref(&ROOT)
} else {
AsRef::<str>::as_ref(&*PUBLIC)
}
}),
)
.await?,
)?
@@ -224,25 +95,76 @@ async fn cli_dump(
Ok(dump)
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct DumpParams {
#[arg(long = "include-private", short = 'p')]
#[serde(default)]
#[ts(skip)]
include_private: bool,
#[ts(type = "string | null")]
pointer: Option<JsonPointer>,
}
pub async fn dump(
pub async fn dump(ctx: RpcContext, DumpParams { pointer }: DumpParams) -> Result<Dump, Error> {
Ok(ctx.db.dump(pointer.as_ref().unwrap_or(&*PUBLIC)).await)
}
/// Parameters for `db.subscribe`.
#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
pub struct SubscribeParams {
    /// Subtree to subscribe to; when omitted, the public subtree is used.
    #[ts(type = "string | null")]
    pointer: Option<JsonPointer>,
    /// Session token, wire-named `__auth_session` and hidden from the TS
    /// bindings — presumably injected by the auth middleware (see the
    /// `get_session` handler metadata); verify against the middleware.
    #[ts(skip)]
    #[serde(rename = "__auth_session")]
    session: InternedString,
}
/// Response for `db.subscribe`: an initial dump plus a continuation id for
/// the revision websocket.
#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
pub struct SubscribeRes {
    /// Initial full dump of the subscribed subtree.
    #[ts(type = "{ id: number; value: unknown }")]
    pub dump: Dump,
    /// Guid the client uses to attach to the revision stream.
    pub guid: Guid,
}
pub async fn subscribe(
ctx: RpcContext,
DumpParams { include_private }: DumpParams,
) -> Result<Dump, Error> {
Ok(if include_private {
ctx.db.dump(&ROOT).await
} else {
ctx.db.dump(&PUBLIC).await
})
SubscribeParams { pointer, session }: SubscribeParams,
) -> Result<SubscribeRes, Error> {
let (dump, mut sub) = ctx
.db
.dump_and_sub(pointer.unwrap_or_else(|| PUBLIC.clone()))
.await;
let guid = Guid::new();
ctx.rpc_continuations
.add(
guid.clone(),
RpcContinuation::ws_authed(
&ctx,
session,
|mut ws| async move {
if let Err(e) = async {
while let Some(rev) = sub.recv().await {
ws.send(ws::Message::Text(
serde_json::to_string(&rev).with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
}
ws.normal_close("complete").await?;
Ok::<_, Error>(())
}
.await
{
tracing::error!("Error in db websocket: {e}");
tracing::debug!("{e:?}");
}
},
Duration::from_secs(30),
),
)
.await;
Ok(SubscribeRes { dump, guid })
}
#[derive(Deserialize, Serialize, Parser)]

View File

@@ -27,10 +27,6 @@ pub fn diagnostic<C: Context>() -> ParentHandler<C> {
"kernel-logs",
from_fn_async(crate::logs::cli_logs::<DiagnosticContext, Empty>).no_display(),
)
.subcommand(
"exit",
from_fn(exit).no_display().with_call_remote::<CliContext>(),
)
.subcommand(
"restart",
from_fn(restart)
@@ -51,20 +47,15 @@ pub fn error(ctx: DiagnosticContext) -> Result<Arc<RpcError>, Error> {
Ok(ctx.error.clone())
}
/// Request a plain process exit by sending `None` over the diagnostic
/// shutdown channel (contrast `restart`, which sends a `Shutdown` payload).
pub fn exit(ctx: DiagnosticContext) -> Result<(), Error> {
    // The receiver lives for the life of the process; its absence is a bug.
    ctx.shutdown.send(None).expect("receiver dropped");
    Ok(())
}
pub fn restart(ctx: DiagnosticContext) -> Result<(), Error> {
ctx.shutdown
.send(Some(Shutdown {
.send(Shutdown {
export_args: ctx
.disk_guid
.clone()
.map(|guid| (guid, ctx.datadir.clone())),
restart: true,
}))
})
.expect("receiver dropped");
Ok(())
}

View File

@@ -13,7 +13,7 @@ use crate::disk::mount::util::unmount;
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt};
pub const PASSWORD_PATH: &'static str = "/run/embassy/password";
pub const PASSWORD_PATH: &'static str = "/run/startos/password";
pub const DEFAULT_PASSWORD: &'static str = "password";
pub const MAIN_FS_SIZE: FsSize = FsSize::Gigabytes(8);

View File

@@ -9,6 +9,7 @@ use tokio::process::Command;
use crate::disk::fsck::RequiresReboot;
use crate::prelude::*;
use crate::progress::PhaseProgressTrackerHandle;
use crate::util::Invoke;
use crate::PLATFORM;
@@ -49,12 +50,7 @@ pub fn display_firmware_update_result(result: RequiresReboot) {
}
}
/// We wanted to make sure during every init
/// that the firmware was the correct and updated for
/// systems like the Pure System that a new firmware
/// was released and the updates where pushed through the pure os.
// #[command(rename = "update-firmware", display(display_firmware_update_result))]
pub async fn update_firmware() -> Result<RequiresReboot, Error> {
pub async fn check_for_firmware_update() -> Result<Option<Firmware>, Error> {
let system_product_name = String::from_utf8(
Command::new("dmidecode")
.arg("-s")
@@ -74,22 +70,21 @@ pub async fn update_firmware() -> Result<RequiresReboot, Error> {
.trim()
.to_owned();
if system_product_name.is_empty() || bios_version.is_empty() {
return Ok(RequiresReboot(false));
return Ok(None);
}
let firmware_dir = Path::new("/usr/lib/startos/firmware");
for firmware in serde_json::from_str::<Vec<Firmware>>(
&tokio::fs::read_to_string("/usr/lib/startos/firmware.json").await?,
)
.with_kind(ErrorKind::Deserialization)?
{
let id = firmware.id;
let matches_product_name = firmware
.system_product_name
.map_or(true, |spn| spn == system_product_name);
.as_ref()
.map_or(true, |spn| spn == &system_product_name);
let matches_bios_version = firmware
.bios_version
.as_ref()
.map_or(Some(true), |bv| {
let mut semver_str = bios_version.as_str();
if let Some(prefix) = &bv.semver_prefix {
@@ -113,35 +108,45 @@ pub async fn update_firmware() -> Result<RequiresReboot, Error> {
})
.unwrap_or(false);
if firmware.platform.contains(&*PLATFORM) && matches_product_name && matches_bios_version {
let filename = format!("{id}.rom.gz");
let firmware_path = firmware_dir.join(&filename);
Command::new("sha256sum")
.arg("-c")
.input(Some(&mut std::io::Cursor::new(format!(
"{} {}",
firmware.shasum,
firmware_path.display()
))))
.invoke(ErrorKind::Filesystem)
.await?;
let mut rdr = if tokio::fs::metadata(&firmware_path).await.is_ok() {
GzipDecoder::new(BufReader::new(File::open(&firmware_path).await?))
} else {
return Err(Error::new(
eyre!("Firmware {id}.rom.gz not found in {firmware_dir:?}"),
ErrorKind::NotFound,
));
};
Command::new("flashrom")
.arg("-p")
.arg("internal")
.arg("-w-")
.input(Some(&mut rdr))
.invoke(ErrorKind::Firmware)
.await?;
return Ok(RequiresReboot(true));
return Ok(Some(firmware));
}
}
Ok(RequiresReboot(false))
Ok(None)
}
/// Flash the given firmware image onto the system.
///
/// The image `<id>.rom.gz` must exist under `/usr/lib/startos/firmware` and
/// match `firmware.shasum`; the gzip stream is decompressed on the fly and
/// written via `flashrom`. Historically run during init so platforms like
/// the Pure System pick up firmware updates shipped with the OS; the caller
/// is responsible for signalling that a reboot is required.
pub async fn update_firmware(firmware: Firmware) -> Result<(), Error> {
    let id = &firmware.id;
    let firmware_dir = Path::new("/usr/lib/startos/firmware");
    let filename = format!("{id}.rom.gz");
    let firmware_path = firmware_dir.join(&filename);
    // Verify the image's checksum before touching the flash chip.
    // NOTE(review): `sha256sum -c` conventionally expects "HASH  FILE" with
    // two spaces — confirm this single-space format string is accepted.
    Command::new("sha256sum")
        .arg("-c")
        .input(Some(&mut std::io::Cursor::new(format!(
            "{} {}",
            firmware.shasum,
            firmware_path.display()
        ))))
        .invoke(ErrorKind::Filesystem)
        .await?;
    // NOTE(review): the existence check happens after the checksum step; a
    // missing file would already have failed `sha256sum -c` above.
    let mut rdr = if tokio::fs::metadata(&firmware_path).await.is_ok() {
        GzipDecoder::new(BufReader::new(File::open(&firmware_path).await?))
    } else {
        return Err(Error::new(
            eyre!("Firmware {id}.rom.gz not found in {firmware_dir:?}"),
            ErrorKind::NotFound,
        ));
    };
    // Stream the decompressed ROM straight into flashrom over stdin.
    Command::new("flashrom")
        .arg("-p")
        .arg("internal")
        .arg("-w-")
        .input(Some(&mut rdr))
        .invoke(ErrorKind::Firmware)
        .await?;
    Ok(())
}

View File

@@ -1,25 +1,40 @@
use std::fs::Permissions;
use std::io::Cursor;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use std::time::{Duration, SystemTime};
use axum::extract::ws::{self, CloseFrame};
use color_eyre::eyre::eyre;
use futures::{StreamExt, TryStreamExt};
use itertools::Itertools;
use models::ResultExt;
use rand::random;
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tokio::process::Command;
use tracing::instrument;
use ts_rs::TS;
use crate::account::AccountInfo;
use crate::context::config::ServerConfig;
use crate::context::{CliContext, InitContext};
use crate::db::model::public::ServerStatus;
use crate::db::model::Database;
use crate::disk::mount::util::unmount;
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::net::net_controller::PreInitNetController;
use crate::prelude::*;
use crate::progress::{
FullProgress, FullProgressTracker, PhaseProgressTrackerHandle, PhasedProgressBar,
};
use crate::rpc_continuations::{Guid, RpcContinuation};
use crate::ssh::SSH_AUTHORIZED_KEYS_FILE;
use crate::util::cpupower::{get_available_governors, get_preferred_governor, set_governor};
use crate::util::Invoke;
use crate::{Error, ARCH};
use crate::util::io::IOHook;
use crate::util::net::WebSocketExt;
use crate::util::{cpupower, Invoke};
use crate::Error;
pub const SYSTEM_REBUILD_PATH: &str = "/media/startos/config/system-rebuild";
pub const STANDBY_MODE_PATH: &str = "/media/startos/config/standby";
@@ -180,14 +195,114 @@ pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
}
pub struct InitResult {
pub db: TypedPatchDb<Database>,
pub net_ctrl: PreInitNetController,
}
/// Progress-tracker handles for each phase of system init, listed in
/// execution order. The optional phases (`preinit`/`postinit`) are `Some`
/// only when the corresponding script exists on disk at construction time.
pub struct InitPhases {
    preinit: Option<PhaseProgressTrackerHandle>,
    local_auth: PhaseProgressTrackerHandle,
    load_database: PhaseProgressTrackerHandle,
    load_ssh_keys: PhaseProgressTrackerHandle,
    start_net: PhaseProgressTrackerHandle,
    mount_logs: PhaseProgressTrackerHandle,
    load_ca_cert: PhaseProgressTrackerHandle,
    load_wifi: PhaseProgressTrackerHandle,
    init_tmp: PhaseProgressTrackerHandle,
    set_governor: PhaseProgressTrackerHandle,
    sync_clock: PhaseProgressTrackerHandle,
    enable_zram: PhaseProgressTrackerHandle,
    update_server_info: PhaseProgressTrackerHandle,
    launch_service_network: PhaseProgressTrackerHandle,
    run_migrations: PhaseProgressTrackerHandle,
    validate_db: PhaseProgressTrackerHandle,
    postinit: Option<PhaseProgressTrackerHandle>,
}
impl InitPhases {
    /// Register every init phase with the progress tracker. The numeric
    /// weight (second argument) is a rough relative duration estimate used
    /// for overall-progress calculation. `preinit`/`postinit` phases are
    /// only registered when their script is present on disk.
    pub fn new(handle: &FullProgressTracker) -> Self {
        Self {
            preinit: if Path::new("/media/startos/config/preinit.sh").exists() {
                Some(handle.add_phase("Running preinit.sh".into(), Some(5)))
            } else {
                None
            },
            local_auth: handle.add_phase("Enabling local authentication".into(), Some(1)),
            load_database: handle.add_phase("Loading database".into(), Some(5)),
            load_ssh_keys: handle.add_phase("Loading SSH Keys".into(), Some(1)),
            start_net: handle.add_phase("Starting network controller".into(), Some(1)),
            mount_logs: handle.add_phase("Switching logs to write to data drive".into(), Some(1)),
            load_ca_cert: handle.add_phase("Loading CA certificate".into(), Some(1)),
            load_wifi: handle.add_phase("Loading WiFi configuration".into(), Some(1)),
            init_tmp: handle.add_phase("Initializing temporary files".into(), Some(1)),
            set_governor: handle.add_phase("Setting CPU performance profile".into(), Some(1)),
            sync_clock: handle.add_phase("Synchronizing system clock".into(), Some(10)),
            enable_zram: handle.add_phase("Enabling ZRAM".into(), Some(1)),
            update_server_info: handle.add_phase("Updating server info".into(), Some(1)),
            launch_service_network: handle.add_phase("Launching service intranet".into(), Some(10)),
            run_migrations: handle.add_phase("Running migrations".into(), Some(10)),
            validate_db: handle.add_phase("Validating database".into(), Some(1)),
            postinit: if Path::new("/media/startos/config/postinit.sh").exists() {
                Some(handle.add_phase("Running postinit.sh".into(), Some(5)))
            } else {
                None
            },
        }
    }
}
/// Execute the shell script at `path` through `/bin/bash`, reporting
/// progress by counting newlines as the script is fed to the shell.
///
/// Failures are logged but never propagated — pre/postinit scripts are
/// best-effort and must not abort system init.
pub async fn run_script<P: AsRef<Path>>(path: P, mut progress: PhaseProgressTrackerHandle) {
    let script = path.as_ref();
    progress.start();
    if let Err(e) = async {
        let script = tokio::fs::read_to_string(script).await?;
        // Total units = newline count of the script body.
        progress.set_total(script.as_bytes().iter().filter(|b| **b == b'\n').count() as u64);
        // IOHook lets us observe each chunk bash reads from its stdin and
        // advance progress by the newlines it contained.
        let mut reader = IOHook::new(Cursor::new(script.as_bytes()));
        reader.post_read(|buf| progress += buf.iter().filter(|b| **b == b'\n').count() as u64);
        Command::new("/bin/bash")
            .input(Some(&mut reader))
            .invoke(ErrorKind::Unknown)
            .await?;
        Ok::<_, Error>(())
    }
    .await
    {
        tracing::error!("Error Running {}: {}", script.display(), e);
        tracing::debug!("{:?}", e);
    }
    progress.complete();
}
#[instrument(skip_all)]
pub async fn init(cfg: &ServerConfig) -> Result<InitResult, Error> {
tokio::fs::create_dir_all("/run/embassy")
pub async fn init(
cfg: &ServerConfig,
InitPhases {
preinit,
mut local_auth,
mut load_database,
mut load_ssh_keys,
mut start_net,
mut mount_logs,
mut load_ca_cert,
mut load_wifi,
mut init_tmp,
mut set_governor,
mut sync_clock,
mut enable_zram,
mut update_server_info,
mut launch_service_network,
run_migrations,
mut validate_db,
postinit,
}: InitPhases,
) -> Result<InitResult, Error> {
if let Some(progress) = preinit {
run_script("/media/startos/config/preinit.sh", progress).await;
}
local_auth.start();
tokio::fs::create_dir_all("/run/startos")
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, "mkdir -p /run/embassy"))?;
.with_ctx(|_| (crate::ErrorKind::Filesystem, "mkdir -p /run/startos"))?;
if tokio::fs::metadata(LOCAL_AUTH_COOKIE_PATH).await.is_err() {
tokio::fs::write(
LOCAL_AUTH_COOKIE_PATH,
@@ -207,43 +322,41 @@ pub async fn init(cfg: &ServerConfig) -> Result<InitResult, Error> {
.invoke(crate::ErrorKind::Filesystem)
.await?;
}
local_auth.complete();
load_database.start();
let db = TypedPatchDb::<Database>::load_unchecked(cfg.db().await?);
let peek = db.peek().await;
load_database.complete();
tracing::info!("Opened PatchDB");
load_ssh_keys.start();
crate::ssh::sync_keys(
&peek.as_private().as_ssh_pubkeys().de()?,
SSH_AUTHORIZED_KEYS_FILE,
)
.await?;
load_ssh_keys.complete();
tracing::info!("Synced SSH Keys");
let account = AccountInfo::load(&peek)?;
let mut server_info = peek.as_public().as_server_info().de()?;
// write to ca cert store
tokio::fs::write(
"/usr/local/share/ca-certificates/startos-root-ca.crt",
account.root_ca_cert.to_pem()?,
start_net.start();
let net_ctrl = PreInitNetController::init(
db.clone(),
cfg.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
cfg.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(127, 0, 0, 1),
9050,
))),
&account.hostname,
account.tor_key,
)
.await?;
Command::new("update-ca-certificates")
.invoke(crate::ErrorKind::OpenSsl)
.await?;
crate::net::wifi::synchronize_wpa_supplicant_conf(
&cfg.datadir().join("main"),
&mut server_info.wifi,
)
.await?;
tracing::info!("Synchronized WiFi");
let should_rebuild = tokio::fs::metadata(SYSTEM_REBUILD_PATH).await.is_ok()
|| &*server_info.version < &emver::Version::new(0, 3, 2, 0)
|| (ARCH == "x86_64" && &*server_info.version < &emver::Version::new(0, 3, 4, 0));
start_net.complete();
mount_logs.start();
let log_dir = cfg.datadir().join("main/logs");
if tokio::fs::metadata(&log_dir).await.is_err() {
tokio::fs::create_dir_all(&log_dir).await?;
@@ -272,10 +385,35 @@ pub async fn init(cfg: &ServerConfig) -> Result<InitResult, Error> {
.arg("systemd-journald")
.invoke(crate::ErrorKind::Journald)
.await?;
mount_logs.complete();
tracing::info!("Mounted Logs");
let mut server_info = peek.as_public().as_server_info().de()?;
load_ca_cert.start();
// write to ca cert store
tokio::fs::write(
"/usr/local/share/ca-certificates/startos-root-ca.crt",
account.root_ca_cert.to_pem()?,
)
.await?;
Command::new("update-ca-certificates")
.invoke(crate::ErrorKind::OpenSsl)
.await?;
load_ca_cert.complete();
load_wifi.start();
crate::net::wifi::synchronize_wpa_supplicant_conf(
&cfg.datadir().join("main"),
&mut server_info.wifi,
)
.await?;
load_wifi.complete();
tracing::info!("Synchronized WiFi");
init_tmp.start();
let tmp_dir = cfg.datadir().join("package-data/tmp");
if should_rebuild && tokio::fs::metadata(&tmp_dir).await.is_ok() {
if tokio::fs::metadata(&tmp_dir).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_dir).await?;
}
if tokio::fs::metadata(&tmp_dir).await.is_err() {
@@ -286,23 +424,30 @@ pub async fn init(cfg: &ServerConfig) -> Result<InitResult, Error> {
tokio::fs::remove_dir_all(&tmp_var).await?;
}
crate::disk::mount::util::bind(&tmp_var, "/var/tmp", false).await?;
init_tmp.complete();
set_governor.start();
let governor = if let Some(governor) = &server_info.governor {
if get_available_governors().await?.contains(governor) {
if cpupower::get_available_governors()
.await?
.contains(governor)
{
Some(governor)
} else {
tracing::warn!("CPU Governor \"{governor}\" Not Available");
None
}
} else {
get_preferred_governor().await?
cpupower::get_preferred_governor().await?
};
if let Some(governor) = governor {
tracing::info!("Setting CPU Governor to \"{governor}\"");
set_governor(governor).await?;
cpupower::set_governor(governor).await?;
tracing::info!("Set CPU Governor");
}
set_governor.complete();
sync_clock.start();
server_info.ntp_synced = false;
let mut not_made_progress = 0u32;
for _ in 0..1800 {
@@ -329,10 +474,15 @@ pub async fn init(cfg: &ServerConfig) -> Result<InitResult, Error> {
} else {
tracing::info!("Syncronized system clock");
}
sync_clock.complete();
enable_zram.start();
if server_info.zram {
crate::system::enable_zram().await?
}
enable_zram.complete();
update_server_info.start();
server_info.ip_info = crate::net::dhcp::init_ips().await?;
server_info.status_info = ServerStatus {
updated: false,
@@ -341,36 +491,129 @@ pub async fn init(cfg: &ServerConfig) -> Result<InitResult, Error> {
shutting_down: false,
restarting: false,
};
db.mutate(|v| {
v.as_public_mut().as_server_info_mut().ser(&server_info)?;
Ok(())
})
.await?;
update_server_info.complete();
launch_service_network.start();
Command::new("systemctl")
.arg("start")
.arg("lxc-net.service")
.invoke(ErrorKind::Lxc)
.await?;
launch_service_network.complete();
crate::version::init(&db).await?;
crate::version::init(&db, run_migrations).await?;
validate_db.start();
db.mutate(|d| {
let model = d.de()?;
d.ser(&model)
})
.await?;
validate_db.complete();
if should_rebuild {
match tokio::fs::remove_file(SYSTEM_REBUILD_PATH).await {
Ok(()) => Ok(()),
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(()),
Err(e) => Err(e),
}?;
if let Some(progress) = postinit {
run_script("/media/startos/config/postinit.sh", progress).await;
}
tracing::info!("System initialized.");
Ok(InitResult { db })
Ok(InitResult { net_ctrl })
}
pub fn init_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand("logs", crate::system::logs::<InitContext>())
.subcommand(
"logs",
from_fn_async(crate::logs::cli_logs::<InitContext, Empty>).no_display(),
)
.subcommand("kernel-logs", crate::system::kernel_logs::<InitContext>())
.subcommand(
"kernel-logs",
from_fn_async(crate::logs::cli_logs::<InitContext, Empty>).no_display(),
)
.subcommand("subscribe", from_fn_async(init_progress).no_cli())
.subcommand("subscribe", from_fn_async(cli_init_progress).no_display())
}
/// Response for `init.subscribe`: a progress snapshot plus a continuation
/// id for the websocket streaming further updates.
#[derive(Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct InitProgressRes {
    /// Snapshot of init progress at the time of the request.
    pub progress: FullProgress,
    /// Guid the client uses to attach to the progress websocket.
    pub guid: Guid,
}
/// Snapshot current init progress and register a websocket continuation
/// that streams subsequent updates until init completes.
pub async fn init_progress(ctx: InitContext) -> Result<InitProgressRes, Error> {
    let progress_tracker = ctx.progress.clone();
    let progress = progress_tracker.snapshot();
    let guid = Guid::new();
    ctx.rpc_continuations
        .add(
            guid.clone(),
            RpcContinuation::ws(
                |mut ws| async move {
                    if let Err(e) = async {
                        // Poll roughly every 100ms; each snapshot goes out
                        // as a JSON text frame.
                        let mut stream = progress_tracker.stream(Some(Duration::from_millis(100)));
                        while let Some(progress) = stream.next().await {
                            ws.send(ws::Message::Text(
                                serde_json::to_string(&progress)
                                    .with_kind(ErrorKind::Serialization)?,
                            ))
                            .await
                            .with_kind(ErrorKind::Network)?;
                            // Stop once overall progress reports completion.
                            if progress.overall.is_complete() {
                                break;
                            }
                        }
                        // Graceful close so the client distinguishes normal
                        // completion from a dropped connection.
                        ws.normal_close("complete").await?;
                        Ok::<_, Error>(())
                    }
                    .await
                    {
                        tracing::error!("error in init progress websocket: {e}");
                        tracing::debug!("{e:?}");
                    }
                },
                // Continuation expires if no client attaches within 30s.
                Duration::from_secs(30),
            ),
        )
        .await;
    Ok(InitProgressRes { progress, guid })
}
/// CLI-side driver for `init.subscribe`: call the method remotely to obtain
/// the progress continuation, attach to the websocket, and render a phased
/// progress bar until the server closes the stream.
pub async fn cli_init_progress(
    HandlerArgs {
        context: ctx,
        parent_method,
        method,
        raw_params,
        ..
    }: HandlerArgs<CliContext>,
) -> Result<(), Error> {
    // Re-invoke the same dotted method path on the server; the response
    // carries an initial snapshot plus the websocket continuation guid.
    let res: InitProgressRes = from_value(
        ctx.call_remote::<InitContext>(
            &parent_method
                .into_iter()
                .chain(method.into_iter())
                .join("."),
            raw_params,
        )
        .await?,
    )?;
    let mut ws = ctx.ws_continuation(res.guid).await?;
    let mut bar = PhasedProgressBar::new("Initializing...");
    // Only text frames carry progress JSON; pings/close frames are skipped.
    // The loop ends when the server closes the socket.
    while let Some(msg) = ws.try_next().await.with_kind(ErrorKind::Network)? {
        if let tokio_tungstenite::tungstenite::Message::Text(msg) = msg {
            bar.update(&serde_json::from_str(&msg).with_kind(ErrorKind::Deserialization)?);
        }
    }
    Ok(())
}

View File

@@ -6,7 +6,8 @@ use clap::builder::ValueParserFactory;
use clap::{value_parser, CommandFactory, FromArgMatches, Parser};
use color_eyre::eyre::eyre;
use emver::VersionRange;
use futures::{FutureExt, StreamExt};
use futures::StreamExt;
use imbl_value::InternedString;
use itertools::Itertools;
use patch_db::json_ptr::JsonPointer;
use reqwest::header::{HeaderMap, CONTENT_LENGTH};
@@ -29,6 +30,7 @@ use crate::s9pk::merkle_archive::source::http::HttpSource;
use crate::s9pk::S9pk;
use crate::upload::upload;
use crate::util::clap::FromStrParser;
use crate::util::net::WebSocketExt;
use crate::util::Never;
pub const PKG_ARCHIVE_DIR: &str = "package-data/archive";
@@ -170,7 +172,15 @@ pub async fn install(
Ok(())
}
#[derive(Deserialize, Serialize)]
#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
pub struct SideloadParams {
#[ts(skip)]
#[serde(rename = "__auth_session")]
session: InternedString,
}
#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
pub struct SideloadResponse {
pub upload: Guid,
@@ -178,8 +188,11 @@ pub struct SideloadResponse {
}
#[instrument(skip_all)]
pub async fn sideload(ctx: RpcContext) -> Result<SideloadResponse, Error> {
let (upload, file) = upload(&ctx).await?;
pub async fn sideload(
ctx: RpcContext,
SideloadParams { session }: SideloadParams,
) -> Result<SideloadResponse, Error> {
let (upload, file) = upload(&ctx, session.clone()).await?;
let (id_send, id_recv) = oneshot::channel();
let (err_send, err_recv) = oneshot::channel();
let progress = Guid::new();
@@ -193,8 +206,8 @@ pub async fn sideload(ctx: RpcContext) -> Result<SideloadResponse, Error> {
.await;
ctx.rpc_continuations.add(
progress.clone(),
RpcContinuation::ws(
Box::new(|mut ws| {
RpcContinuation::ws_authed(&ctx, session,
|mut ws| {
use axum::extract::ws::Message;
async move {
if let Err(e) = async {
@@ -251,7 +264,7 @@ pub async fn sideload(ctx: RpcContext) -> Result<SideloadResponse, Error> {
}
}
ws.close().await.with_kind(ErrorKind::Network)?;
ws.normal_close("complete").await?;
Ok::<_, Error>(())
}
@@ -261,8 +274,7 @@ pub async fn sideload(ctx: RpcContext) -> Result<SideloadResponse, Error> {
tracing::debug!("{e:?}");
}
}
.boxed()
}),
},
Duration::from_secs(600),
),
)

View File

@@ -1,8 +1,5 @@
pub const DEFAULT_MARKETPLACE: &str = "https://registry.start9.com";
// pub const COMMUNITY_MARKETPLACE: &str = "https://community-registry.start9.com";
pub const CAP_1_KiB: usize = 1024;
pub const CAP_1_MiB: usize = CAP_1_KiB * CAP_1_KiB;
pub const CAP_10_MiB: usize = 10 * CAP_1_MiB;
pub const HOST_IP: [u8; 4] = [172, 18, 0, 1];
pub use std::env::consts::ARCH;
lazy_static::lazy_static! {
@@ -18,6 +15,15 @@ lazy_static::lazy_static! {
};
}
/// Byte-size capacity constants, grouped in a module so the lint allowance
/// for their mixed-case names stays scoped.
mod cap {
    #![allow(non_upper_case_globals)]
    /// One kibibyte (1024 bytes).
    pub const CAP_1_KiB: usize = 1 << 10;
    /// One mebibyte (1024 KiB).
    pub const CAP_1_MiB: usize = CAP_1_KiB << 10;
    /// Ten mebibytes.
    pub const CAP_10_MiB: usize = 10 * CAP_1_MiB;
}
pub use cap::*;
pub mod account;
pub mod action;
pub mod auth;
@@ -75,13 +81,17 @@ use rpc_toolkit::{
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::context::{CliContext, DiagnosticContext, InstallContext, RpcContext, SetupContext};
use crate::context::{
CliContext, DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext,
};
use crate::disk::fsck::RequiresReboot;
use crate::registry::context::{RegistryContext, RegistryUrlParams};
use crate::util::serde::HandlerExtSerde;
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
#[ts(export)]
pub struct EchoParams {
message: String,
}
@@ -90,6 +100,20 @@ pub fn echo<C: Context>(_: C, EchoParams { message }: EchoParams) -> Result<Stri
Ok(message)
}
/// Which API surface the backend is currently serving; lets clients route
/// to the error/diagnostic, initializing, or main UI.
#[derive(Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub enum ApiState {
    Error,
    Initializing,
    Running,
}
impl std::fmt::Display for ApiState {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Matches the derived Debug output: the bare variant name.
        f.write_str(match self {
            Self::Error => "Error",
            Self::Initializing => "Initializing",
            Self::Running => "Running",
        })
    }
}
pub fn main_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand::<C, _>("git-info", from_fn(version::git_info))
@@ -99,6 +123,12 @@ pub fn main_api<C: Context>() -> ParentHandler<C> {
.with_metadata("authenticated", Value::Bool(false))
.with_call_remote::<CliContext>(),
)
.subcommand(
"state",
from_fn(|_: RpcContext| Ok::<_, Error>(ApiState::Running))
.with_metadata("authenticated", Value::Bool(false))
.with_call_remote::<CliContext>(),
)
.subcommand("server", server::<C>())
.subcommand("package", package::<C>())
.subcommand("net", net::net::<C>())
@@ -179,11 +209,18 @@ pub fn server<C: Context>() -> ParentHandler<C> {
)
.subcommand(
"update-firmware",
from_fn_async(|_: RpcContext| firmware::update_firmware())
.with_custom_display_fn(|_handle, result| {
Ok(firmware::display_firmware_update_result(result))
})
.with_call_remote::<CliContext>(),
from_fn_async(|_: RpcContext| async {
if let Some(firmware) = firmware::check_for_firmware_update().await? {
firmware::update_firmware(firmware).await?;
Ok::<_, Error>(RequiresReboot(true))
} else {
Ok(RequiresReboot(false))
}
})
.with_custom_display_fn(|_handle, result| {
Ok(firmware::display_firmware_update_result(result))
})
.with_call_remote::<CliContext>(),
)
}
@@ -204,7 +241,12 @@ pub fn package<C: Context>() -> ParentHandler<C> {
.with_metadata("sync_db", Value::Bool(true))
.no_cli(),
)
.subcommand("sideload", from_fn_async(install::sideload).no_cli())
.subcommand(
"sideload",
from_fn_async(install::sideload)
.with_metadata("get_session", Value::Bool(true))
.no_cli(),
)
.subcommand("install", from_fn_async(install::cli_install).no_display())
.subcommand(
"uninstall",
@@ -273,9 +315,34 @@ pub fn diagnostic_api() -> ParentHandler<DiagnosticContext> {
"echo",
from_fn(echo::<DiagnosticContext>).with_call_remote::<CliContext>(),
)
.subcommand(
"state",
from_fn(|_: DiagnosticContext| Ok::<_, Error>(ApiState::Error))
.with_metadata("authenticated", Value::Bool(false))
.with_call_remote::<CliContext>(),
)
.subcommand("diagnostic", diagnostic::diagnostic::<DiagnosticContext>())
}
pub fn init_api() -> ParentHandler<InitContext> {
ParentHandler::new()
.subcommand::<InitContext, _>(
"git-info",
from_fn(version::git_info).with_metadata("authenticated", Value::Bool(false)),
)
.subcommand(
"echo",
from_fn(echo::<InitContext>).with_call_remote::<CliContext>(),
)
.subcommand(
"state",
from_fn(|_: InitContext| Ok::<_, Error>(ApiState::Initializing))
.with_metadata("authenticated", Value::Bool(false))
.with_call_remote::<CliContext>(),
)
.subcommand("init", init::init_api::<InitContext>())
}
pub fn setup_api() -> ParentHandler<SetupContext> {
ParentHandler::new()
.subcommand::<SetupContext, _>(

View File

@@ -7,7 +7,7 @@ use std::time::Duration;
use clap::builder::ValueParserFactory;
use clap::Parser;
use futures::{AsyncWriteExt, FutureExt, StreamExt};
use futures::{AsyncWriteExt, StreamExt};
use imbl_value::{InOMap, InternedString};
use models::InvalidId;
use rpc_toolkit::yajrc::{RpcError, RpcResponse};
@@ -456,51 +456,49 @@ pub async fn connect(ctx: &RpcContext, container: &LxcContainer) -> Result<Guid,
.add(
guid.clone(),
RpcContinuation::ws(
Box::new(|mut ws| {
async move {
if let Err(e) = async {
loop {
match ws.next().await {
None => break,
Some(Ok(Message::Text(txt))) => {
let mut id = None;
let result = async {
let req: RpcRequest = serde_json::from_str(&txt)
.map_err(|e| RpcError {
data: Some(serde_json::Value::String(
e.to_string(),
)),
..rpc_toolkit::yajrc::PARSE_ERROR
})?;
id = req.id;
rpc.request(req.method, req.params).await
}
.await;
ws.send(Message::Text(
serde_json::to_string(
&RpcResponse::<GenericRpcMethod> { id, result },
)
.with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
}
Some(Ok(_)) => (),
Some(Err(e)) => {
return Err(Error::new(e, ErrorKind::Network));
|mut ws| async move {
if let Err(e) = async {
loop {
match ws.next().await {
None => break,
Some(Ok(Message::Text(txt))) => {
let mut id = None;
let result = async {
let req: RpcRequest =
serde_json::from_str(&txt).map_err(|e| RpcError {
data: Some(serde_json::Value::String(
e.to_string(),
)),
..rpc_toolkit::yajrc::PARSE_ERROR
})?;
id = req.id;
rpc.request(req.method, req.params).await
}
.await;
ws.send(Message::Text(
serde_json::to_string(&RpcResponse::<GenericRpcMethod> {
id,
result,
})
.with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
}
Some(Ok(_)) => (),
Some(Err(e)) => {
return Err(Error::new(e, ErrorKind::Network));
}
}
Ok::<_, Error>(())
}
.await
{
tracing::error!("{e}");
tracing::debug!("{e:?}");
}
Ok::<_, Error>(())
}
.boxed()
}),
.await
{
tracing::error!("{e}");
tracing::debug!("{e:?}");
}
},
Duration::from_secs(30),
),
)

View File

@@ -23,7 +23,7 @@ use tokio::sync::Mutex;
use crate::context::RpcContext;
use crate::prelude::*;
pub const LOCAL_AUTH_COOKIE_PATH: &str = "/run/embassy/rpc.authcookie";
pub const LOCAL_AUTH_COOKIE_PATH: &str = "/run/startos/rpc.authcookie";
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
@@ -48,19 +48,9 @@ impl HasLoggedOutSessions {
.into_iter()
.map(|s| s.as_logout_session_id())
.collect();
ctx.open_authed_websockets
.lock()
.await
.retain(|session, sockets| {
if to_log_out.contains(session.hashed()) {
for socket in std::mem::take(sockets) {
let _ = socket.send(());
}
false
} else {
true
}
});
for sid in &to_log_out {
ctx.open_authed_continuations.kill(sid)
}
ctx.db
.mutate(|db| {
let sessions = db.as_private_mut().as_sessions_mut();

View File

@@ -1,42 +0,0 @@
use rpc_toolkit::yajrc::RpcMethod;
use rpc_toolkit::{Empty, Middleware, RpcRequest, RpcResponse};
use crate::context::DiagnosticContext;
use crate::prelude::*;
#[derive(Clone)]
pub struct DiagnosticMode {
method: Option<String>,
}
impl DiagnosticMode {
pub fn new() -> Self {
Self { method: None }
}
}
impl Middleware<DiagnosticContext> for DiagnosticMode {
type Metadata = Empty;
async fn process_rpc_request(
&mut self,
_: &DiagnosticContext,
_: Self::Metadata,
request: &mut RpcRequest,
) -> Result<(), RpcResponse> {
self.method = Some(request.method.as_str().to_owned());
Ok(())
}
async fn process_rpc_response(&mut self, _: &DiagnosticContext, response: &mut RpcResponse) {
if let Err(e) = &mut response.result {
if e.code == -32601 {
*e = Error::new(
eyre!(
"{} is not available on the Diagnostic API",
self.method.as_ref().map(|s| s.as_str()).unwrap_or_default()
),
crate::ErrorKind::DiagnosticMode,
)
.into();
}
}
}
}

View File

@@ -1,4 +1,3 @@
pub mod auth;
pub mod cors;
pub mod db;
pub mod diagnostic;

View File

@@ -23,22 +23,18 @@ use crate::prelude::*;
use crate::util::serde::MaybeUtf8String;
use crate::HOST_IP;
pub struct NetController {
db: TypedPatchDb<Database>,
pub(super) tor: TorController,
pub(super) vhost: VHostController,
pub(super) dns: DnsController,
pub(super) forward: LanPortForwardController,
pub(super) os_bindings: Vec<Arc<()>>,
pub struct PreInitNetController {
pub db: TypedPatchDb<Database>,
tor: TorController,
vhost: VHostController,
os_bindings: Vec<Arc<()>>,
}
impl NetController {
impl PreInitNetController {
#[instrument(skip_all)]
pub async fn init(
db: TypedPatchDb<Database>,
tor_control: SocketAddr,
tor_socks: SocketAddr,
dns_bind: &[SocketAddr],
hostname: &Hostname,
os_tor_key: TorSecretKeyV3,
) -> Result<Self, Error> {
@@ -46,8 +42,6 @@ impl NetController {
db: db.clone(),
tor: TorController::new(tor_control, tor_socks),
vhost: VHostController::new(db),
dns: DnsController::init(dns_bind).await?,
forward: LanPortForwardController::new(),
os_bindings: Vec::new(),
};
res.add_os_bindings(hostname, os_tor_key).await?;
@@ -73,8 +67,6 @@ impl NetController {
alpn.clone(),
)
.await?;
self.os_bindings
.push(self.dns.add(None, HOST_IP.into()).await?);
// LAN IP
self.os_bindings.push(
@@ -142,6 +134,39 @@ impl NetController {
Ok(())
}
}
pub struct NetController {
db: TypedPatchDb<Database>,
pub(super) tor: TorController,
pub(super) vhost: VHostController,
pub(super) dns: DnsController,
pub(super) forward: LanPortForwardController,
pub(super) os_bindings: Vec<Arc<()>>,
}
impl NetController {
pub async fn init(
PreInitNetController {
db,
tor,
vhost,
os_bindings,
}: PreInitNetController,
dns_bind: &[SocketAddr],
) -> Result<Self, Error> {
let mut res = Self {
db,
tor,
vhost,
dns: DnsController::init(dns_bind).await?,
forward: LanPortForwardController::new(),
os_bindings,
};
res.os_bindings
.push(res.dns.add(None, HOST_IP.into()).await?);
Ok(res)
}
#[instrument(skip_all)]
pub async fn create_service(

View File

@@ -0,0 +1,11 @@
<html>
<head>
<title>StartOS: Loading...</title>
<script>
setTimeout(window.location.reload, 1000)
</script>
</head>
<body>
Loading...
</body>
</html>

View File

@@ -1,4 +1,3 @@
use std::fs::Metadata;
use std::future::Future;
use std::path::{Path, PathBuf};
use std::time::UNIX_EPOCH;
@@ -13,25 +12,26 @@ use digest::Digest;
use futures::future::ready;
use http::header::ACCEPT_ENCODING;
use http::request::Parts as RequestParts;
use http::{HeaderMap, Method, StatusCode};
use http::{Method, StatusCode};
use imbl_value::InternedString;
use include_dir::Dir;
use new_mime_guess::MimeGuess;
use openssl::hash::MessageDigest;
use openssl::x509::X509;
use rpc_toolkit::Server;
use rpc_toolkit::{Context, HttpServer, Server};
use tokio::fs::File;
use tokio::io::BufReader;
use tokio_util::io::ReaderStream;
use crate::context::{DiagnosticContext, InstallContext, RpcContext, SetupContext};
use crate::db::subscribe;
use crate::context::{DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext};
use crate::hostname::Hostname;
use crate::middleware::auth::{Auth, HasValidSession};
use crate::middleware::cors::Cors;
use crate::middleware::db::SyncDb;
use crate::middleware::diagnostic::DiagnosticMode;
use crate::rpc_continuations::Guid;
use crate::{diagnostic_api, install_api, main_api, setup_api, Error, ErrorKind, ResultExt};
use crate::rpc_continuations::{Guid, RpcContinuations};
use crate::{
diagnostic_api, init_api, install_api, main_api, setup_api, Error, ErrorKind, ResultExt,
};
const NOT_FOUND: &[u8] = b"Not Found";
const METHOD_NOT_ALLOWED: &[u8] = b"Method Not Allowed";
@@ -49,7 +49,6 @@ const PROXY_STRIP_HEADERS: &[&str] = &["cookie", "host", "origin", "referer", "u
#[derive(Clone)]
pub enum UiMode {
Setup,
Diag,
Install,
Main,
}
@@ -58,128 +57,46 @@ impl UiMode {
fn path(&self, path: &str) -> PathBuf {
match self {
Self::Setup => Path::new("setup-wizard").join(path),
Self::Diag => Path::new("diagnostic-ui").join(path),
Self::Install => Path::new("install-wizard").join(path),
Self::Main => Path::new("ui").join(path),
}
}
}
pub fn setup_ui_file_router(ctx: SetupContext) -> Router {
Router::new()
.route_service(
"/rpc/*path",
post(Server::new(move || ready(Ok(ctx.clone())), setup_api()).middleware(Cors::new())),
)
.fallback(any(|request: Request| async move {
alt_ui(request, UiMode::Setup)
.await
.unwrap_or_else(server_error)
}))
}
pub fn diag_ui_file_router(ctx: DiagnosticContext) -> Router {
pub fn rpc_router<C: Context + Clone + AsRef<RpcContinuations>>(
ctx: C,
server: HttpServer<C>,
) -> Router {
Router::new()
.route("/rpc/*path", post(server))
.route(
"/rpc/*path",
post(
Server::new(move || ready(Ok(ctx.clone())), diagnostic_api())
.middleware(Cors::new())
.middleware(DiagnosticMode::new()),
),
)
.fallback(any(|request: Request| async move {
alt_ui(request, UiMode::Diag)
.await
.unwrap_or_else(server_error)
}))
}
pub fn install_ui_file_router(ctx: InstallContext) -> Router {
Router::new()
.route("/rpc/*path", {
let ctx = ctx.clone();
post(Server::new(move || ready(Ok(ctx.clone())), install_api()).middleware(Cors::new()))
})
.fallback(any(|request: Request| async move {
alt_ui(request, UiMode::Install)
.await
.unwrap_or_else(server_error)
}))
}
pub fn main_ui_server_router(ctx: RpcContext) -> Router {
Router::new()
.route("/rpc/*path", {
let ctx = ctx.clone();
post(
Server::new(move || ready(Ok(ctx.clone())), main_api::<RpcContext>())
.middleware(Cors::new())
.middleware(Auth::new())
.middleware(SyncDb::new()),
)
})
.route(
"/ws/db",
any({
let ctx = ctx.clone();
move |headers: HeaderMap, ws: x::WebSocketUpgrade| async move {
subscribe(ctx, headers, ws)
.await
.unwrap_or_else(server_error)
}
}),
)
.route(
"/ws/rpc/*path",
"/ws/rpc/:guid",
get({
let ctx = ctx.clone();
move |x::Path(path): x::Path<String>,
move |x::Path(guid): x::Path<Guid>,
ws: axum::extract::ws::WebSocketUpgrade| async move {
match Guid::from(&path) {
None => {
tracing::debug!("No Guid Path");
bad_request()
}
Some(guid) => match ctx.rpc_continuations.get_ws_handler(&guid).await {
Some(cont) => ws.on_upgrade(cont),
_ => not_found(),
},
match AsRef::<RpcContinuations>::as_ref(&ctx).get_ws_handler(&guid).await {
Some(cont) => ws.on_upgrade(cont),
_ => not_found(),
}
}
}),
)
.route(
"/rest/rpc/*path",
"/rest/rpc/:guid",
any({
let ctx = ctx.clone();
move |request: x::Request| async move {
let path = request
.uri()
.path()
.strip_prefix("/rest/rpc/")
.unwrap_or_default();
match Guid::from(&path) {
None => {
tracing::debug!("No Guid Path");
bad_request()
}
Some(guid) => match ctx.rpc_continuations.get_rest_handler(&guid).await {
None => not_found(),
Some(cont) => cont(request).await.unwrap_or_else(server_error),
},
move |x::Path(guid): x::Path<Guid>, request: x::Request| async move {
match AsRef::<RpcContinuations>::as_ref(&ctx).get_rest_handler(&guid).await {
None => not_found(),
Some(cont) => cont(request).await.unwrap_or_else(server_error),
}
}
}),
)
.fallback(any(move |request: Request| async move {
main_start_os_ui(request, ctx)
.await
.unwrap_or_else(server_error)
}))
}
async fn alt_ui(req: Request, ui_mode: UiMode) -> Result<Response, Error> {
fn serve_ui(req: Request, ui_mode: UiMode) -> Result<Response, Error> {
let (request_parts, _body) = req.into_parts();
match &request_parts.method {
&Method::GET => {
@@ -196,9 +113,7 @@ async fn alt_ui(req: Request, ui_mode: UiMode) -> Result<Response, Error> {
.or_else(|| EMBEDDED_UIS.get_file(&*ui_mode.path("index.html")));
if let Some(file) = file {
FileData::from_embedded(&request_parts, file)
.into_response(&request_parts)
.await
FileData::from_embedded(&request_parts, file).into_response(&request_parts)
} else {
Ok(not_found())
}
@@ -207,6 +122,75 @@ async fn alt_ui(req: Request, ui_mode: UiMode) -> Result<Response, Error> {
}
}
pub fn setup_ui_router(ctx: SetupContext) -> Router {
rpc_router(
ctx.clone(),
Server::new(move || ready(Ok(ctx.clone())), setup_api()).middleware(Cors::new()),
)
.fallback(any(|request: Request| async move {
serve_ui(request, UiMode::Setup).unwrap_or_else(server_error)
}))
}
pub fn diagnostic_ui_router(ctx: DiagnosticContext) -> Router {
rpc_router(
ctx.clone(),
Server::new(move || ready(Ok(ctx.clone())), diagnostic_api()).middleware(Cors::new()),
)
.fallback(any(|request: Request| async move {
serve_ui(request, UiMode::Main).unwrap_or_else(server_error)
}))
}
pub fn install_ui_router(ctx: InstallContext) -> Router {
rpc_router(
ctx.clone(),
Server::new(move || ready(Ok(ctx.clone())), install_api()).middleware(Cors::new()),
)
.fallback(any(|request: Request| async move {
serve_ui(request, UiMode::Install).unwrap_or_else(server_error)
}))
}
pub fn init_ui_router(ctx: InitContext) -> Router {
rpc_router(
ctx.clone(),
Server::new(move || ready(Ok(ctx.clone())), init_api()).middleware(Cors::new()),
)
.fallback(any(|request: Request| async move {
serve_ui(request, UiMode::Main).unwrap_or_else(server_error)
}))
}
pub fn main_ui_router(ctx: RpcContext) -> Router {
rpc_router(
ctx.clone(),
Server::new(move || ready(Ok(ctx.clone())), main_api::<RpcContext>())
.middleware(Cors::new())
.middleware(Auth::new())
.middleware(SyncDb::new()),
)
// TODO: cert
.fallback(any(|request: Request| async move {
serve_ui(request, UiMode::Main).unwrap_or_else(server_error)
}))
}
pub fn refresher() -> Router {
Router::new().fallback(get(|request: Request| async move {
let res = include_bytes!("./refresher.html");
FileData {
data: Body::from(&res[..]),
e_tag: None,
encoding: None,
len: Some(res.len() as u64),
mime: Some("text/html".into()),
}
.into_response(&request.into_parts().0)
.unwrap_or_else(server_error)
}))
}
async fn if_authorized<
F: FnOnce() -> Fut,
Fut: Future<Output = Result<Response, Error>> + Send + Sync,
@@ -223,89 +207,6 @@ async fn if_authorized<
}
}
async fn main_start_os_ui(req: Request, ctx: RpcContext) -> Result<Response, Error> {
let (request_parts, _body) = req.into_parts();
match (
&request_parts.method,
request_parts
.uri
.path()
.strip_prefix('/')
.unwrap_or(request_parts.uri.path())
.split_once('/'),
) {
(&Method::GET, Some(("public", path))) => {
todo!("pull directly from s9pk")
}
(&Method::GET, Some(("proxy", target))) => {
if_authorized(&ctx, &request_parts, || async {
let target = urlencoding::decode(target)?;
let res = ctx
.client
.get(target.as_ref())
.headers(
request_parts
.headers
.iter()
.filter(|(h, _)| {
!PROXY_STRIP_HEADERS
.iter()
.any(|bad| h.as_str().eq_ignore_ascii_case(bad))
})
.flat_map(|(h, v)| {
Some((
reqwest::header::HeaderName::from_lowercase(
h.as_str().as_bytes(),
)
.ok()?,
reqwest::header::HeaderValue::from_bytes(v.as_bytes()).ok()?,
))
})
.collect(),
)
.send()
.await
.with_kind(crate::ErrorKind::Network)?;
let mut hres = Response::builder().status(res.status().as_u16());
for (h, v) in res.headers().clone() {
if let Some(h) = h {
hres = hres.header(h.to_string(), v.as_bytes());
}
}
hres.body(Body::from_stream(res.bytes_stream()))
.with_kind(crate::ErrorKind::Network)
})
.await
}
(&Method::GET, Some(("eos", "local.crt"))) => {
let account = ctx.account.read().await;
cert_send(&account.root_ca_cert, &account.hostname)
}
(&Method::GET, _) => {
let uri_path = UiMode::Main.path(
request_parts
.uri
.path()
.strip_prefix('/')
.unwrap_or(request_parts.uri.path()),
);
let file = EMBEDDED_UIS
.get_file(&*uri_path)
.or_else(|| EMBEDDED_UIS.get_file(&*UiMode::Main.path("index.html")));
if let Some(file) = file {
FileData::from_embedded(&request_parts, file)
.into_response(&request_parts)
.await
} else {
Ok(not_found())
}
}
_ => Ok(method_not_allowed()),
}
}
pub fn unauthorized(err: Error, path: &str) -> Response {
tracing::warn!("unauthorized for {} @{:?}", err, path);
tracing::debug!("{:?}", err);
@@ -373,8 +274,8 @@ struct FileData {
data: Body,
len: Option<u64>,
encoding: Option<&'static str>,
e_tag: String,
mime: Option<String>,
e_tag: Option<String>,
mime: Option<InternedString>,
}
impl FileData {
fn from_embedded(req: &RequestParts, file: &'static include_dir::File<'static>) -> Self {
@@ -407,10 +308,23 @@ impl FileData {
len: Some(data.len() as u64),
encoding,
data: data.into(),
e_tag: e_tag(path, None),
e_tag: file.metadata().map(|metadata| {
e_tag(
path,
format!(
"{}",
metadata
.modified()
.duration_since(UNIX_EPOCH)
.map(|d| d.as_secs() as i64)
.unwrap_or_else(|e| e.duration().as_secs() as i64 * -1),
)
.as_bytes(),
)
}),
mime: MimeGuess::from_path(path)
.first()
.map(|m| m.essence_str().to_owned()),
.map(|m| m.essence_str().into()),
}
}
@@ -434,7 +348,18 @@ impl FileData {
.await
.with_ctx(|_| (ErrorKind::Filesystem, path.display().to_string()))?;
let e_tag = e_tag(path, Some(&metadata));
let e_tag = Some(e_tag(
path,
format!(
"{}",
metadata
.modified()?
.duration_since(UNIX_EPOCH)
.map(|d| d.as_secs() as i64)
.unwrap_or_else(|e| e.duration().as_secs() as i64 * -1)
)
.as_bytes(),
));
let (len, data) = if encoding == Some("gzip") {
(
@@ -455,16 +380,18 @@ impl FileData {
e_tag,
mime: MimeGuess::from_path(path)
.first()
.map(|m| m.essence_str().to_owned()),
.map(|m| m.essence_str().into()),
})
}
async fn into_response(self, req: &RequestParts) -> Result<Response, Error> {
fn into_response(self, req: &RequestParts) -> Result<Response, Error> {
let mut builder = Response::builder();
if let Some(mime) = self.mime {
builder = builder.header(http::header::CONTENT_TYPE, &*mime);
}
builder = builder.header(http::header::ETAG, &*self.e_tag);
if let Some(e_tag) = &self.e_tag {
builder = builder.header(http::header::ETAG, &**e_tag);
}
builder = builder.header(
http::header::CACHE_CONTROL,
"public, max-age=21000000, immutable",
@@ -481,11 +408,12 @@ impl FileData {
builder = builder.header(http::header::CONNECTION, "keep-alive");
}
if req
.headers
.get("if-none-match")
.and_then(|h| h.to_str().ok())
== Some(self.e_tag.as_ref())
if self.e_tag.is_some()
&& req
.headers
.get("if-none-match")
.and_then(|h| h.to_str().ok())
== self.e_tag.as_deref()
{
builder = builder.status(StatusCode::NOT_MODIFIED);
builder.body(Body::empty())
@@ -503,21 +431,14 @@ impl FileData {
}
}
fn e_tag(path: &Path, metadata: Option<&Metadata>) -> String {
lazy_static::lazy_static! {
static ref INSTANCE_NONCE: u64 = rand::random();
}
fn e_tag(path: &Path, modified: impl AsRef<[u8]>) -> String {
let mut hasher = sha2::Sha256::new();
hasher.update(format!("{:?}", path).as_bytes());
if let Some(modified) = metadata.and_then(|m| m.modified().ok()) {
hasher.update(
format!(
"{}",
modified
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_secs()
)
.as_bytes(),
);
}
hasher.update(modified.as_ref());
let res = hasher.finalize();
format!(
"\"{}\"",

View File

@@ -1,23 +1,84 @@
use std::convert::Infallible;
use std::net::SocketAddr;
use std::task::Poll;
use std::time::Duration;
use axum::extract::Request;
use axum::Router;
use axum_server::Handle;
use bytes::Bytes;
use futures::future::ready;
use futures::FutureExt;
use helpers::NonDetachingJoinHandle;
use tokio::sync::oneshot;
use tokio::sync::{oneshot, watch};
use crate::context::{DiagnosticContext, InstallContext, RpcContext, SetupContext};
use crate::context::{DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext};
use crate::net::static_server::{
diag_ui_file_router, install_ui_file_router, main_ui_server_router, setup_ui_file_router,
diagnostic_ui_router, init_ui_router, install_ui_router, main_ui_router, refresher,
setup_ui_router,
};
use crate::Error;
use crate::prelude::*;
#[derive(Clone)]
pub struct SwappableRouter(watch::Sender<Router>);
impl SwappableRouter {
pub fn new(router: Router) -> Self {
Self(watch::channel(router).0)
}
pub fn swap(&self, router: Router) {
let _ = self.0.send_replace(router);
}
}
#[derive(Clone)]
pub struct SwappableRouterService(watch::Receiver<Router>);
impl<B> tower_service::Service<Request<B>> for SwappableRouterService
where
B: axum::body::HttpBody<Data = Bytes> + Send + 'static,
B::Error: Into<axum::BoxError>,
{
type Response = <Router as tower_service::Service<Request<B>>>::Response;
type Error = <Router as tower_service::Service<Request<B>>>::Error;
type Future = <Router as tower_service::Service<Request<B>>>::Future;
#[inline]
fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<(), Self::Error>> {
let mut changed = self.0.changed().boxed();
if changed.poll_unpin(cx).is_ready() {
return Poll::Ready(Ok(()));
}
drop(changed);
tower_service::Service::<Request<B>>::poll_ready(&mut self.0.borrow().clone(), cx)
}
fn call(&mut self, req: Request<B>) -> Self::Future {
self.0.borrow().clone().call(req)
}
}
impl<T> tower_service::Service<T> for SwappableRouter {
type Response = SwappableRouterService;
type Error = Infallible;
type Future = futures::future::Ready<Result<Self::Response, Self::Error>>;
#[inline]
fn poll_ready(
&mut self,
_: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, _: T) -> Self::Future {
ready(Ok(SwappableRouterService(self.0.subscribe())))
}
}
pub struct WebServer {
shutdown: oneshot::Sender<()>,
router: SwappableRouter,
thread: NonDetachingJoinHandle<()>,
}
impl WebServer {
pub fn new(bind: SocketAddr, router: Router) -> Self {
pub fn new(bind: SocketAddr) -> Self {
let router = SwappableRouter::new(refresher());
let thread_router = router.clone();
let (shutdown, shutdown_recv) = oneshot::channel();
let thread = NonDetachingJoinHandle::from(tokio::spawn(async move {
let handle = Handle::new();
@@ -25,14 +86,18 @@ impl WebServer {
server.http_builder().http1().preserve_header_case(true);
server.http_builder().http1().title_case_headers(true);
if let (Err(e), _) = tokio::join!(server.serve(router.into_make_service()), async {
if let (Err(e), _) = tokio::join!(server.serve(thread_router), async {
let _ = shutdown_recv.await;
handle.graceful_shutdown(Some(Duration::from_secs(0)));
}) {
tracing::error!("Spawning hyper server error: {}", e);
}
}));
Self { shutdown, thread }
Self {
shutdown,
router,
thread,
}
}
pub async fn shutdown(self) {
@@ -40,19 +105,27 @@ impl WebServer {
self.thread.await.unwrap()
}
pub fn main(bind: SocketAddr, ctx: RpcContext) -> Result<Self, Error> {
Ok(Self::new(bind, main_ui_server_router(ctx)))
pub fn serve_router(&mut self, router: Router) {
self.router.swap(router)
}
pub fn setup(bind: SocketAddr, ctx: SetupContext) -> Result<Self, Error> {
Ok(Self::new(bind, setup_ui_file_router(ctx)))
pub fn serve_main(&mut self, ctx: RpcContext) {
self.serve_router(main_ui_router(ctx))
}
pub fn diagnostic(bind: SocketAddr, ctx: DiagnosticContext) -> Result<Self, Error> {
Ok(Self::new(bind, diag_ui_file_router(ctx)))
pub fn serve_setup(&mut self, ctx: SetupContext) {
self.serve_router(setup_ui_router(ctx))
}
pub fn install(bind: SocketAddr, ctx: InstallContext) -> Result<Self, Error> {
Ok(Self::new(bind, install_ui_file_router(ctx)))
pub fn serve_diagnostic(&mut self, ctx: DiagnosticContext) {
self.serve_router(diagnostic_ui_router(ctx))
}
pub fn serve_install(&mut self, ctx: InstallContext) {
self.serve_router(install_ui_router(ctx))
}
pub fn serve_init(&mut self, ctx: InitContext) {
self.serve_router(init_ui_router(ctx))
}
}

View File

@@ -1,14 +1,16 @@
use std::panic::UnwindSafe;
use std::sync::Arc;
use std::time::Duration;
use futures::Future;
use futures::future::pending;
use futures::stream::BoxStream;
use futures::{Future, FutureExt, StreamExt, TryFutureExt};
use helpers::NonDetachingJoinHandle;
use imbl_value::{InOMap, InternedString};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncSeek, AsyncWrite};
use tokio::sync::{mpsc, watch};
use tokio::sync::watch;
use ts_rs::TS;
use crate::db::model::{Database, DatabaseModel};
@@ -168,39 +170,23 @@ impl FullProgress {
}
}
#[derive(Clone)]
pub struct FullProgressTracker {
overall: Arc<watch::Sender<Progress>>,
overall_recv: watch::Receiver<Progress>,
phases: InOMap<InternedString, watch::Receiver<Progress>>,
new_phase: (
mpsc::UnboundedSender<(InternedString, watch::Receiver<Progress>)>,
mpsc::UnboundedReceiver<(InternedString, watch::Receiver<Progress>)>,
),
overall: watch::Sender<Progress>,
phases: watch::Sender<InOMap<InternedString, watch::Receiver<Progress>>>,
}
impl FullProgressTracker {
pub fn new() -> Self {
let (overall, overall_recv) = watch::channel(Progress::new());
Self {
overall: Arc::new(overall),
overall_recv,
phases: InOMap::new(),
new_phase: mpsc::unbounded_channel(),
}
let (overall, _) = watch::channel(Progress::new());
let (phases, _) = watch::channel(InOMap::new());
Self { overall, phases }
}
fn fill_phases(&mut self) -> bool {
let mut changed = false;
while let Ok((name, phase)) = self.new_phase.1.try_recv() {
self.phases.insert(name, phase);
changed = true;
}
changed
}
pub fn snapshot(&mut self) -> FullProgress {
self.fill_phases();
pub fn snapshot(&self) -> FullProgress {
FullProgress {
overall: *self.overall.borrow(),
phases: self
.phases
.borrow()
.iter()
.map(|(name, progress)| NamedProgress {
name: name.clone(),
@@ -209,28 +195,75 @@ impl FullProgressTracker {
.collect(),
}
}
pub async fn changed(&mut self) {
if self.fill_phases() {
return;
}
let phases = self
.phases
.iter_mut()
.map(|(_, p)| Box::pin(p.changed()))
.collect_vec();
tokio::select! {
_ = self.overall_recv.changed() => (),
_ = futures::future::select_all(phases) => (),
}
}
pub fn handle(&self) -> FullProgressTrackerHandle {
FullProgressTrackerHandle {
overall: self.overall.clone(),
new_phase: self.new_phase.0.clone(),
pub fn stream(&self, min_interval: Option<Duration>) -> BoxStream<'static, FullProgress> {
struct StreamState {
overall: watch::Receiver<Progress>,
phases_recv: watch::Receiver<InOMap<InternedString, watch::Receiver<Progress>>>,
phases: InOMap<InternedString, watch::Receiver<Progress>>,
}
let mut overall = self.overall.subscribe();
overall.mark_changed(); // make sure stream starts with a value
let phases_recv = self.phases.subscribe();
let phases = phases_recv.borrow().clone();
let state = StreamState {
overall,
phases_recv,
phases,
};
futures::stream::unfold(
state,
move |StreamState {
mut overall,
mut phases_recv,
mut phases,
}| async move {
let changed = phases
.iter_mut()
.map(|(_, p)| async move { p.changed().or_else(|_| pending()).await }.boxed())
.chain([overall.changed().boxed()])
.chain([phases_recv.changed().boxed()])
.map(|fut| fut.map(|r| r.unwrap_or_default()))
.collect_vec();
if let Some(min_interval) = min_interval {
tokio::join!(
tokio::time::sleep(min_interval),
futures::future::select_all(changed),
);
} else {
futures::future::select_all(changed).await;
}
for (name, phase) in &*phases_recv.borrow_and_update() {
if !phases.contains_key(name) {
phases.insert(name.clone(), phase.clone());
}
}
let o = *overall.borrow_and_update();
Some((
FullProgress {
overall: o,
phases: phases
.iter_mut()
.map(|(name, progress)| NamedProgress {
name: name.clone(),
progress: *progress.borrow_and_update(),
})
.collect(),
},
StreamState {
overall,
phases_recv,
phases,
},
))
},
)
.boxed()
}
pub fn sync_to_db<DerefFn>(
mut self,
&self,
db: TypedPatchDb<Database>,
deref: DerefFn,
min_interval: Option<Duration>,
@@ -239,9 +272,9 @@ impl FullProgressTracker {
DerefFn: Fn(&mut DatabaseModel) -> Option<&mut Model<FullProgress>> + 'static,
for<'a> &'a DerefFn: UnwindSafe + Send,
{
let mut stream = self.stream(min_interval);
async move {
loop {
let progress = self.snapshot();
while let Some(progress) = stream.next().await {
if db
.mutate(|v| {
if let Some(p) = deref(v) {
@@ -255,25 +288,23 @@ impl FullProgressTracker {
{
break;
}
tokio::join!(self.changed(), async {
if let Some(interval) = min_interval {
tokio::time::sleep(interval).await
} else {
futures::future::ready(()).await
}
});
}
Ok(())
}
}
}
#[derive(Clone)]
pub struct FullProgressTrackerHandle {
overall: Arc<watch::Sender<Progress>>,
new_phase: mpsc::UnboundedSender<(InternedString, watch::Receiver<Progress>)>,
}
impl FullProgressTrackerHandle {
pub fn progress_bar_task(&self, name: &str) -> NonDetachingJoinHandle<()> {
let mut stream = self.stream(None);
let mut bar = PhasedProgressBar::new(name);
tokio::spawn(async move {
while let Some(progress) = stream.next().await {
bar.update(&progress);
if progress.overall.is_complete() {
break;
}
}
})
.into()
}
pub fn add_phase(
&self,
name: InternedString,
@@ -284,7 +315,9 @@ impl FullProgressTrackerHandle {
.send_modify(|o| o.add_total(overall_contribution));
}
let (send, recv) = watch::channel(Progress::new());
let _ = self.new_phase.send((name, recv));
self.phases.send_modify(|p| {
p.insert(name, recv);
});
PhaseProgressTrackerHandle {
overall: self.overall.clone(),
overall_contribution,
@@ -298,7 +331,7 @@ impl FullProgressTrackerHandle {
}
pub struct PhaseProgressTrackerHandle {
overall: Arc<watch::Sender<Progress>>,
overall: watch::Sender<Progress>,
overall_contribution: Option<u64>,
contributed: u64,
progress: watch::Sender<Progress>,

View File

@@ -169,7 +169,8 @@ impl CallRemote<RegistryContext> for CliContext {
&AnySigningKey::Ed25519(self.developer_key()?.clone()),
&body,
&host,
)?.to_header(),
)?
.to_header(),
)
.body(body)
.send()

View File

@@ -70,7 +70,7 @@ pub fn registry_api<C: Context>() -> ParentHandler<C> {
.subcommand("db", db::db_api::<C>())
}
pub fn registry_server_router(ctx: RegistryContext) -> Router {
pub fn registry_router(ctx: RegistryContext) -> Router {
use axum::extract as x;
use axum::routing::{any, get, post};
Router::new()
@@ -128,7 +128,7 @@ pub fn registry_server_router(ctx: RegistryContext) -> Router {
}
impl WebServer {
pub fn registry(bind: SocketAddr, ctx: RegistryContext) -> Self {
Self::new(bind, registry_server_router(ctx))
pub fn serve_registry(&mut self, ctx: RegistryContext) {
self.serve_router(registry_router(ctx))
}
}

View File

@@ -186,29 +186,16 @@ pub async fn cli_add_asset(
let file = MultiCursorFile::from(tokio::fs::File::open(&path).await?);
let mut progress = FullProgressTracker::new();
let progress_handle = progress.handle();
let mut sign_phase =
progress_handle.add_phase(InternedString::intern("Signing File"), Some(10));
let mut verify_phase =
progress_handle.add_phase(InternedString::intern("Verifying URL"), Some(100));
let mut index_phase = progress_handle.add_phase(
let progress = FullProgressTracker::new();
let mut sign_phase = progress.add_phase(InternedString::intern("Signing File"), Some(10));
let mut verify_phase = progress.add_phase(InternedString::intern("Verifying URL"), Some(100));
let mut index_phase = progress.add_phase(
InternedString::intern("Adding File to Registry Index"),
Some(1),
);
let progress_task: NonDetachingJoinHandle<()> = tokio::spawn(async move {
let mut bar = PhasedProgressBar::new(&format!("Adding {} to registry...", path.display()));
loop {
let snap = progress.snapshot();
bar.update(&snap);
if snap.overall.is_complete() {
break;
}
progress.changed().await
}
})
.into();
let progress_task =
progress.progress_bar_task(&format!("Adding {} to registry...", path.display()));
sign_phase.start();
let blake3 = file.blake3_mmap().await?;
@@ -252,7 +239,7 @@ pub async fn cli_add_asset(
.await?;
index_phase.complete();
progress_handle.complete();
progress.complete();
progress_task.await.with_kind(ErrorKind::Unknown)?;

View File

@@ -3,7 +3,7 @@ use std::panic::UnwindSafe;
use std::path::{Path, PathBuf};
use clap::Parser;
use helpers::{AtomicFile, NonDetachingJoinHandle};
use helpers::AtomicFile;
use imbl_value::{json, InternedString};
use itertools::Itertools;
use rpc_toolkit::{from_fn_async, Context, HandlerArgs, HandlerExt, ParentHandler};
@@ -12,7 +12,7 @@ use ts_rs::TS;
use crate::context::CliContext;
use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhasedProgressBar};
use crate::progress::FullProgressTracker;
use crate::registry::asset::RegistryAsset;
use crate::registry::context::RegistryContext;
use crate::registry::os::index::OsVersionInfo;
@@ -135,29 +135,17 @@ async fn cli_get_os_asset(
.await
.with_kind(ErrorKind::Filesystem)?;
let mut progress = FullProgressTracker::new();
let progress_handle = progress.handle();
let progress = FullProgressTracker::new();
let mut download_phase =
progress_handle.add_phase(InternedString::intern("Downloading File"), Some(100));
progress.add_phase(InternedString::intern("Downloading File"), Some(100));
download_phase.set_total(res.commitment.size);
let reverify_phase = if reverify {
Some(progress_handle.add_phase(InternedString::intern("Reverifying File"), Some(10)))
Some(progress.add_phase(InternedString::intern("Reverifying File"), Some(10)))
} else {
None
};
let progress_task: NonDetachingJoinHandle<()> = tokio::spawn(async move {
let mut bar = PhasedProgressBar::new("Downloading...");
loop {
let snap = progress.snapshot();
bar.update(&snap);
if snap.overall.is_complete() {
break;
}
progress.changed().await
}
})
.into();
let progress_task = progress.progress_bar_task("Downloading...");
download_phase.start();
let mut download_writer = download_phase.writer(&mut *file);
@@ -177,7 +165,7 @@ async fn cli_get_os_asset(
reverify_phase.complete();
}
progress_handle.complete();
progress.complete();
progress_task.await.with_kind(ErrorKind::Unknown)?;
}

View File

@@ -3,7 +3,6 @@ use std::panic::UnwindSafe;
use std::path::PathBuf;
use clap::Parser;
use helpers::NonDetachingJoinHandle;
use imbl_value::InternedString;
use itertools::Itertools;
use rpc_toolkit::{from_fn_async, Context, HandlerArgs, HandlerExt, ParentHandler};
@@ -12,7 +11,7 @@ use ts_rs::TS;
use crate::context::CliContext;
use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhasedProgressBar};
use crate::progress::FullProgressTracker;
use crate::registry::asset::RegistryAsset;
use crate::registry::context::RegistryContext;
use crate::registry::os::index::OsVersionInfo;
@@ -169,27 +168,15 @@ pub async fn cli_sign_asset(
let file = MultiCursorFile::from(tokio::fs::File::open(&path).await?);
let mut progress = FullProgressTracker::new();
let progress_handle = progress.handle();
let mut sign_phase =
progress_handle.add_phase(InternedString::intern("Signing File"), Some(10));
let mut index_phase = progress_handle.add_phase(
let progress = FullProgressTracker::new();
let mut sign_phase = progress.add_phase(InternedString::intern("Signing File"), Some(10));
let mut index_phase = progress.add_phase(
InternedString::intern("Adding Signature to Registry Index"),
Some(1),
);
let progress_task: NonDetachingJoinHandle<()> = tokio::spawn(async move {
let mut bar = PhasedProgressBar::new(&format!("Adding {} to registry...", path.display()));
loop {
let snap = progress.snapshot();
bar.update(&snap);
if snap.overall.is_complete() {
break;
}
progress.changed().await
}
})
.into();
let progress_task =
progress.progress_bar_task(&format!("Adding {} to registry...", path.display()));
sign_phase.start();
let blake3 = file.blake3_mmap().await?;
@@ -220,7 +207,7 @@ pub async fn cli_sign_asset(
.await?;
index_phase.complete();
progress_handle.complete();
progress.complete();
progress_task.await.with_kind(ErrorKind::Unknown)?;

View File

@@ -2,7 +2,6 @@ use std::path::PathBuf;
use std::sync::Arc;
use clap::Parser;
use helpers::NonDetachingJoinHandle;
use imbl_value::InternedString;
use itertools::Itertools;
use rpc_toolkit::HandlerArgs;
@@ -12,7 +11,7 @@ use url::Url;
use crate::context::CliContext;
use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhasedProgressBar};
use crate::progress::FullProgressTracker;
use crate::registry::context::RegistryContext;
use crate::registry::package::index::PackageVersionInfo;
use crate::registry::signer::commitment::merkle_archive::MerkleArchiveCommitment;
@@ -110,28 +109,16 @@ pub async fn cli_add_package(
) -> Result<(), Error> {
let s9pk = S9pk::open(&file, None).await?;
let mut progress = FullProgressTracker::new();
let progress_handle = progress.handle();
let mut sign_phase = progress_handle.add_phase(InternedString::intern("Signing File"), Some(1));
let mut verify_phase =
progress_handle.add_phase(InternedString::intern("Verifying URL"), Some(100));
let mut index_phase = progress_handle.add_phase(
let progress = FullProgressTracker::new();
let mut sign_phase = progress.add_phase(InternedString::intern("Signing File"), Some(1));
let mut verify_phase = progress.add_phase(InternedString::intern("Verifying URL"), Some(100));
let mut index_phase = progress.add_phase(
InternedString::intern("Adding File to Registry Index"),
Some(1),
);
let progress_task: NonDetachingJoinHandle<()> = tokio::spawn(async move {
let mut bar = PhasedProgressBar::new(&format!("Adding {} to registry...", file.display()));
loop {
let snap = progress.snapshot();
bar.update(&snap);
if snap.overall.is_complete() {
break;
}
progress.changed().await
}
})
.into();
let progress_task =
progress.progress_bar_task(&format!("Adding {} to registry...", file.display()));
sign_phase.start();
let commitment = s9pk.as_archive().commitment().await?;
@@ -160,7 +147,7 @@ pub async fn cli_add_package(
.await?;
index_phase.complete();
progress_handle.complete();
progress.complete();
progress_task.await.with_kind(ErrorKind::Unknown)?;

View File

@@ -1,5 +1,5 @@
use std::time::{SystemTime, UNIX_EPOCH};
use std::collections::BTreeMap;
use std::time::{SystemTime, UNIX_EPOCH};
use axum::body::Body;
use axum::extract::Request;

View File

@@ -1,5 +1,8 @@
use std::collections::BTreeMap;
use std::pin::Pin;
use std::str::FromStr;
use std::sync::Mutex as SyncMutex;
use std::task::{Context, Poll};
use std::time::Duration;
use axum::extract::ws::WebSocket;
@@ -7,9 +10,10 @@ use axum::extract::Request;
use axum::response::Response;
use clap::builder::ValueParserFactory;
use futures::future::BoxFuture;
use futures::{Future, FutureExt};
use helpers::TimedResource;
use imbl_value::InternedString;
use tokio::sync::Mutex;
use tokio::sync::{broadcast, Mutex as AsyncMutex};
use ts_rs::TS;
#[allow(unused_imports)]
@@ -73,21 +77,103 @@ impl std::fmt::Display for Guid {
}
}
pub type RestHandler =
Box<dyn FnOnce(Request) -> BoxFuture<'static, Result<Response, crate::Error>> + Send>;
pub struct RestFuture {
kill: Option<broadcast::Receiver<()>>,
fut: BoxFuture<'static, Result<Response, Error>>,
}
impl Future for RestFuture {
type Output = Result<Response, Error>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.kill.as_ref().map_or(false, |k| !k.is_empty()) {
Poll::Ready(Err(Error::new(
eyre!("session killed"),
ErrorKind::Authorization,
)))
} else {
self.fut.poll_unpin(cx)
}
}
}
pub type RestHandler = Box<dyn FnOnce(Request) -> RestFuture + Send>;
pub type WebSocketHandler = Box<dyn FnOnce(WebSocket) -> BoxFuture<'static, ()> + Send>;
pub struct WebSocketFuture {
kill: Option<broadcast::Receiver<()>>,
fut: BoxFuture<'static, ()>,
}
impl Future for WebSocketFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.kill.as_ref().map_or(false, |k| !k.is_empty()) {
Poll::Ready(())
} else {
self.fut.poll_unpin(cx)
}
}
}
pub type WebSocketHandler = Box<dyn FnOnce(WebSocket) -> WebSocketFuture + Send>;
pub enum RpcContinuation {
Rest(TimedResource<RestHandler>),
WebSocket(TimedResource<WebSocketHandler>),
}
impl RpcContinuation {
pub fn rest(handler: RestHandler, timeout: Duration) -> Self {
RpcContinuation::Rest(TimedResource::new(handler, timeout))
pub fn rest<F, Fut>(handler: F, timeout: Duration) -> Self
where
F: FnOnce(Request) -> Fut + Send + 'static,
Fut: Future<Output = Result<Response, Error>> + Send + 'static,
{
RpcContinuation::Rest(TimedResource::new(
Box::new(|req| RestFuture {
kill: None,
fut: handler(req).boxed(),
}),
timeout,
))
}
pub fn ws(handler: WebSocketHandler, timeout: Duration) -> Self {
RpcContinuation::WebSocket(TimedResource::new(handler, timeout))
pub fn ws<F, Fut>(handler: F, timeout: Duration) -> Self
where
F: FnOnce(WebSocket) -> Fut + Send + 'static,
Fut: Future<Output = ()> + Send + 'static,
{
RpcContinuation::WebSocket(TimedResource::new(
Box::new(|ws| WebSocketFuture {
kill: None,
fut: handler(ws).boxed(),
}),
timeout,
))
}
pub fn rest_authed<Ctx, T, F, Fut>(ctx: Ctx, session: T, handler: F, timeout: Duration) -> Self
where
Ctx: AsRef<OpenAuthedContinuations<T>>,
T: Eq + Ord,
F: FnOnce(Request) -> Fut + Send + 'static,
Fut: Future<Output = Result<Response, Error>> + Send + 'static,
{
let kill = Some(ctx.as_ref().subscribe_to_kill(session));
RpcContinuation::Rest(TimedResource::new(
Box::new(|req| RestFuture {
kill,
fut: handler(req).boxed(),
}),
timeout,
))
}
pub fn ws_authed<Ctx, T, F, Fut>(ctx: Ctx, session: T, handler: F, timeout: Duration) -> Self
where
Ctx: AsRef<OpenAuthedContinuations<T>>,
T: Eq + Ord,
F: FnOnce(WebSocket) -> Fut + Send + 'static,
Fut: Future<Output = ()> + Send + 'static,
{
let kill = Some(ctx.as_ref().subscribe_to_kill(session));
RpcContinuation::WebSocket(TimedResource::new(
Box::new(|ws| WebSocketFuture {
kill,
fut: handler(ws).boxed(),
}),
timeout,
))
}
pub fn is_timed_out(&self) -> bool {
match self {
@@ -97,10 +183,10 @@ impl RpcContinuation {
}
}
pub struct RpcContinuations(Mutex<BTreeMap<Guid, RpcContinuation>>);
pub struct RpcContinuations(AsyncMutex<BTreeMap<Guid, RpcContinuation>>);
impl RpcContinuations {
pub fn new() -> Self {
RpcContinuations(Mutex::new(BTreeMap::new()))
RpcContinuations(AsyncMutex::new(BTreeMap::new()))
}
#[instrument(skip_all)]
@@ -146,3 +232,28 @@ impl RpcContinuations {
x.get().await
}
}
pub struct OpenAuthedContinuations<Key: Eq + Ord>(SyncMutex<BTreeMap<Key, broadcast::Sender<()>>>);
impl<T> OpenAuthedContinuations<T>
where
T: Eq + Ord,
{
pub fn new() -> Self {
Self(SyncMutex::new(BTreeMap::new()))
}
pub fn kill(&self, session: &T) {
if let Some(channel) = self.0.lock().unwrap().remove(session) {
channel.send(()).ok();
}
}
fn subscribe_to_kill(&self, session: T) -> broadcast::Receiver<()> {
let mut map = self.0.lock().unwrap();
if let Some(send) = map.get(&session) {
send.subscribe()
} else {
let (send, recv) = broadcast::channel(1);
map.insert(session, send);
recv
}
}
}

View File

@@ -97,6 +97,7 @@ impl ArchiveSource for MultiCursorFile {
.ok()
.map(|m| m.len())
}
#[allow(refining_impl_trait)]
async fn fetch_all(&self) -> Result<impl AsyncRead + Unpin + Send + 'static, Error> {
use tokio::io::AsyncSeekExt;

View File

@@ -354,7 +354,7 @@ impl Service {
.with_kind(ErrorKind::MigrationFailed)?; // TODO: handle cancellation
if let Some(mut progress) = progress {
progress.finalization_progress.complete();
progress.progress_handle.complete();
progress.progress.complete();
tokio::task::yield_now().await;
}
ctx.db

View File

@@ -18,10 +18,7 @@ use crate::disk::mount::guard::GenericMountGuard;
use crate::install::PKG_ARCHIVE_DIR;
use crate::notifications::{notify, NotificationLevel};
use crate::prelude::*;
use crate::progress::{
FullProgressTracker, FullProgressTrackerHandle, PhaseProgressTrackerHandle,
ProgressTrackerWriter,
};
use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle, ProgressTrackerWriter};
use crate::s9pk::manifest::PackageId;
use crate::s9pk::merkle_archive::source::FileSource;
use crate::s9pk::S9pk;
@@ -34,7 +31,7 @@ pub type InstallFuture = BoxFuture<'static, Result<(), Error>>;
pub struct InstallProgressHandles {
pub finalization_progress: PhaseProgressTrackerHandle,
pub progress_handle: FullProgressTrackerHandle,
pub progress: FullProgressTracker,
}
/// This is the structure to contain all the services
@@ -59,13 +56,22 @@ impl ServiceMap {
}
#[instrument(skip_all)]
pub async fn init(&self, ctx: &RpcContext) -> Result<(), Error> {
for id in ctx.db.peek().await.as_public().as_package_data().keys()? {
pub async fn init(
&self,
ctx: &RpcContext,
mut progress: PhaseProgressTrackerHandle,
) -> Result<(), Error> {
progress.start();
let ids = ctx.db.peek().await.as_public().as_package_data().keys()?;
progress.set_total(ids.len() as u64);
for id in ids {
if let Err(e) = self.load(ctx, &id, LoadDisposition::Retry).await {
tracing::error!("Error loading installed package as service: {e}");
tracing::debug!("{e:?}");
}
progress += 1;
}
progress.complete();
Ok(())
}
@@ -112,17 +118,16 @@ impl ServiceMap {
};
let size = s9pk.size();
let mut progress = FullProgressTracker::new();
let progress = FullProgressTracker::new();
let download_progress_contribution = size.unwrap_or(60);
let progress_handle = progress.handle();
let mut download_progress = progress_handle.add_phase(
let mut download_progress = progress.add_phase(
InternedString::intern("Download"),
Some(download_progress_contribution),
);
if let Some(size) = size {
download_progress.set_total(size);
}
let mut finalization_progress = progress_handle.add_phase(
let mut finalization_progress = progress.add_phase(
InternedString::intern(op_name),
Some(download_progress_contribution / 2),
);
@@ -194,7 +199,7 @@ impl ServiceMap {
let deref_id = id.clone();
let sync_progress_task =
NonDetachingJoinHandle::from(tokio::spawn(progress.sync_to_db(
NonDetachingJoinHandle::from(tokio::spawn(progress.clone().sync_to_db(
ctx.db.clone(),
move |v| {
v.as_public_mut()
@@ -248,7 +253,7 @@ impl ServiceMap {
service
.uninstall(Some(s9pk.as_manifest().version.clone()))
.await?;
progress_handle.complete();
progress.complete();
Some(version)
} else {
None
@@ -261,7 +266,7 @@ impl ServiceMap {
recovery_source,
Some(InstallProgressHandles {
finalization_progress,
progress_handle,
progress,
}),
)
.await?
@@ -275,7 +280,7 @@ impl ServiceMap {
prev,
Some(InstallProgressHandles {
finalization_progress,
progress_handle,
progress,
}),
)
.await?

View File

@@ -4,7 +4,6 @@ use std::time::Duration;
use color_eyre::eyre::eyre;
use josekit::jwk::Jwk;
use openssl::x509::X509;
use patch_db::json_ptr::ROOT;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
@@ -12,15 +11,15 @@ use serde::{Deserialize, Serialize};
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tokio::try_join;
use torut::onion::OnionAddressV3;
use tracing::instrument;
use ts_rs::TS;
use crate::account::AccountInfo;
use crate::backup::restore::recover_full_embassy;
use crate::backup::target::BackupTargetFS;
use crate::context::rpc::InitRpcContextPhases;
use crate::context::setup::SetupResult;
use crate::context::SetupContext;
use crate::context::{RpcContext, SetupContext};
use crate::db::model::Database;
use crate::disk::fsck::RepairStrategy;
use crate::disk::main::DEFAULT_PASSWORD;
@@ -29,10 +28,12 @@ use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
use crate::disk::util::{pvscan, recovery_info, DiskInfo, EmbassyOsRecoveryInfo};
use crate::disk::REPAIR_DISK_PATH;
use crate::hostname::Hostname;
use crate::init::{init, InitResult};
use crate::init::{init, InitPhases, InitResult};
use crate::net::net_controller::PreInitNetController;
use crate::net::ssl::root_ca_start_time;
use crate::prelude::*;
use crate::progress::{FullProgress, PhaseProgressTrackerHandle};
use crate::rpc_continuations::Guid;
use crate::util::crypto::EncryptedWire;
use crate::util::io::{dir_copy, dir_size, Counter};
use crate::{Error, ErrorKind, ResultExt};
@@ -75,10 +76,12 @@ pub async fn list_disks(ctx: SetupContext) -> Result<Vec<DiskInfo>, Error> {
async fn setup_init(
ctx: &SetupContext,
password: Option<String>,
) -> Result<(Hostname, OnionAddressV3, X509), Error> {
let InitResult { db } = init(&ctx.config).await?;
init_phases: InitPhases,
) -> Result<(AccountInfo, PreInitNetController), Error> {
let InitResult { net_ctrl } = init(&ctx.config, init_phases).await?;
let account = db
let account = net_ctrl
.db
.mutate(|m| {
let mut account = AccountInfo::load(m)?;
if let Some(password) = password {
@@ -93,15 +96,12 @@ async fn setup_init(
})
.await?;
Ok((
account.hostname,
account.tor_key.public().get_onion_address(),
account.root_ca_cert,
))
Ok((account, net_ctrl))
}
#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct AttachParams {
#[serde(rename = "startOsPassword")]
password: Option<EncryptedWire>,
@@ -110,25 +110,20 @@ pub struct AttachParams {
pub async fn attach(
ctx: SetupContext,
AttachParams { password, guid }: AttachParams,
) -> Result<(), Error> {
let mut status = ctx.setup_status.write().await;
if status.is_some() {
return Err(Error::new(
eyre!("Setup already in progress"),
ErrorKind::InvalidRequest,
));
}
*status = Some(Ok(SetupStatus {
bytes_transferred: 0,
total_bytes: None,
complete: false,
}));
drop(status);
tokio::task::spawn(async move {
if let Err(e) = async {
AttachParams {
password,
guid: disk_guid,
}: AttachParams,
) -> Result<SetupProgress, Error> {
let setup_ctx = ctx.clone();
ctx.run_setup(|| async move {
let progress = &setup_ctx.progress;
let mut disk_phase = progress.add_phase("Opening data drive".into(), Some(10));
let init_phases = InitPhases::new(&progress);
let rpc_ctx_phases = InitRpcContextPhases::new(&progress);
let password: Option<String> = match password {
Some(a) => match a.decrypt(&*ctx) {
Some(a) => match a.decrypt(&setup_ctx) {
a @ Some(_) => a,
None => {
return Err(Error::new(
@@ -139,15 +134,17 @@ pub async fn attach(
},
None => None,
};
disk_phase.start();
let requires_reboot = crate::disk::main::import(
&*guid,
&ctx.datadir,
&*disk_guid,
&setup_ctx.datadir,
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
RepairStrategy::Aggressive
} else {
RepairStrategy::Preen
},
if guid.ends_with("_UNENC") { None } else { Some(DEFAULT_PASSWORD) },
if disk_guid.ends_with("_UNENC") { None } else { Some(DEFAULT_PASSWORD) },
)
.await?;
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
@@ -156,7 +153,7 @@ pub async fn attach(
.with_ctx(|_| (ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
}
if requires_reboot.0 {
crate::disk::main::export(&*guid, &ctx.datadir).await?;
crate::disk::main::export(&*disk_guid, &setup_ctx.datadir).await?;
return Err(Error::new(
eyre!(
"Errors were corrected with your disk, but the server must be restarted in order to proceed"
@@ -164,37 +161,48 @@ pub async fn attach(
ErrorKind::DiskManagement,
));
}
let (hostname, tor_addr, root_ca) = setup_init(&ctx, password).await?;
*ctx.setup_result.write().await = Some((guid, SetupResult {
tor_address: format!("https://{}", tor_addr),
lan_address: hostname.lan_address(),
root_ca: String::from_utf8(root_ca.to_pem()?)?,
}));
*ctx.setup_status.write().await = Some(Ok(SetupStatus {
bytes_transferred: 0,
total_bytes: None,
complete: true,
}));
Ok(())
}.await {
tracing::error!("Error Setting Up Embassy: {}", e);
tracing::debug!("{:?}", e);
*ctx.setup_status.write().await = Some(Err(e.into()));
}
});
Ok(())
disk_phase.complete();
let (account, net_ctrl) = setup_init(&setup_ctx, password, init_phases).await?;
let rpc_ctx = RpcContext::init(&setup_ctx.config, disk_guid, Some(net_ctrl), rpc_ctx_phases).await?;
Ok(((&account).try_into()?, rpc_ctx))
})?;
Ok(ctx.progress().await)
}
#[derive(Debug, Clone, Deserialize, Serialize)]
#[derive(Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
pub struct SetupStatus {
pub bytes_transferred: u64,
pub total_bytes: Option<u64>,
pub complete: bool,
#[ts(export)]
#[serde(tag = "status")]
pub enum SetupStatusRes {
Complete(SetupResult),
Running(SetupProgress),
}
pub async fn status(ctx: SetupContext) -> Result<Option<SetupStatus>, RpcError> {
ctx.setup_status.read().await.clone().transpose()
#[derive(Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct SetupProgress {
pub progress: FullProgress,
pub guid: Guid,
}
pub async fn status(ctx: SetupContext) -> Result<Option<SetupStatusRes>, Error> {
if let Some(res) = ctx.result.get() {
match res {
Ok((res, _)) => Ok(Some(SetupStatusRes::Complete(res.clone()))),
Err(e) => Err(e.clone_output()),
}
} else {
if ctx.task.initialized() {
Ok(Some(SetupStatusRes::Running(ctx.progress().await)))
} else {
Ok(None)
}
}
}
/// We want to be able to get a secret, a shared private key with the frontend
@@ -202,7 +210,7 @@ pub async fn status(ctx: SetupContext) -> Result<Option<SetupStatus>, RpcError>
/// without knowing the password over clearnet. We use the public key shared across the network
/// since it is fine to share the public, and encrypt against the public.
pub async fn get_pubkey(ctx: SetupContext) -> Result<Jwk, RpcError> {
let secret = ctx.as_ref().clone();
let secret = AsRef::<Jwk>::as_ref(&ctx).clone();
let pub_key = secret.to_public_key()?;
Ok(pub_key)
}
@@ -213,6 +221,7 @@ pub fn cifs<C: Context>() -> ParentHandler<C> {
#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct VerifyCifsParams {
hostname: String,
path: PathBuf,
@@ -230,7 +239,7 @@ pub async fn verify_cifs(
password,
}: VerifyCifsParams,
) -> Result<EmbassyOsRecoveryInfo, Error> {
let password: Option<String> = password.map(|x| x.decrypt(&*ctx)).flatten();
let password: Option<String> = password.map(|x| x.decrypt(&ctx)).flatten();
let guard = TmpMountGuard::mount(
&Cifs {
hostname,
@@ -256,7 +265,8 @@ pub enum RecoverySource {
#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExecuteParams {
#[ts(export)]
pub struct SetupExecuteParams {
start_os_logicalname: PathBuf,
start_os_password: EncryptedWire,
recovery_source: Option<RecoverySource>,
@@ -266,104 +276,65 @@ pub struct ExecuteParams {
// #[command(rpc_only)]
pub async fn execute(
ctx: SetupContext,
ExecuteParams {
SetupExecuteParams {
start_os_logicalname,
start_os_password,
recovery_source,
recovery_password,
}: ExecuteParams,
) -> Result<(), Error> {
let start_os_password = match start_os_password.decrypt(&*ctx) {
}: SetupExecuteParams,
) -> Result<SetupProgress, Error> {
let start_os_password = match start_os_password.decrypt(&ctx) {
Some(a) => a,
None => {
return Err(Error::new(
color_eyre::eyre::eyre!("Couldn't decode embassy-password"),
color_eyre::eyre::eyre!("Couldn't decode startOsPassword"),
crate::ErrorKind::Unknown,
))
}
};
let recovery_password: Option<String> = match recovery_password {
Some(a) => match a.decrypt(&*ctx) {
Some(a) => match a.decrypt(&ctx) {
Some(a) => Some(a),
None => {
return Err(Error::new(
color_eyre::eyre::eyre!("Couldn't decode recovery-password"),
color_eyre::eyre::eyre!("Couldn't decode recoveryPassword"),
crate::ErrorKind::Unknown,
))
}
},
None => None,
};
let mut status = ctx.setup_status.write().await;
if status.is_some() {
return Err(Error::new(
eyre!("Setup already in progress"),
ErrorKind::InvalidRequest,
));
}
*status = Some(Ok(SetupStatus {
bytes_transferred: 0,
total_bytes: None,
complete: false,
}));
drop(status);
tokio::task::spawn({
async move {
let ctx = ctx.clone();
match execute_inner(
ctx.clone(),
start_os_logicalname,
start_os_password,
recovery_source,
recovery_password,
)
.await
{
Ok((guid, hostname, tor_addr, root_ca)) => {
tracing::info!("Setup Complete!");
*ctx.setup_result.write().await = Some((
guid,
SetupResult {
tor_address: format!("https://{}", tor_addr),
lan_address: hostname.lan_address(),
root_ca: String::from_utf8(
root_ca.to_pem().expect("failed to serialize root ca"),
)
.expect("invalid pem string"),
},
));
*ctx.setup_status.write().await = Some(Ok(SetupStatus {
bytes_transferred: 0,
total_bytes: None,
complete: true,
}));
}
Err(e) => {
tracing::error!("Error Setting Up Server: {}", e);
tracing::debug!("{:?}", e);
*ctx.setup_status.write().await = Some(Err(e.into()));
}
}
}
});
Ok(())
let setup_ctx = ctx.clone();
ctx.run_setup(|| {
execute_inner(
setup_ctx,
start_os_logicalname,
start_os_password,
recovery_source,
recovery_password,
)
})?;
Ok(ctx.progress().await)
}
#[instrument(skip_all)]
// #[command(rpc_only)]
pub async fn complete(ctx: SetupContext) -> Result<SetupResult, Error> {
let (guid, setup_result) = if let Some((guid, setup_result)) = &*ctx.setup_result.read().await {
(guid.clone(), setup_result.clone())
} else {
return Err(Error::new(
match ctx.result.get() {
Some(Ok((res, ctx))) => {
let mut guid_file = File::create("/media/startos/config/disk.guid").await?;
guid_file.write_all(ctx.disk_guid.as_bytes()).await?;
guid_file.sync_all().await?;
Ok(res.clone())
}
Some(Err(e)) => Err(e.clone_output()),
None => Err(Error::new(
eyre!("setup.execute has not completed successfully"),
crate::ErrorKind::InvalidRequest,
));
};
let mut guid_file = File::create("/media/startos/config/disk.guid").await?;
guid_file.write_all(guid.as_bytes()).await?;
guid_file.sync_all().await?;
Ok(setup_result)
)),
}
}
#[instrument(skip_all)]
@@ -380,7 +351,22 @@ pub async fn execute_inner(
start_os_password: String,
recovery_source: Option<RecoverySource>,
recovery_password: Option<String>,
) -> Result<(Arc<String>, Hostname, OnionAddressV3, X509), Error> {
) -> Result<(SetupResult, RpcContext), Error> {
let progress = &ctx.progress;
let mut disk_phase = progress.add_phase("Formatting data drive".into(), Some(10));
let restore_phase = match &recovery_source {
Some(RecoverySource::Backup { .. }) => {
Some(progress.add_phase("Restoring backup".into(), Some(100)))
}
Some(RecoverySource::Migrate { .. }) => {
Some(progress.add_phase("Transferring data".into(), Some(100)))
}
None => None,
};
let init_phases = InitPhases::new(&progress);
let rpc_ctx_phases = InitRpcContextPhases::new(&progress);
disk_phase.start();
let encryption_password = if ctx.disable_encryption {
None
} else {
@@ -402,41 +388,70 @@ pub async fn execute_inner(
encryption_password,
)
.await?;
disk_phase.complete();
if let Some(RecoverySource::Backup { target }) = recovery_source {
recover(ctx, guid, start_os_password, target, recovery_password).await
} else if let Some(RecoverySource::Migrate { guid: old_guid }) = recovery_source {
migrate(ctx, guid, &old_guid, start_os_password).await
} else {
let (hostname, tor_addr, root_ca) = fresh_setup(&ctx, &start_os_password).await?;
Ok((guid, hostname, tor_addr, root_ca))
let progress = SetupExecuteProgress {
init_phases,
restore_phase,
rpc_ctx_phases,
};
match recovery_source {
Some(RecoverySource::Backup { target }) => {
recover(
&ctx,
guid,
start_os_password,
target,
recovery_password,
progress,
)
.await
}
Some(RecoverySource::Migrate { guid: old_guid }) => {
migrate(&ctx, guid, &old_guid, start_os_password, progress).await
}
None => fresh_setup(&ctx, guid, &start_os_password, progress).await,
}
}
pub struct SetupExecuteProgress {
pub init_phases: InitPhases,
pub restore_phase: Option<PhaseProgressTrackerHandle>,
pub rpc_ctx_phases: InitRpcContextPhases,
}
async fn fresh_setup(
ctx: &SetupContext,
guid: Arc<String>,
start_os_password: &str,
) -> Result<(Hostname, OnionAddressV3, X509), Error> {
SetupExecuteProgress {
init_phases,
rpc_ctx_phases,
..
}: SetupExecuteProgress,
) -> Result<(SetupResult, RpcContext), Error> {
let account = AccountInfo::new(start_os_password, root_ca_start_time().await?)?;
let db = ctx.db().await?;
db.put(&ROOT, &Database::init(&account)?).await?;
drop(db);
init(&ctx.config).await?;
Ok((
account.hostname,
account.tor_key.public().get_onion_address(),
account.root_ca_cert,
))
let InitResult { net_ctrl } = init(&ctx.config, init_phases).await?;
let rpc_ctx = RpcContext::init(&ctx.config, guid, Some(net_ctrl), rpc_ctx_phases).await?;
Ok(((&account).try_into()?, rpc_ctx))
}
#[instrument(skip_all)]
async fn recover(
ctx: SetupContext,
ctx: &SetupContext,
guid: Arc<String>,
start_os_password: String,
recovery_source: BackupTargetFS,
recovery_password: Option<String>,
) -> Result<(Arc<String>, Hostname, OnionAddressV3, X509), Error> {
progress: SetupExecuteProgress,
) -> Result<(SetupResult, RpcContext), Error> {
let recovery_source = TmpMountGuard::mount(&recovery_source, ReadWrite).await?;
recover_full_embassy(
ctx,
@@ -444,23 +459,26 @@ async fn recover(
start_os_password,
recovery_source,
recovery_password,
progress,
)
.await
}
#[instrument(skip_all)]
async fn migrate(
ctx: SetupContext,
ctx: &SetupContext,
guid: Arc<String>,
old_guid: &str,
start_os_password: String,
) -> Result<(Arc<String>, Hostname, OnionAddressV3, X509), Error> {
*ctx.setup_status.write().await = Some(Ok(SetupStatus {
bytes_transferred: 0,
total_bytes: None,
complete: false,
}));
SetupExecuteProgress {
init_phases,
restore_phase,
rpc_ctx_phases,
}: SetupExecuteProgress,
) -> Result<(SetupResult, RpcContext), Error> {
let mut restore_phase = restore_phase.or_not_found("restore progress")?;
restore_phase.start();
let _ = crate::disk::main::import(
&old_guid,
"/media/startos/migrate",
@@ -500,20 +518,12 @@ async fn migrate(
res = async {
loop {
tokio::time::sleep(Duration::from_secs(1)).await;
*ctx.setup_status.write().await = Some(Ok(SetupStatus {
bytes_transferred: 0,
total_bytes: Some(main_transfer_size.load() + package_data_transfer_size.load()),
complete: false,
}));
restore_phase.set_total(main_transfer_size.load() + package_data_transfer_size.load());
}
} => res,
};
*ctx.setup_status.write().await = Some(Ok(SetupStatus {
bytes_transferred: 0,
total_bytes: Some(size),
complete: false,
}));
restore_phase.set_total(size);
let main_transfer_progress = Counter::new(0, ordering);
let package_data_transfer_progress = Counter::new(0, ordering);
@@ -529,18 +539,17 @@ async fn migrate(
res = async {
loop {
tokio::time::sleep(Duration::from_secs(1)).await;
*ctx.setup_status.write().await = Some(Ok(SetupStatus {
bytes_transferred: main_transfer_progress.load() + package_data_transfer_progress.load(),
total_bytes: Some(size),
complete: false,
}));
restore_phase.set_done(main_transfer_progress.load() + package_data_transfer_progress.load());
}
} => res,
}
let (hostname, tor_addr, root_ca) = setup_init(&ctx, Some(start_os_password)).await?;
crate::disk::main::export(&old_guid, "/media/startos/migrate").await?;
restore_phase.complete();
Ok((guid, hostname, tor_addr, root_ca))
let (account, net_ctrl) = setup_init(&ctx, Some(start_os_password), init_phases).await?;
let rpc_ctx = RpcContext::init(&ctx.config, guid, Some(net_ctrl), rpc_ctx_phases).await?;
Ok(((&account).try_into()?, rpc_ctx))
}

View File

@@ -20,9 +20,7 @@ use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::notifications::{notify, NotificationLevel};
use crate::prelude::*;
use crate::progress::{
FullProgressTracker, FullProgressTrackerHandle, PhaseProgressTrackerHandle, PhasedProgressBar,
};
use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle, PhasedProgressBar};
use crate::registry::asset::RegistryAsset;
use crate::registry::context::{RegistryContext, RegistryUrlParams};
use crate::registry::os::index::OsVersionInfo;
@@ -34,6 +32,7 @@ use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
use crate::sound::{
CIRCLE_OF_5THS_SHORT, UPDATE_FAILED_1, UPDATE_FAILED_2, UPDATE_FAILED_3, UPDATE_FAILED_4,
};
use crate::util::net::WebSocketExt;
use crate::util::Invoke;
use crate::PLATFORM;
@@ -91,50 +90,47 @@ pub async fn update_system(
.add(
guid.clone(),
RpcContinuation::ws(
Box::new(|mut ws| {
async move {
if let Err(e) = async {
let mut sub = ctx
|mut ws| async move {
if let Err(e) = async {
let mut sub = ctx
.db
.subscribe(
"/public/serverInfo/statusInfo/updateProgress"
.parse::<JsonPointer>()
.with_kind(ErrorKind::Database)?,
)
.await;
while {
let progress = ctx
.db
.subscribe(
"/public/serverInfo/statusInfo/updateProgress"
.parse::<JsonPointer>()
.with_kind(ErrorKind::Database)?,
)
.await;
while {
let progress = ctx
.db
.peek()
.await
.into_public()
.into_server_info()
.into_status_info()
.into_update_progress()
.de()?;
ws.send(axum::extract::ws::Message::Text(
serde_json::to_string(&progress)
.with_kind(ErrorKind::Serialization)?,
))
.peek()
.await
.with_kind(ErrorKind::Network)?;
progress.is_some()
} {
sub.recv().await;
}
ws.close().await.with_kind(ErrorKind::Network)?;
Ok::<_, Error>(())
}
.await
{
tracing::error!("Error returning progress of update: {e}");
tracing::debug!("{e:?}")
.into_public()
.into_server_info()
.into_status_info()
.into_update_progress()
.de()?;
ws.send(axum::extract::ws::Message::Text(
serde_json::to_string(&progress)
.with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
progress.is_some()
} {
sub.recv().await;
}
ws.normal_close("complete").await?;
Ok::<_, Error>(())
}
.boxed()
}),
.await
{
tracing::error!("Error returning progress of update: {e}");
tracing::debug!("{e:?}")
}
},
Duration::from_secs(30),
),
)
@@ -250,13 +246,12 @@ async fn maybe_do_update(
asset.validate(SIG_CONTEXT, asset.all_signers())?;
let mut progress = FullProgressTracker::new();
let progress_handle = progress.handle();
let mut download_phase = progress_handle.add_phase("Downloading File".into(), Some(100));
let progress = FullProgressTracker::new();
let mut download_phase = progress.add_phase("Downloading File".into(), Some(100));
download_phase.set_total(asset.commitment.size);
let reverify_phase = progress_handle.add_phase("Reverifying File".into(), Some(10));
let sync_boot_phase = progress_handle.add_phase("Syncing Boot Files".into(), Some(1));
let finalize_phase = progress_handle.add_phase("Finalizing Update".into(), Some(1));
let reverify_phase = progress.add_phase("Reverifying File".into(), Some(10));
let sync_boot_phase = progress.add_phase("Syncing Boot Files".into(), Some(1));
let finalize_phase = progress.add_phase("Finalizing Update".into(), Some(1));
let start_progress = progress.snapshot();
@@ -287,7 +282,7 @@ async fn maybe_do_update(
));
}
let progress_task = NonDetachingJoinHandle::from(tokio::spawn(progress.sync_to_db(
let progress_task = NonDetachingJoinHandle::from(tokio::spawn(progress.clone().sync_to_db(
ctx.db.clone(),
|db| {
db.as_public_mut()
@@ -304,7 +299,7 @@ async fn maybe_do_update(
ctx.clone(),
asset,
UpdateProgressHandles {
progress_handle,
progress,
download_phase,
reverify_phase,
sync_boot_phase,
@@ -373,7 +368,7 @@ async fn maybe_do_update(
}
struct UpdateProgressHandles {
progress_handle: FullProgressTrackerHandle,
progress: FullProgressTracker,
download_phase: PhaseProgressTrackerHandle,
reverify_phase: PhaseProgressTrackerHandle,
sync_boot_phase: PhaseProgressTrackerHandle,
@@ -385,7 +380,7 @@ async fn do_update(
ctx: RpcContext,
asset: RegistryAsset<Blake3Commitment>,
UpdateProgressHandles {
progress_handle,
progress,
mut download_phase,
mut reverify_phase,
mut sync_boot_phase,
@@ -436,7 +431,7 @@ async fn do_update(
.await?;
finalize_phase.complete();
progress_handle.complete();
progress.complete();
Ok(())
}

View File

@@ -5,9 +5,10 @@ use std::time::Duration;
use axum::body::Body;
use axum::response::Response;
use futures::{FutureExt, StreamExt};
use futures::StreamExt;
use http::header::CONTENT_LENGTH;
use http::StatusCode;
use imbl_value::InternedString;
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tokio::sync::watch;
@@ -19,68 +20,70 @@ use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
use crate::s9pk::merkle_archive::source::ArchiveSource;
use crate::util::io::TmpDir;
pub async fn upload(ctx: &RpcContext) -> Result<(Guid, UploadingFile), Error> {
pub async fn upload(
ctx: &RpcContext,
session: InternedString,
) -> Result<(Guid, UploadingFile), Error> {
let guid = Guid::new();
let (mut handle, file) = UploadingFile::new().await?;
ctx.rpc_continuations
.add(
guid.clone(),
RpcContinuation::rest(
Box::new(|request| {
async move {
let headers = request.headers();
let content_length = match headers.get(CONTENT_LENGTH).map(|a| a.to_str()) {
None => {
return Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(Body::from("Content-Length is required"))
.with_kind(ErrorKind::Network)
}
Some(Err(_)) => {
RpcContinuation::rest_authed(
ctx,
session,
|request| async move {
let headers = request.headers();
let content_length = match headers.get(CONTENT_LENGTH).map(|a| a.to_str()) {
None => {
return Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(Body::from("Content-Length is required"))
.with_kind(ErrorKind::Network)
}
Some(Err(_)) => {
return Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(Body::from("Invalid Content-Length"))
.with_kind(ErrorKind::Network)
}
Some(Ok(a)) => match a.parse::<u64>() {
Err(_) => {
return Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(Body::from("Invalid Content-Length"))
.with_kind(ErrorKind::Network)
}
Some(Ok(a)) => match a.parse::<u64>() {
Err(_) => {
return Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(Body::from("Invalid Content-Length"))
.with_kind(ErrorKind::Network)
}
Ok(a) => a,
},
};
Ok(a) => a,
},
};
handle
.progress
.send_modify(|p| p.expected_size = Some(content_length));
handle
.progress
.send_modify(|p| p.expected_size = Some(content_length));
let mut body = request.into_body().into_data_stream();
while let Some(next) = body.next().await {
if let Err(e) = async {
handle
.write_all(&next.map_err(|e| {
std::io::Error::new(std::io::ErrorKind::Other, e)
})?)
.await?;
Ok(())
}
.await
{
handle.progress.send_if_modified(|p| p.handle_error(&e));
break;
}
let mut body = request.into_body().into_data_stream();
while let Some(next) = body.next().await {
if let Err(e) = async {
handle
.write_all(&next.map_err(|e| {
std::io::Error::new(std::io::ErrorKind::Other, e)
})?)
.await?;
Ok(())
}
.await
{
handle.progress.send_if_modified(|p| p.handle_error(&e));
break;
}
Response::builder()
.status(StatusCode::NO_CONTENT)
.body(Body::empty())
.with_kind(ErrorKind::Network)
}
.boxed()
}),
Response::builder()
.status(StatusCode::NO_CONTENT)
.body(Body::empty())
.with_kind(ErrorKind::Network)
},
Duration::from_secs(30),
),
)

View File

@@ -274,6 +274,81 @@ pub fn response_to_reader(response: reqwest::Response) -> impl AsyncRead + Unpin
}))
}
#[pin_project::pin_project]
pub struct IOHook<'a, T> {
#[pin]
pub io: T,
pre_write: Option<Box<dyn FnMut(&[u8]) -> Result<(), std::io::Error> + Send + 'a>>,
post_write: Option<Box<dyn FnMut(&[u8]) + Send + 'a>>,
post_read: Option<Box<dyn FnMut(&[u8]) + Send + 'a>>,
}
impl<'a, T> IOHook<'a, T> {
pub fn new(io: T) -> Self {
Self {
io,
pre_write: None,
post_write: None,
post_read: None,
}
}
pub fn into_inner(self) -> T {
self.io
}
pub fn pre_write<F: FnMut(&[u8]) -> Result<(), std::io::Error> + Send + 'a>(&mut self, f: F) {
self.pre_write = Some(Box::new(f))
}
pub fn post_write<F: FnMut(&[u8]) + Send + 'a>(&mut self, f: F) {
self.post_write = Some(Box::new(f))
}
pub fn post_read<F: FnMut(&[u8]) + Send + 'a>(&mut self, f: F) {
self.post_read = Some(Box::new(f))
}
}
impl<'a, T: AsyncWrite> AsyncWrite for IOHook<'a, T> {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, std::io::Error>> {
let this = self.project();
if let Some(pre_write) = this.pre_write {
pre_write(buf)?;
}
let written = futures::ready!(this.io.poll_write(cx, buf)?);
if let Some(post_write) = this.post_write {
post_write(&buf[..written]);
}
Poll::Ready(Ok(written))
}
fn poll_flush(
self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> Poll<Result<(), std::io::Error>> {
self.project().io.poll_flush(cx)
}
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> Poll<Result<(), std::io::Error>> {
self.project().io.poll_shutdown(cx)
}
}
impl<'a, T: AsyncRead> AsyncRead for IOHook<'a, T> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
let this = self.project();
let start = buf.filled().len();
futures::ready!(this.io.poll_read(cx, buf)?);
if let Some(post_read) = this.post_read {
post_read(&buf.filled()[start..]);
}
Poll::Ready(Ok(()))
}
}
#[pin_project::pin_project]
pub struct BufferedWriteReader {
#[pin]
@@ -768,7 +843,7 @@ fn poll_flush_prefix<W: AsyncWrite>(
flush_writer: bool,
) -> Poll<Result<(), std::io::Error>> {
while let Some(mut cur) = prefix.pop_front() {
let buf = cur.remaining_slice();
let buf = CursorExt::remaining_slice(&cur);
if !buf.is_empty() {
match writer.as_mut().poll_write(cx, buf)? {
Poll::Ready(n) if n == buf.len() => (),

View File

@@ -36,6 +36,7 @@ pub mod http_reader;
pub mod io;
pub mod logger;
pub mod lshw;
pub mod net;
pub mod rpc;
pub mod rpc_client;
pub mod serde;

View File

@@ -0,0 +1,24 @@
use std::borrow::Cow;
use axum::extract::ws::{self, CloseFrame};
use futures::Future;
use crate::prelude::*;
pub trait WebSocketExt {
fn normal_close(
self,
msg: impl Into<Cow<'static, str>>,
) -> impl Future<Output = Result<(), Error>>;
}
impl WebSocketExt for ws::WebSocket {
async fn normal_close(mut self, msg: impl Into<Cow<'static, str>>) -> Result<(), Error> {
self.send(ws::Message::Close(Some(CloseFrame {
code: 1000,
reason: msg.into(),
})))
.await
.with_kind(ErrorKind::Network)
}
}

View File

@@ -22,8 +22,8 @@ use ts_rs::TS;
use super::IntoDoubleEndedIterator;
use crate::prelude::*;
use crate::util::Apply;
use crate::util::clap::FromStrParser;
use crate::util::Apply;
pub fn deserialize_from_str<
'de,
@@ -1040,15 +1040,19 @@ impl<T: AsRef<[u8]>> std::fmt::Display for Base64<T> {
f.write_str(&base64::encode(self.0.as_ref()))
}
}
impl<T: TryFrom<Vec<u8>>> FromStr for Base64<T>
{
impl<T: TryFrom<Vec<u8>>> FromStr for Base64<T> {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
base64::decode(&s)
.with_kind(ErrorKind::Deserialization)?
.apply(TryFrom::try_from)
.map(Self)
.map_err(|_| Error::new(eyre!("failed to create from buffer"), ErrorKind::Deserialization))
.map_err(|_| {
Error::new(
eyre!("failed to create from buffer"),
ErrorKind::Deserialization,
)
})
}
}
impl<'de, T: TryFrom<Vec<u8>>> Deserialize<'de> for Base64<T> {

View File

@@ -7,6 +7,7 @@ use imbl_value::InternedString;
use crate::db::model::Database;
use crate::prelude::*;
use crate::progress::PhaseProgressTrackerHandle;
use crate::Error;
mod v0_3_5;
@@ -85,11 +86,12 @@ where
&self,
version: &V,
db: &TypedPatchDb<Database>,
progress: &mut PhaseProgressTrackerHandle,
) -> impl Future<Output = Result<(), Error>> + Send {
async {
match self.semver().cmp(&version.semver()) {
Ordering::Greater => self.rollback_to_unchecked(version, db).await,
Ordering::Less => version.migrate_from_unchecked(self, db).await,
Ordering::Greater => self.rollback_to_unchecked(version, db, progress).await,
Ordering::Less => version.migrate_from_unchecked(self, db, progress).await,
Ordering::Equal => Ok(()),
}
}
@@ -98,11 +100,15 @@ where
&'a self,
version: &'a V,
db: &'a TypedPatchDb<Database>,
progress: &'a mut PhaseProgressTrackerHandle,
) -> BoxFuture<'a, Result<(), Error>> {
progress.add_total(1);
async {
let previous = Self::Previous::new();
if version.semver() < previous.semver() {
previous.migrate_from_unchecked(version, db).await?;
previous
.migrate_from_unchecked(version, db, progress)
.await?;
} else if version.semver() > previous.semver() {
return Err(Error::new(
eyre!(
@@ -115,6 +121,7 @@ where
tracing::info!("{} -> {}", previous.semver(), self.semver(),);
self.up(db).await?;
self.commit(db).await?;
*progress += 1;
Ok(())
}
.boxed()
@@ -123,14 +130,18 @@ where
&'a self,
version: &'a V,
db: &'a TypedPatchDb<Database>,
progress: &'a mut PhaseProgressTrackerHandle,
) -> BoxFuture<'a, Result<(), Error>> {
async {
let previous = Self::Previous::new();
tracing::info!("{} -> {}", self.semver(), previous.semver(),);
self.down(db).await?;
previous.commit(db).await?;
*progress += 1;
if version.semver() < previous.semver() {
previous.rollback_to_unchecked(version, db).await?;
previous
.rollback_to_unchecked(version, db, progress)
.await?;
} else if version.semver() > previous.semver() {
return Err(Error::new(
eyre!(
@@ -196,7 +207,11 @@ where
}
}
pub async fn init(db: &TypedPatchDb<Database>) -> Result<(), Error> {
pub async fn init(
db: &TypedPatchDb<Database>,
mut progress: PhaseProgressTrackerHandle,
) -> Result<(), Error> {
progress.start();
let version = Version::from_util_version(
db.peek()
.await
@@ -213,10 +228,10 @@ pub async fn init(db: &TypedPatchDb<Database>) -> Result<(), Error> {
ErrorKind::MigrationFailed,
));
}
Version::V0_3_5(v) => v.0.migrate_to(&Current::new(), &db).await?,
Version::V0_3_5_1(v) => v.0.migrate_to(&Current::new(), &db).await?,
Version::V0_3_5_2(v) => v.0.migrate_to(&Current::new(), &db).await?,
Version::V0_3_6(v) => v.0.migrate_to(&Current::new(), &db).await?,
Version::V0_3_5(v) => v.0.migrate_to(&Current::new(), &db, &mut progress).await?,
Version::V0_3_5_1(v) => v.0.migrate_to(&Current::new(), &db, &mut progress).await?,
Version::V0_3_5_2(v) => v.0.migrate_to(&Current::new(), &db, &mut progress).await?,
Version::V0_3_6(v) => v.0.migrate_to(&Current::new(), &db, &mut progress).await?,
Version::Other(_) => {
return Err(Error::new(
eyre!("Cannot downgrade"),
@@ -224,6 +239,7 @@ pub async fn init(db: &TypedPatchDb<Database>) -> Result<(), Error> {
))
}
}
progress.complete();
Ok(())
}

View File

@@ -20,7 +20,11 @@ pub fn data_dir<P: AsRef<Path>>(datadir: P, pkg_id: &PackageId, volume_id: &Volu
.join(volume_id)
}
pub fn asset_dir<P: AsRef<Path>>(datadir: P, pkg_id: &PackageId, version: &VersionString) -> PathBuf {
pub fn asset_dir<P: AsRef<Path>>(
datadir: P,
pkg_id: &PackageId,
version: &VersionString,
) -> PathBuf {
datadir
.as_ref()
.join(PKG_VOLUME_DIR)

View File

@@ -0,0 +1,7 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { EncryptedWire } from "./EncryptedWire"
export type AttachParams = {
startOsPassword: EncryptedWire | null
guid: string
}

View File

@@ -0,0 +1,7 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { BlockDev } from "./BlockDev"
import type { Cifs } from "./Cifs"
export type BackupTargetFS =
| ({ type: "disk" } & BlockDev)
| ({ type: "cifs" } & Cifs)

View File

@@ -0,0 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type BlockDev = { logicalname: string }

View File

@@ -0,0 +1,8 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type Cifs = {
hostname: string
path: string
username: string
password: string | null
}

View File

@@ -0,0 +1,5 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { FullProgress } from "./FullProgress"
import type { Guid } from "./Guid"
export type InitProgressRes = { progress: FullProgress; guid: Guid }

View File

@@ -0,0 +1,6 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { BackupTargetFS } from "./BackupTargetFS"
export type RecoverySource =
| { type: "migrate"; guid: string }
| { type: "backup"; target: BackupTargetFS }

View File

@@ -0,0 +1,10 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { EncryptedWire } from "./EncryptedWire"
import type { RecoverySource } from "./RecoverySource"
export type SetupExecuteParams = {
startOsLogicalname: string
startOsPassword: EncryptedWire
recoverySource: RecoverySource | null
recoveryPassword: EncryptedWire | null
}

View File

@@ -0,0 +1,5 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { FullProgress } from "./FullProgress"
import type { Guid } from "./Guid"
export type SetupProgress = { progress: FullProgress; guid: Guid }

View File

@@ -0,0 +1,7 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type SetupResult = {
torAddress: string
lanAddress: string
rootCa: string
}

View File

@@ -0,0 +1,7 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { SetupProgress } from "./SetupProgress"
import type { SetupResult } from "./SetupResult"
export type SetupStatusRes =
| ({ status: "complete" } & SetupResult)
| ({ status: "running" } & SetupProgress)

View File

@@ -0,0 +1,9 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { EncryptedWire } from "./EncryptedWire"
export type VerifyCifsParams = {
hostname: string
path: string
username: string
password: EncryptedWire | null
}

View File

@@ -15,17 +15,21 @@ export { AlpnInfo } from "./AlpnInfo"
export { AnySignature } from "./AnySignature"
export { AnySigningKey } from "./AnySigningKey"
export { AnyVerifyingKey } from "./AnyVerifyingKey"
export { AttachParams } from "./AttachParams"
export { BackupProgress } from "./BackupProgress"
export { BackupTargetFS } from "./BackupTargetFS"
export { Base64 } from "./Base64"
export { BindInfo } from "./BindInfo"
export { BindOptions } from "./BindOptions"
export { BindParams } from "./BindParams"
export { Blake3Commitment } from "./Blake3Commitment"
export { BlockDev } from "./BlockDev"
export { Callback } from "./Callback"
export { Category } from "./Category"
export { CheckDependenciesParam } from "./CheckDependenciesParam"
export { CheckDependenciesResult } from "./CheckDependenciesResult"
export { ChrootParams } from "./ChrootParams"
export { Cifs } from "./Cifs"
export { ContactInfo } from "./ContactInfo"
export { CreateOverlayedImageParams } from "./CreateOverlayedImageParams"
export { CurrentDependencies } from "./CurrentDependencies"
@@ -73,6 +77,7 @@ export { ImageConfig } from "./ImageConfig"
export { ImageId } from "./ImageId"
export { ImageMetadata } from "./ImageMetadata"
export { ImageSource } from "./ImageSource"
export { InitProgressRes } from "./InitProgressRes"
export { InstalledState } from "./InstalledState"
export { InstallingInfo } from "./InstallingInfo"
export { InstallingState } from "./InstallingState"
@@ -105,6 +110,7 @@ export { ParamsPackageId } from "./ParamsPackageId"
export { PasswordType } from "./PasswordType"
export { Progress } from "./Progress"
export { Public } from "./Public"
export { RecoverySource } from "./RecoverySource"
export { RegistryAsset } from "./RegistryAsset"
export { RemoveActionParams } from "./RemoveActionParams"
export { RemoveAddressParams } from "./RemoveAddressParams"
@@ -127,10 +133,15 @@ export { SetMainStatusStatus } from "./SetMainStatusStatus"
export { SetMainStatus } from "./SetMainStatus"
export { SetStoreParams } from "./SetStoreParams"
export { SetSystemSmtpParams } from "./SetSystemSmtpParams"
export { SetupExecuteParams } from "./SetupExecuteParams"
export { SetupProgress } from "./SetupProgress"
export { SetupResult } from "./SetupResult"
export { SetupStatusRes } from "./SetupStatusRes"
export { SignAssetParams } from "./SignAssetParams"
export { SignerInfo } from "./SignerInfo"
export { Status } from "./Status"
export { UpdatingState } from "./UpdatingState"
export { VerifyCifsParams } from "./VerifyCifsParams"
export { VersionSignerParams } from "./VersionSignerParams"
export { Version } from "./Version"
export { VolumeId } from "./VolumeId"

19
web/package-lock.json generated
View File

@@ -31,6 +31,7 @@
"@taiga-ui/core": "3.20.0",
"@taiga-ui/icons": "3.20.0",
"@taiga-ui/kit": "3.20.0",
"@tinkoff/ng-dompurify": "4.0.0",
"angular-svg-round-progressbar": "^9.0.0",
"ansi-to-html": "^0.7.2",
"base64-js": "^1.5.1",
@@ -1973,7 +1974,7 @@
},
"../sdk/dist": {
"name": "@start9labs/start-sdk",
"version": "0.4.0-rev0.lib0.rc8.beta10",
"version": "0.3.6-alpha1",
"license": "MIT",
"dependencies": {
"isomorphic-fetch": "^3.0.0",
@@ -5432,6 +5433,20 @@
"rxjs": ">=6.0.0"
}
},
"node_modules/@tinkoff/ng-dompurify": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/@tinkoff/ng-dompurify/-/ng-dompurify-4.0.0.tgz",
"integrity": "sha512-BjKUweWLrOx8UOZw+Tl+Dae5keYuSbeMkppcXQdsvwASMrPfmP7d3Q206Q6HDqOV2WnpnFqGUB95IMbLAeRRuw==",
"dependencies": {
"tslib": "^2.0.0"
},
"peerDependencies": {
"@angular/core": ">=12.0.0",
"@angular/platform-browser": ">=12.0.0",
"@types/dompurify": ">=2.3.0",
"dompurify": ">= 2.3.0"
}
},
"node_modules/@tinkoff/ng-event-plugins": {
"version": "3.1.0",
"license": "Apache-2.0",
@@ -5549,7 +5564,6 @@
},
"node_modules/@types/dompurify": {
"version": "2.3.4",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/trusted-types": "*"
@@ -5726,7 +5740,6 @@
},
"node_modules/@types/trusted-types": {
"version": "2.0.2",
"dev": true,
"license": "MIT"
},
"node_modules/@types/uuid": {

View File

@@ -13,7 +13,6 @@
"check:setup": "tsc --project projects/setup-wizard/tsconfig.json --noEmit --skipLibCheck",
"check:ui": "tsc --project projects/ui/tsconfig.json --noEmit --skipLibCheck",
"build:deps": "rm -rf .angular/cache && (cd ../patch-db/client && npm ci && npm run build) && (cd ../sdk && make bundle)",
"build:dui": "ng run diagnostic-ui:build",
"build:install-wiz": "ng run install-wizard:build",
"build:setup": "ng run setup-wizard:build",
"build:ui": "ng run ui:build",
@@ -25,7 +24,6 @@
"analyze:ui": "webpack-bundle-analyzer dist/raw/ui/stats.json",
"publish:shared": "npm run build:shared && npm publish ./dist/shared --access public",
"publish:marketplace": "npm run build:marketplace && npm publish ./dist/marketplace --access public",
"start:dui": "npm run-script build-config && ionic serve --project diagnostic-ui --host 0.0.0.0",
"start:install-wiz": "npm run-script build-config && ionic serve --project install-wizard --host 0.0.0.0",
"start:setup": "npm run-script build-config && ionic serve --project setup-wizard --host 0.0.0.0",
"start:ui": "npm run-script build-config && ionic serve --project ui --ip --host 0.0.0.0",
@@ -56,6 +54,7 @@
"@taiga-ui/core": "3.20.0",
"@taiga-ui/icons": "3.20.0",
"@taiga-ui/kit": "3.20.0",
"@tinkoff/ng-dompurify": "4.0.0",
"angular-svg-round-progressbar": "^9.0.0",
"ansi-to-html": "^0.7.2",
"base64-js": "^1.5.1",

View File

@@ -1,27 +0,0 @@
import { NgModule } from '@angular/core'
import { PreloadAllModules, RouterModule, Routes } from '@angular/router'
const routes: Routes = [
{
path: '',
loadChildren: () =>
import('./pages/home/home.module').then(m => m.HomePageModule),
},
{
path: 'logs',
loadChildren: () =>
import('./pages/logs/logs.module').then(m => m.LogsPageModule),
},
]
@NgModule({
imports: [
RouterModule.forRoot(routes, {
scrollPositionRestoration: 'enabled',
preloadingStrategy: PreloadAllModules,
useHash: true,
}),
],
exports: [RouterModule],
})
export class AppRoutingModule {}

View File

@@ -1,5 +0,0 @@
<tui-root>
<ion-app>
<ion-router-outlet></ion-router-outlet>
</ion-app>
</tui-root>

View File

@@ -1,8 +0,0 @@
:host {
display: block;
height: 100%;
}
tui-root {
height: 100%;
}

View File

@@ -1,10 +0,0 @@
import { Component } from '@angular/core'
@Component({
selector: 'app-root',
templateUrl: 'app.component.html',
styleUrls: ['app.component.scss'],
})
export class AppComponent {
constructor() {}
}

View File

@@ -1,43 +0,0 @@
import { NgModule } from '@angular/core'
import { BrowserAnimationsModule } from '@angular/platform-browser/animations'
import { RouteReuseStrategy } from '@angular/router'
import { IonicModule, IonicRouteStrategy } from '@ionic/angular'
import { TuiRootModule } from '@taiga-ui/core'
import { AppComponent } from './app.component'
import { AppRoutingModule } from './app-routing.module'
import { HttpClientModule } from '@angular/common/http'
import { ApiService } from './services/api/api.service'
import { MockApiService } from './services/api/mock-api.service'
import { LiveApiService } from './services/api/live-api.service'
import { RELATIVE_URL, WorkspaceConfig } from '@start9labs/shared'
const {
useMocks,
ui: { api },
} = require('../../../../config.json') as WorkspaceConfig
@NgModule({
declarations: [AppComponent],
imports: [
HttpClientModule,
BrowserAnimationsModule,
IonicModule.forRoot({
mode: 'md',
}),
AppRoutingModule,
TuiRootModule,
],
providers: [
{ provide: RouteReuseStrategy, useClass: IonicRouteStrategy },
{
provide: ApiService,
useClass: useMocks ? MockApiService : LiveApiService,
},
{
provide: RELATIVE_URL,
useValue: `/${api.url}/${api.version}`,
},
],
bootstrap: [AppComponent],
})
export class AppModule {}

View File

@@ -1,16 +0,0 @@
import { NgModule } from '@angular/core'
import { RouterModule, Routes } from '@angular/router'
import { HomePage } from './home.page'
const routes: Routes = [
{
path: '',
component: HomePage,
},
]
@NgModule({
imports: [RouterModule.forChild(routes)],
exports: [RouterModule],
})
export class HomePageRoutingModule {}

View File

@@ -1,16 +0,0 @@
import { LogsRes, ServerLogsReq } from '@start9labs/shared'
export abstract class ApiService {
abstract getError(): Promise<GetErrorRes>
abstract restart(): Promise<void>
abstract forgetDrive(): Promise<void>
abstract repairDisk(): Promise<void>
abstract systemRebuild(): Promise<void>
abstract getLogs(params: ServerLogsReq): Promise<LogsRes>
}
export interface GetErrorRes {
code: number
message: string
data: { details: string }
}

View File

@@ -1,68 +0,0 @@
import { Injectable } from '@angular/core'
import {
HttpService,
isRpcError,
RpcError,
RPCOptions,
} from '@start9labs/shared'
import { ApiService, GetErrorRes } from './api.service'
import { LogsRes, ServerLogsReq } from '@start9labs/shared'
@Injectable()
export class LiveApiService implements ApiService {
constructor(private readonly http: HttpService) {}
async getError(): Promise<GetErrorRes> {
return this.rpcRequest<GetErrorRes>({
method: 'diagnostic.error',
params: {},
})
}
async restart(): Promise<void> {
return this.rpcRequest<void>({
method: 'diagnostic.restart',
params: {},
})
}
async forgetDrive(): Promise<void> {
return this.rpcRequest<void>({
method: 'diagnostic.disk.forget',
params: {},
})
}
async repairDisk(): Promise<void> {
return this.rpcRequest<void>({
method: 'diagnostic.disk.repair',
params: {},
})
}
async systemRebuild(): Promise<void> {
return this.rpcRequest<void>({
method: 'diagnostic.rebuild',
params: {},
})
}
async getLogs(params: ServerLogsReq): Promise<LogsRes> {
return this.rpcRequest<LogsRes>({
method: 'diagnostic.logs',
params,
})
}
private async rpcRequest<T>(opts: RPCOptions): Promise<T> {
const res = await this.http.rpcRequest<T>(opts)
const rpcRes = res.body
if (isRpcError(rpcRes)) {
throw new RpcError(rpcRes.error)
}
return rpcRes.result
}
}

View File

@@ -1,67 +0,0 @@
import { Injectable } from '@angular/core'
import { pauseFor } from '@start9labs/shared'
import { ApiService, GetErrorRes } from './api.service'
import { LogsRes, ServerLogsReq, Log } from '@start9labs/shared'
@Injectable()
export class MockApiService implements ApiService {
async getError(): Promise<GetErrorRes> {
await pauseFor(1000)
return {
code: 15,
message: 'Unknown server',
data: { details: 'Some details about the error here' },
}
}
async restart(): Promise<void> {
await pauseFor(1000)
}
async forgetDrive(): Promise<void> {
await pauseFor(1000)
}
async repairDisk(): Promise<void> {
await pauseFor(1000)
}
async systemRebuild(): Promise<void> {
await pauseFor(1000)
}
async getLogs(params: ServerLogsReq): Promise<LogsRes> {
await pauseFor(1000)
let entries: Log[]
if (Math.random() < 0.2) {
entries = packageLogs
} else {
const arrLength = params.limit
? Math.ceil(params.limit / packageLogs.length)
: 10
entries = new Array(arrLength)
.fill(packageLogs)
.reduce((acc, val) => acc.concat(val), [])
}
return {
entries,
startCursor: 'start-cursor',
endCursor: 'end-cursor',
}
}
}
const packageLogs = [
{
timestamp: '2019-12-26T14:20:30.872Z',
message: '****** START *****',
},
{
timestamp: '2019-12-26T14:21:30.872Z',
message: 'ServerLogs ServerLogs ServerLogs ServerLogs ServerLogs',
},
{
timestamp: '2019-12-26T14:22:30.872Z',
message: '****** FINISH *****',
},
]

View File

@@ -1,3 +0,0 @@
export const environment = {
production: true,
}

View File

@@ -1,16 +0,0 @@
// This file can be replaced during build by using the `fileReplacements` array.
// `ng build --prod` replaces `environment.ts` with `environment.prod.ts`.
// The list of file replacements can be found in `angular.json`.
export const environment = {
production: false,
}
/*
* For easier debugging in development mode, you can import the following file
* to ignore zone related error stack frames such as `zone.run`, `zoneDelegate.invokeTask`.
*
* This import should be commented out in production mode because it will have a negative impact
* on performance if an error is thrown.
*/
// import 'zone.js/dist/zone-error'; // Included with Angular CLI.

View File

@@ -1,23 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<title>StartOS Diagnostic UI</title>
<base href="/" />
<meta name="color-scheme" content="light dark" />
<meta
name="viewport"
content="viewport-fit=cover, width=device-width, initial-scale=1.0, minimum-scale=1.0, maximum-scale=1.0, user-scalable=no"
/>
<meta name="format-detection" content="telephone=no" />
<meta name="msapplication-tap-highlight" content="no" />
<link rel="icon" type="image/png" href="assets/icon/favicon.ico" />
</head>
<body>
<app-root></app-root>
</body>
</html>

View File

@@ -1,12 +0,0 @@
import { enableProdMode } from '@angular/core'
import { platformBrowserDynamic } from '@angular/platform-browser-dynamic'
import { AppModule } from './app/app.module'
import { environment } from './environments/environment'
if (environment.production) {
enableProdMode()
}
platformBrowserDynamic()
.bootstrapModule(AppModule)
.catch(err => console.error(err))

View File

@@ -1,64 +0,0 @@
/**
* This file includes polyfills needed by Angular and is loaded before the app.
* You can add your own extra polyfills to this file.
*
* This file is divided into 2 sections:
* 1. Browser polyfills. These are applied before loading ZoneJS and are sorted by browsers.
* 2. Application imports. Files imported after ZoneJS that should be loaded before your main
* file.
*
* The current setup is for so-called "evergreen" browsers; the last versions of browsers that
* automatically update themselves. This includes Safari >= 10, Chrome >= 55 (including Opera),
* Edge >= 13 on the desktop, and iOS 10 and Chrome on mobile.
*
* Learn more in https://angular.io/guide/browser-support
*/
/***************************************************************************************************
* BROWSER POLYFILLS
*/
/** IE11 requires the following for NgClass support on SVG elements */
// import 'classlist.js'; // Run `npm install --save classlist.js`.
/**
* Web Animations `@angular/platform-browser/animations`
* Only required if AnimationBuilder is used within the application and using IE/Edge or Safari.
* Standard animation support in Angular DOES NOT require any polyfills (as of Angular 6.0).
*/
// import 'web-animations-js'; // Run `npm install --save web-animations-js`.
/**
* By default, zone.js will patch all possible macroTask and DomEvents
* user can disable parts of macroTask/DomEvents patch by setting following flags
* because those flags need to be set before `zone.js` being loaded, and webpack
* will put import in the top of bundle, so user need to create a separate file
* in this directory (for example: zone-flags.ts), and put the following flags
* into that file, and then add the following code before importing zone.js.
* import './zone-flags';
*
* The flags allowed in zone-flags.ts are listed here.
*
* The following flags will work for all browsers.
*
* (window as any).__Zone_disable_requestAnimationFrame = true; // disable patch requestAnimationFrame
* (window as any).__Zone_disable_on_property = true; // disable patch onProperty such as onclick
* (window as any).__zone_symbol__UNPATCHED_EVENTS = ['scroll', 'mousemove']; // disable patch specified eventNames
*
* in IE/Edge developer tools, the addEventListener will also be wrapped by zone.js
* with the following flag, it will bypass `zone.js` patch for IE/Edge
*
* (window as any).__Zone_enable_cross_context_check = true;
*
*/
import './zone-flags'
/***************************************************************************************************
* Zone JS is required by default for Angular itself.
*/
import 'zone.js/dist/zone' // Included with Angular CLI.
/***************************************************************************************************
* APPLICATION IMPORTS
*/

View File

@@ -1,41 +0,0 @@
@font-face {
font-family: 'Montserrat';
font-style: normal;
font-weight: normal;
src: url('/assets/fonts/Montserrat/Montserrat-Regular.ttf');
}
/** Ionic CSS Variables overrides **/
:root {
--ion-font-family: 'Montserrat';
--ion-color-primary: #0075e1;
--ion-color-medium: #989aa2;
--ion-color-medium-rgb: 152,154,162;
--ion-color-medium-contrast: #000000;
--ion-color-medium-contrast-rgb: 0,0,0;
--ion-color-medium-shade: #86888f;
--ion-color-medium-tint: #a2a4ab;
--ion-color-light: #222428;
--ion-color-light-rgb: 34,36,40;
--ion-color-light-contrast: #ffffff;
--ion-color-light-contrast-rgb: 255,255,255;
--ion-color-light-shade: #1e2023;
--ion-color-light-tint: #383a3e;
--ion-item-background: #2b2b2b;
--ion-toolbar-background: #2b2b2b;
--ion-card-background: #2b2b2b;
--ion-background-color: #282828;
--ion-background-color-rgb: 30,30,30;
--ion-text-color: var(--ion-color-dark);
--ion-text-color-rgb: var(--ion-color-dark-rgb);
}
.loader {
--spinner-color: var(--ion-color-warning) !important;
z-index: 40000 !important;
}

View File

@@ -1,6 +0,0 @@
/**
* Prevents Angular change detection from
* running with certain Web Component callbacks
*/
// eslint-disable-next-line no-underscore-dangle
(window as any).__Zone_disable_customElements = true

View File

@@ -1,9 +0,0 @@
/* To learn more about this file see: https://angular.io/config/tsconfig. */
{
"extends": "../../tsconfig.json",
"compilerOptions": {
"baseUrl": "./"
},
"files": ["src/main.ts", "src/polyfills.ts"],
"include": ["src/**/*.d.ts"]
}

View File

@@ -21,7 +21,7 @@ export class AppComponent {
let route = '/home'
if (inProgress) {
route = inProgress.complete ? '/success' : '/loading'
route = inProgress.status === 'complete' ? '/success' : '/loading'
}
await this.navCtrl.navigateForward(route)

View File

@@ -5,12 +5,7 @@ import {
ModalController,
NavController,
} from '@ionic/angular'
import {
ApiService,
BackupRecoverySource,
DiskRecoverySource,
DiskMigrateSource,
} from 'src/app/services/api/api.service'
import { ApiService } from 'src/app/services/api/api.service'
import { DiskInfo, ErrorToastService, GuidPipe } from '@start9labs/shared'
import { StateService } from 'src/app/services/state.service'
import { PasswordPage } from '../../modals/password/password.page'
@@ -58,18 +53,17 @@ export class EmbassyPage {
} else if (this.stateService.setupType === 'restore') {
this.storageDrives = disks.filter(
d =>
this.stateService.recoverySource?.type === 'backup' &&
this.stateService.recoverySource.target?.type === 'disk' &&
!d.partitions
.map(p => p.logicalname)
.includes(
(
(this.stateService.recoverySource as BackupRecoverySource)
?.target as DiskRecoverySource
)?.logicalname,
),
.includes(this.stateService.recoverySource.target.logicalname),
)
} else if (this.stateService.setupType === 'transfer') {
const guid = (this.stateService.recoverySource as DiskMigrateSource)
.guid
} else if (
this.stateService.setupType === 'transfer' &&
this.stateService.recoverySource?.type === 'migrate'
) {
const guid = this.stateService.recoverySource.guid
this.storageDrives = disks.filter(d => {
return (
d.guid !== guid && !d.partitions.map(p => p.guid).includes(guid)

View File

@@ -2,11 +2,11 @@ import { NgModule } from '@angular/core'
import { CommonModule } from '@angular/common'
import { IonicModule } from '@ionic/angular'
import { FormsModule } from '@angular/forms'
import { LoadingPage, ToMessagePipe } from './loading.page'
import { LoadingPage } from './loading.page'
import { LoadingPageRoutingModule } from './loading-routing.module'
@NgModule({
imports: [CommonModule, FormsModule, IonicModule, LoadingPageRoutingModule],
declarations: [LoadingPage, ToMessagePipe],
declarations: [LoadingPage],
})
export class LoadingPageModule {}

View File

@@ -1,39 +1,17 @@
<ion-content>
<ion-grid>
<ion-row class="ion-align-items-center">
<ion-col class="ion-text-center">
<ion-card *ngIf="progress$ | async as progress" color="dark">
<ion-card-header>
<ion-card-title>Initializing StartOS</ion-card-title>
<div class="center-wrapper">
<ion-card-subtitle>
{{ progress.transferred | toMessage }}
</ion-card-subtitle>
</div>
</ion-card-header>
<section *ngIf="progress$ | async as progress">
<h1 [style.font-size.rem]="2.5" [style.margin.rem]="1">
Setting up your server
</h1>
<div class="center-wrapper" *ngIf="progress.total">
Progress: {{ (progress.total * 100).toFixed(0) }}%
</div>
<ion-card-content class="ion-margin">
<ion-progress-bar
color="tertiary"
style="max-width: 700px; margin: auto; margin-bottom: 36px"
[type]="progress.transferred && progress.transferred < 1 ? 'determinate' : 'indeterminate'"
[value]="progress.transferred || 0"
></ion-progress-bar>
<p>
<ng-container *ngIf="progress.totalBytes as total">
<ng-container
*ngIf="progress.transferred as transferred; else calculating"
>
Progress: {{ (transferred * 100).toFixed() }}%
</ng-container>
<ng-template #calculating>
{{ (progress.totalBytes / 1073741824).toFixed(2) }} GB
</ng-template>
</ng-container>
</p>
</ion-card-content>
</ion-card>
</ion-col>
</ion-row>
</ion-grid>
</ion-content>
<progress
tuiProgressBar
class="progress"
[style.max-width.rem]="40"
[style.margin]="'1rem auto'"
[attr.value]="progress.total"
></progress>
<p>{{ progress.message }}</p>
</section>

View File

@@ -1,3 +0,0 @@
ion-card-title {
font-size: 42px;
}

View File

@@ -1,15 +1,23 @@
import { Component } from '@angular/core'
import { NavController } from '@ionic/angular'
import { StateService } from 'src/app/services/state.service'
import { Pipe, PipeTransform } from '@angular/core'
import { BehaviorSubject } from 'rxjs'
import {
EMPTY,
Observable,
catchError,
filter,
from,
interval,
map,
of,
startWith,
switchMap,
take,
tap,
} from 'rxjs'
import { ApiService } from 'src/app/services/api/api.service'
import { ErrorToastService, pauseFor } from '@start9labs/shared'
type Progress = {
totalBytes: number | null
transferred: number
}
import { ErrorToastService } from '@start9labs/shared'
import { T } from '@start9labs/start-sdk'
@Component({
selector: 'app-loading',
@@ -17,10 +25,46 @@ type Progress = {
styleUrls: ['loading.page.scss'],
})
export class LoadingPage {
readonly progress$ = new BehaviorSubject<Progress>({
totalBytes: null,
transferred: 0,
})
readonly progress$ = this.getRunningStatus$().pipe(
switchMap(res =>
this.api.openProgressWebsocket$(res.guid).pipe(
startWith(res.progress),
catchError((_, watch$) => {
return interval(2000).pipe(
switchMap(() =>
from(this.api.getStatus()).pipe(catchError(() => EMPTY)),
),
take(1),
switchMap(() => watch$),
)
}),
tap(progress => {
if (progress.overall === true) {
this.getStatus()
}
}),
),
),
map(({ phases, overall }) => {
return {
total: getDecimal(overall),
message: phases
.filter(
(
p,
): p is {
name: string
progress: {
done: number
total: number | null
}
} => p.progress !== true && p.progress !== null,
)
.map(p => `${p.name}${getPhaseBytes(p.progress)}`)
.join(','),
}
}),
)
constructor(
private readonly navCtrl: NavController,
@@ -28,55 +72,55 @@ export class LoadingPage {
private readonly errorToastService: ErrorToastService,
) {}
ngOnInit() {
this.poll()
}
private async getStatus(): Promise<{
status: 'running'
guid: string
progress: T.FullProgress
} | void> {
const res = await this.api.getStatus()
async poll() {
try {
const progress = await this.api.getStatus()
if (!progress) return
const { totalBytes, bytesTransferred } = progress
this.progress$.next({
totalBytes,
transferred: totalBytes ? bytesTransferred / totalBytes : 0,
})
if (progress.complete) {
this.navCtrl.navigateForward(`/success`)
this.progress$.complete()
return
}
await pauseFor(250)
setTimeout(() => this.poll(), 0) // prevent call stack from growing
} catch (e: any) {
this.errorToastService.present(e)
}
}
}
@Pipe({
name: 'toMessage',
})
export class ToMessagePipe implements PipeTransform {
constructor(private readonly stateService: StateService) {}
transform(progress: number | null): string {
if (['fresh', 'attach'].includes(this.stateService.setupType || '')) {
return 'Setting up your server'
}
if (!progress) {
return 'Calculating size'
} else if (progress < 1) {
return 'Copying data'
if (!res) {
this.navCtrl.navigateRoot('/home')
} else if (res.status === 'complete') {
this.navCtrl.navigateForward(`/success`)
} else {
return 'Finalizing'
return res
}
}
private getRunningStatus$(): Observable<{
status: 'running'
guid: string
progress: T.FullProgress
}> {
return from(this.getStatus()).pipe(
filter(Boolean),
catchError(e => {
this.errorToastService.present(e)
return of(e)
}),
take(1),
)
}
}
function getDecimal(progress: T.Progress): number {
if (progress === true) {
return 1
} else if (!progress || !progress.total) {
return 0
} else {
return progress.total && progress.done / progress.total
}
}
function getPhaseBytes(
progress:
| false
| {
done: number
total: number | null
},
): string {
return progress === false ? '' : `: (${progress.done}/${progress.total})`
}

View File

@@ -1,16 +1,21 @@
import * as jose from 'node-jose'
import { DiskListResponse, StartOSDiskInfo } from '@start9labs/shared'
import { T } from '@start9labs/start-sdk'
import { WebSocketSubjectConfig } from 'rxjs/webSocket'
import { Observable } from 'rxjs'
export abstract class ApiService {
pubkey?: jose.JWK.Key
abstract getStatus(): Promise<StatusRes> // setup.status
abstract getStatus(): Promise<T.SetupStatusRes | null> // setup.status
abstract getPubKey(): Promise<void> // setup.get-pubkey
abstract getDrives(): Promise<DiskListResponse> // setup.disk.list
abstract verifyCifs(cifs: CifsRecoverySource): Promise<StartOSDiskInfo> // setup.cifs.verify
abstract attach(importInfo: AttachReq): Promise<void> // setup.attach
abstract execute(setupInfo: ExecuteReq): Promise<void> // setup.execute
abstract complete(): Promise<CompleteRes> // setup.complete
abstract verifyCifs(cifs: T.VerifyCifsParams): Promise<StartOSDiskInfo> // setup.cifs.verify
abstract attach(importInfo: T.AttachParams): Promise<T.SetupProgress> // setup.attach
abstract execute(setupInfo: T.SetupExecuteParams): Promise<T.SetupProgress> // setup.execute
abstract complete(): Promise<T.SetupResult> // setup.complete
abstract exit(): Promise<void> // setup.exit
abstract openProgressWebsocket$(guid: string): Observable<T.FullProgress>
async encrypt(toEncrypt: string): Promise<Encrypted> {
if (!this.pubkey) throw new Error('No pubkey found!')
@@ -27,29 +32,7 @@ type Encrypted = {
encrypted: string
}
export type StatusRes = {
bytesTransferred: number
totalBytes: number | null
complete: boolean
} | null
export type AttachReq = {
guid: string
startOsPassword: Encrypted
}
export type ExecuteReq = {
startOsLogicalname: string
startOsPassword: Encrypted
recoverySource: RecoverySource | null
recoveryPassword: Encrypted | null
}
export type CompleteRes = {
torAddress: string
lanAddress: string
rootCa: string
}
export type WebsocketConfig<T> = Omit<WebSocketSubjectConfig<T>, 'url'>
export type DiskBackupTarget = {
vendor: string | null
@@ -68,27 +51,3 @@ export type CifsBackupTarget = {
mountable: boolean
startOs: StartOSDiskInfo | null
}
export type DiskRecoverySource = {
type: 'disk'
logicalname: string // partition logicalname
}
export type BackupRecoverySource = {
type: 'backup'
target: CifsRecoverySource | DiskRecoverySource
}
export type RecoverySource = BackupRecoverySource | DiskMigrateSource
export type DiskMigrateSource = {
type: 'migrate'
guid: string
}
export type CifsRecoverySource = {
type: 'cifs'
hostname: string
path: string
username: string
password: Encrypted | null
}

View File

@@ -1,4 +1,4 @@
import { Injectable } from '@angular/core'
import { Inject, Injectable } from '@angular/core'
import {
DiskListResponse,
StartOSDiskInfo,
@@ -8,27 +8,35 @@ import {
RpcError,
RPCOptions,
} from '@start9labs/shared'
import {
ApiService,
CifsRecoverySource,
DiskRecoverySource,
StatusRes,
AttachReq,
ExecuteReq,
CompleteRes,
} from './api.service'
import { T } from '@start9labs/start-sdk'
import { ApiService, WebsocketConfig } from './api.service'
import * as jose from 'node-jose'
import { Observable } from 'rxjs'
import { DOCUMENT } from '@angular/common'
import { webSocket } from 'rxjs/webSocket'
@Injectable({
providedIn: 'root',
})
export class LiveApiService extends ApiService {
constructor(private readonly http: HttpService) {
constructor(
private readonly http: HttpService,
@Inject(DOCUMENT) private readonly document: Document,
) {
super()
}
async getStatus() {
return this.rpcRequest<StatusRes>({
openProgressWebsocket$(guid: string): Observable<T.FullProgress> {
const { location } = this.document.defaultView!
const host = location.host
return webSocket({
url: `ws://${host}/ws/rpc/${guid}`,
})
}
async getStatus(): Promise<T.SetupStatusRes | null> {
return this.rpcRequest<T.SetupStatusRes | null>({
method: 'setup.status',
params: {},
})
@@ -41,7 +49,7 @@ export class LiveApiService extends ApiService {
* this wil all public/private key, which means that there is no information loss
* through the network.
*/
async getPubKey() {
async getPubKey(): Promise<void> {
const response: jose.JWK.Key = await this.rpcRequest({
method: 'setup.get-pubkey',
params: {},
@@ -50,14 +58,14 @@ export class LiveApiService extends ApiService {
this.pubkey = response
}
async getDrives() {
async getDrives(): Promise<DiskListResponse> {
return this.rpcRequest<DiskListResponse>({
method: 'setup.disk.list',
params: {},
})
}
async verifyCifs(source: CifsRecoverySource) {
async verifyCifs(source: T.VerifyCifsParams): Promise<StartOSDiskInfo> {
source.path = source.path.replace('/\\/g', '/')
return this.rpcRequest<StartOSDiskInfo>({
method: 'setup.cifs.verify',
@@ -65,14 +73,14 @@ export class LiveApiService extends ApiService {
})
}
async attach(params: AttachReq) {
await this.rpcRequest<void>({
async attach(params: T.AttachParams): Promise<T.SetupProgress> {
return this.rpcRequest<T.SetupProgress>({
method: 'setup.attach',
params,
})
}
async execute(setupInfo: ExecuteReq) {
async execute(setupInfo: T.SetupExecuteParams): Promise<T.SetupProgress> {
if (setupInfo.recoverySource?.type === 'backup') {
if (isCifsSource(setupInfo.recoverySource.target)) {
setupInfo.recoverySource.target.path =
@@ -80,14 +88,14 @@ export class LiveApiService extends ApiService {
}
}
await this.rpcRequest<void>({
return this.rpcRequest<T.SetupProgress>({
method: 'setup.execute',
params: setupInfo,
})
}
async complete() {
const res = await this.rpcRequest<CompleteRes>({
async complete(): Promise<T.SetupResult> {
const res = await this.rpcRequest<T.SetupResult>({
method: 'setup.complete',
params: {},
})
@@ -98,7 +106,7 @@ export class LiveApiService extends ApiService {
}
}
async exit() {
async exit(): Promise<void> {
await this.rpcRequest<void>({
method: 'setup.exit',
params: {},
@@ -119,7 +127,7 @@ export class LiveApiService extends ApiService {
}
function isCifsSource(
source: CifsRecoverySource | DiskRecoverySource | null,
): source is CifsRecoverySource {
return !!(source as CifsRecoverySource)?.hostname
source: T.BackupTargetFS | null,
): source is T.Cifs & { type: 'cifs' } {
return !!(source as T.Cifs)?.hostname
}

View File

@@ -1,42 +1,151 @@
import { Injectable } from '@angular/core'
import { encodeBase64, pauseFor } from '@start9labs/shared'
import {
ApiService,
AttachReq,
CifsRecoverySource,
CompleteRes,
ExecuteReq,
} from './api.service'
DiskListResponse,
StartOSDiskInfo,
encodeBase64,
pauseFor,
} from '@start9labs/shared'
import { ApiService } from './api.service'
import * as jose from 'node-jose'
let tries: number
import { T } from '@start9labs/start-sdk'
import {
Observable,
concatMap,
delay,
from,
interval,
map,
mergeScan,
of,
startWith,
switchMap,
switchScan,
takeWhile,
} from 'rxjs'
@Injectable({
providedIn: 'root',
})
export class MockApiService extends ApiService {
async getStatus() {
const restoreOrMigrate = true
// fullProgress$(): Observable<T.FullProgress> {
// const phases = [
// {
// name: 'Preparing Data',
// progress: null,
// },
// {
// name: 'Transferring Data',
// progress: null,
// },
// {
// name: 'Finalizing Setup',
// progress: null,
// },
// ]
// return from(phases).pipe(
// switchScan((acc, val, i) => {}, { overall: null, phases }),
// )
// }
// namedProgress$(namedProgress: T.NamedProgress): Observable<T.NamedProgress> {
// return of(namedProgress).pipe(startWith(namedProgress))
// }
// progress$(progress: T.Progress): Observable<T.Progress> {}
// websocket
openProgressWebsocket$(guid: string): Observable<T.FullProgress> {
return of(PROGRESS)
// const numPhases = PROGRESS.phases.length
// return of(PROGRESS).pipe(
// switchMap(full =>
// from(PROGRESS.phases).pipe(
// mergeScan((full, phase, i) => {
// if (
// !phase.progress ||
// typeof phase.progress !== 'object' ||
// !phase.progress.total
// ) {
// full.phases[i].progress = true
// if (
// full.overall &&
// typeof full.overall === 'object' &&
// full.overall.total
// ) {
// const step = full.overall.total / numPhases
// full.overall.done += step
// }
// return of(full).pipe(delay(2000))
// } else {
// const total = phase.progress.total
// const step = total / 4
// let done = phase.progress.done
// return interval(1000).pipe(
// takeWhile(() => done < total),
// map(() => {
// done += step
// console.error(done)
// if (
// full.overall &&
// typeof full.overall === 'object' &&
// full.overall.total
// ) {
// const step = full.overall.total / numPhases / 4
// full.overall.done += step
// }
// if (done === total) {
// full.phases[i].progress = true
// if (i === numPhases - 1) {
// full.overall = true
// }
// }
// return full
// }),
// )
// }
// }, full),
// ),
// ),
// )
}
private statusIndex = 0
async getStatus(): Promise<T.SetupStatusRes | null> {
await pauseFor(1000)
if (tries === undefined) {
tries = 0
return null
}
this.statusIndex++
tries++
const total = tries <= 4 ? tries * 268435456 : 1073741824
const progress = tries > 4 ? (tries - 4) * 268435456 : 0
return {
bytesTransferred: restoreOrMigrate ? progress : 0,
totalBytes: restoreOrMigrate ? total : null,
complete: progress === total,
switch (this.statusIndex) {
case 2:
return {
status: 'running',
progress: PROGRESS,
guid: 'progress-guid',
}
case 3:
return {
status: 'complete',
torAddress: 'https://asdafsadasdasasdasdfasdfasdf.onion',
lanAddress: 'https://adjective-noun.local',
rootCa: encodeBase64(rootCA),
}
default:
return null
}
}
async getPubKey() {
async getPubKey(): Promise<void> {
await pauseFor(1000)
// randomly generated
@@ -52,7 +161,7 @@ export class MockApiService extends ApiService {
})
}
async getDrives() {
async getDrives(): Promise<DiskListResponse> {
await pauseFor(1000)
return [
{
@@ -127,7 +236,7 @@ export class MockApiService extends ApiService {
]
}
async verifyCifs(params: CifsRecoverySource) {
async verifyCifs(params: T.VerifyCifsParams): Promise<StartOSDiskInfo> {
await pauseFor(1000)
return {
version: '0.3.0',
@@ -138,15 +247,25 @@ export class MockApiService extends ApiService {
}
}
async attach(params: AttachReq) {
async attach(params: T.AttachParams): Promise<T.SetupProgress> {
await pauseFor(1000)
return {
progress: PROGRESS,
guid: 'progress-guid',
}
}
async execute(setupInfo: ExecuteReq) {
async execute(setupInfo: T.SetupExecuteParams): Promise<T.SetupProgress> {
await pauseFor(1000)
return {
progress: PROGRESS,
guid: 'progress-guid',
}
}
async complete(): Promise<CompleteRes> {
async complete(): Promise<T.SetupResult> {
await pauseFor(1000)
return {
torAddress: 'https://asdafsadasdasasdasdfasdfasdf.onion',
@@ -155,7 +274,7 @@ export class MockApiService extends ApiService {
}
}
async exit() {
async exit(): Promise<void> {
await pauseFor(1000)
}
}
@@ -182,3 +301,8 @@ Rf3ZOPm9QP92YpWyYDkfAU04xdDo1vR0MYjKPkl4LjRqSU/tcCJnPMbJiwq+bWpX
2WJoEBXB/p15Kn6JxjI0ze2SnSI48JZ8it4fvxrhOo0VoLNIuCuNXJOwU17Rdl1W
YJidaq7je6k18AdgPA0Kh8y1XtfUH3fTaVw4
-----END CERTIFICATE-----`
const PROGRESS = {
overall: null,
phases: [],
}

View File

@@ -1,13 +1,13 @@
import { Injectable } from '@angular/core'
import { ApiService, RecoverySource } from './api/api.service'
import { ApiService } from './api/api.service'
import { T } from '@start9labs/start-sdk'
@Injectable({
providedIn: 'root',
})
export class StateService {
setupType?: 'fresh' | 'restore' | 'attach' | 'transfer'
recoverySource?: RecoverySource
recoverySource?: T.RecoverySource
recoveryPassword?: string
constructor(private readonly api: ApiService) {}

View File

@@ -13,6 +13,7 @@ export type LogsRes = {
export interface Log {
timestamp: string
message: string
bootId: string
}
export type DiskListResponse = DiskInfo[]

View File

@@ -1,5 +1,6 @@
import { NgModule } from '@angular/core'
import { PreloadAllModules, RouterModule, Routes } from '@angular/router'
import { stateNot } from 'src/app/services/state.service'
import { AuthGuard } from './guards/auth.guard'
import { UnauthGuard } from './guards/unauth.guard'
@@ -15,15 +16,29 @@ const routes: Routes = [
loadChildren: () =>
import('./pages/login/login.module').then(m => m.LoginPageModule),
},
{
path: 'diagnostic',
canActivate: [stateNot(['initializing', 'running'])],
loadChildren: () =>
import('./pages/diagnostic-routes/diagnostic-routing.module').then(
m => m.DiagnosticModule,
),
},
{
path: 'initializing',
canActivate: [stateNot(['error', 'running'])],
loadChildren: () =>
import('./pages/init/init.module').then(m => m.InitPageModule),
},
{
path: 'home',
canActivate: [AuthGuard],
canActivate: [AuthGuard, stateNot(['error', 'initializing'])],
loadChildren: () =>
import('./pages/home/home.module').then(m => m.HomePageModule),
},
{
path: 'system',
canActivate: [AuthGuard],
canActivate: [AuthGuard, stateNot(['error', 'initializing'])],
canActivateChild: [AuthGuard],
loadChildren: () =>
import('./pages/server-routes/server-routing.module').then(
@@ -32,14 +47,14 @@ const routes: Routes = [
},
{
path: 'updates',
canActivate: [AuthGuard],
canActivate: [AuthGuard, stateNot(['error', 'initializing'])],
canActivateChild: [AuthGuard],
loadChildren: () =>
import('./pages/updates/updates.module').then(m => m.UpdatesPageModule),
},
{
path: 'marketplace',
canActivate: [AuthGuard],
canActivate: [AuthGuard, stateNot(['error', 'initializing'])],
canActivateChild: [AuthGuard],
loadChildren: () =>
import('./pages/marketplace-routes/marketplace-routing.module').then(
@@ -48,7 +63,7 @@ const routes: Routes = [
},
{
path: 'notifications',
canActivate: [AuthGuard],
canActivate: [AuthGuard, stateNot(['error', 'initializing'])],
loadChildren: () =>
import('./pages/notifications/notifications.module').then(
m => m.NotificationsPageModule,
@@ -56,7 +71,7 @@ const routes: Routes = [
},
{
path: 'services',
canActivate: [AuthGuard],
canActivate: [AuthGuard, stateNot(['error', 'initializing'])],
canActivateChild: [AuthGuard],
loadChildren: () =>
import('./pages/apps-routes/apps-routing.module').then(

View File

@@ -15,6 +15,7 @@
type="overlay"
side="start"
class="left-menu"
[class.left-menu_hidden]="withoutMenu"
>
<ion-content color="light" scrollY="false" class="menu">
<app-menu *ngIf="authService.isVerified$ | async"></app-menu>

View File

@@ -9,11 +9,15 @@ tui-root {
.left-menu {
--side-max-width: 280px;
&_hidden {
display: none;
}
}
.menu {
:host-context(body[data-theme='Light']) & {
--ion-color-base: #F4F4F5 !important;
--ion-color-base: #f4f4f5 !important;
}
}

View File

@@ -1,4 +1,5 @@
import { Component, inject, OnDestroy } from '@angular/core'
import { IsActiveMatchOptions, Router } from '@angular/router'
import { combineLatest, map, merge, startWith } from 'rxjs'
import { AuthService } from './services/auth.service'
import { SplitPaneTracker } from './services/split-pane.service'
@@ -15,6 +16,13 @@ import { THEME } from '@start9labs/shared'
import { PatchDB } from 'patch-db-client'
import { DataModel } from './services/patch-db/data-model'
const OPTIONS: IsActiveMatchOptions = {
paths: 'subset',
queryParams: 'exact',
fragment: 'ignored',
matrixParams: 'ignored',
}
@Component({
selector: 'app-root',
templateUrl: 'app.component.html',
@@ -27,7 +35,7 @@ export class AppComponent implements OnDestroy {
readonly theme$ = inject(THEME)
readonly offline$ = combineLatest([
this.authService.isVerified$,
this.connection.connected$,
this.connection$,
this.patch
.watch$('serverInfo', 'statusInfo')
.pipe(startWith({ restarting: false, shuttingDown: false })),
@@ -44,8 +52,9 @@ export class AppComponent implements OnDestroy {
private readonly patchMonitor: PatchMonitorService,
private readonly splitPane: SplitPaneTracker,
private readonly patch: PatchDB<DataModel>,
private readonly router: Router,
readonly authService: AuthService,
readonly connection: ConnectionService,
readonly connection$: ConnectionService,
readonly clientStorageService: ClientStorageService,
readonly themeSwitcher: ThemeSwitcherService,
) {}
@@ -56,6 +65,13 @@ export class AppComponent implements OnDestroy {
.subscribe(name => this.titleService.setTitle(name || 'StartOS'))
}
get withoutMenu(): boolean {
return (
this.router.isActive('initializing', OPTIONS) ||
this.router.isActive('diagnostic', OPTIONS)
)
}
splitPaneVisible({ detail }: any) {
this.splitPane.sidebarOpen$.next(detail.visible)
}

View File

@@ -1,4 +1,5 @@
import {
TuiAlertModule,
TuiDialogModule,
TuiModeModule,
TuiRootModule,
@@ -58,6 +59,7 @@ import { environment } from '../environments/environment'
ConnectionBarComponentModule,
TuiRootModule,
TuiDialogModule,
TuiAlertModule,
TuiModeModule,
TuiThemeNightModule,
WidgetsPageModule,

View File

@@ -10,6 +10,7 @@ import { AuthService } from './services/auth.service'
import { ClientStorageService } from './services/client-storage.service'
import { FilterPackagesPipe } from '../../../marketplace/src/pipes/filter-packages.pipe'
import { ThemeSwitcherService } from './services/theme-switcher.service'
import { StorageService } from './services/storage.service'
const {
useMocks,
@@ -30,7 +31,7 @@ export const APP_PROVIDERS: Provider[] = [
},
{
provide: APP_INITIALIZER,
deps: [AuthService, ClientStorageService, Router],
deps: [StorageService, AuthService, ClientStorageService, Router],
useFactory: appInitializer,
multi: true,
},
@@ -45,13 +46,15 @@ export const APP_PROVIDERS: Provider[] = [
]
export function appInitializer(
storage: StorageService,
auth: AuthService,
localStorage: ClientStorageService,
router: Router,
): () => void {
return () => {
storage.migrate036()
auth.init()
localStorage.init()
localStorage.init() // @TODO pretty sure we can navigate before this step
router.initialNavigation()
}
}

View File

@@ -70,7 +70,7 @@ export class MenuComponent {
readonly showEOSUpdate$ = this.eosService.showUpdate$
private readonly local$ = this.connectionService.connected$.pipe(
private readonly local$ = this.connection$.pipe(
filter(Boolean),
switchMap(() => this.patch.watch$('packageData').pipe(first())),
switchMap(outer =>
@@ -126,6 +126,6 @@ export class MenuComponent {
private readonly marketplaceService: MarketplaceService,
private readonly splitPane: SplitPaneTracker,
private readonly emver: Emver,
private readonly connectionService: ConnectionService,
private readonly connection$: ConnectionService,
) {}
}

Some files were not shown because too many files have changed in this diff Show More