Merge branch 'next/minor' of github.com:Start9Labs/start-os into next/major

This commit is contained in:
Matt Hill
2025-02-08 19:19:35 -07:00
parent 95cad7bdd9
commit 95722802dc
206 changed files with 11364 additions and 4104 deletions

View File

@@ -24,7 +24,7 @@ pub struct AccountInfo {
pub server_id: String,
pub hostname: Hostname,
pub password: String,
pub tor_key: TorSecretKeyV3,
pub tor_keys: Vec<TorSecretKeyV3>,
pub root_ca_key: PKey<Private>,
pub root_ca_cert: X509,
pub ssh_key: ssh_key::PrivateKey,
@@ -34,7 +34,7 @@ impl AccountInfo {
pub fn new(password: &str, start_time: SystemTime) -> Result<Self, Error> {
let server_id = generate_id();
let hostname = generate_hostname();
let tor_key = TorSecretKeyV3::generate();
let tor_key = vec![TorSecretKeyV3::generate()];
let root_ca_key = generate_key()?;
let root_ca_cert = make_root_cert(&root_ca_key, &hostname, start_time)?;
let ssh_key = ssh_key::PrivateKey::from(ssh_key::private::Ed25519Keypair::random(
@@ -45,7 +45,7 @@ impl AccountInfo {
server_id,
hostname,
password: hash_password(password)?,
tor_key,
tor_keys: tor_key,
root_ca_key,
root_ca_cert,
ssh_key,
@@ -58,8 +58,11 @@ impl AccountInfo {
let hostname = Hostname(db.as_public().as_server_info().as_hostname().de()?);
let password = db.as_private().as_password().de()?;
let key_store = db.as_private().as_key_store();
let tor_addr = db.as_public().as_server_info().as_onion_address().de()?;
let tor_key = key_store.as_onion().get_key(&tor_addr)?;
let tor_addrs = db.as_public().as_server_info().as_host().as_onions().de()?;
let tor_keys = tor_addrs
.into_iter()
.map(|tor_addr| key_store.as_onion().get_key(&tor_addr))
.collect::<Result<_, _>>()?;
let cert_store = key_store.as_local_certs();
let root_ca_key = cert_store.as_root_key().de()?.0;
let root_ca_cert = cert_store.as_root_cert().de()?.0;
@@ -70,7 +73,7 @@ impl AccountInfo {
server_id,
hostname,
password,
tor_key,
tor_keys,
root_ca_key,
root_ca_cert,
ssh_key,
@@ -82,17 +85,16 @@ impl AccountInfo {
let server_info = db.as_public_mut().as_server_info_mut();
server_info.as_id_mut().ser(&self.server_id)?;
server_info.as_hostname_mut().ser(&self.hostname.0)?;
server_info
.as_lan_address_mut()
.ser(&self.hostname.lan_address().parse()?)?;
server_info
.as_pubkey_mut()
.ser(&self.ssh_key.public_key().to_openssh()?)?;
let onion_address = self.tor_key.public().get_onion_address();
server_info.as_onion_address_mut().ser(&onion_address)?;
server_info
.as_tor_address_mut()
.ser(&format!("https://{onion_address}").parse()?)?;
server_info.as_host_mut().as_onions_mut().ser(
&self
.tor_keys
.iter()
.map(|tor_key| tor_key.public().get_onion_address())
.collect(),
)?;
db.as_private_mut().as_password_mut().ser(&self.password)?;
db.as_private_mut()
.as_ssh_privkey_mut()
@@ -101,7 +103,9 @@ impl AccountInfo {
.as_compat_s9pk_key_mut()
.ser(Pem::new_ref(&self.compat_s9pk_key))?;
let key_store = db.as_private_mut().as_key_store_mut();
key_store.as_onion_mut().insert_key(&self.tor_key)?;
for tor_key in &self.tor_keys {
key_store.as_onion_mut().insert_key(tor_key)?;
}
let cert_store = key_store.as_local_certs_mut();
cert_store
.as_root_key_mut()

View File

@@ -1,4 +1,3 @@
use std::collections::BTreeMap;
use std::fmt;
use clap::{CommandFactory, FromArgMatches, Parser};

View File

@@ -187,9 +187,8 @@ pub fn check_password_against_db(db: &DatabaseModel, password: &str) -> Result<(
Ok(())
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
#[ts(export)]
pub struct LoginParams {
password: Option<PasswordType>,

View File

@@ -85,7 +85,7 @@ impl OsBackupV0 {
&mut rand::thread_rng(),
ssh_key::Algorithm::Ed25519,
)?,
tor_key: TorSecretKeyV3::from(self.tor_key.0),
tor_keys: vec![TorSecretKeyV3::from(self.tor_key.0)],
compat_s9pk_key: ed25519_dalek::SigningKey::generate(&mut rand::thread_rng()),
},
ui: self.ui,
@@ -114,7 +114,7 @@ impl OsBackupV1 {
root_ca_key: self.root_ca_key.0,
root_ca_cert: self.root_ca_cert.0,
ssh_key: ssh_key::PrivateKey::from(Ed25519Keypair::from_seed(&self.net_key.0)),
tor_key: TorSecretKeyV3::from(ed25519_expand_key(&self.net_key.0)),
tor_keys: vec![TorSecretKeyV3::from(ed25519_expand_key(&self.net_key.0))],
compat_s9pk_key: ed25519_dalek::SigningKey::from_bytes(&self.net_key),
},
ui: self.ui,
@@ -132,7 +132,7 @@ struct OsBackupV2 {
root_ca_key: Pem<PKey<Private>>, // PEM Encoded OpenSSL Key
root_ca_cert: Pem<X509>, // PEM Encoded OpenSSL X509 Certificate
ssh_key: Pem<ssh_key::PrivateKey>, // PEM Encoded OpenSSH Key
tor_key: TorSecretKeyV3, // Base64 Encoded Ed25519 Expanded Secret Key
tor_keys: Vec<TorSecretKeyV3>, // Base64 Encoded Ed25519 Expanded Secret Key
compat_s9pk_key: Pem<ed25519_dalek::SigningKey>, // PEM Encoded ED25519 Key
ui: Value, // JSON Value
}
@@ -146,7 +146,7 @@ impl OsBackupV2 {
root_ca_key: self.root_ca_key.0,
root_ca_cert: self.root_ca_cert.0,
ssh_key: self.ssh_key.0,
tor_key: self.tor_key,
tor_keys: self.tor_keys,
compat_s9pk_key: self.compat_s9pk_key.0,
},
ui: self.ui,
@@ -159,7 +159,7 @@ impl OsBackupV2 {
root_ca_key: Pem(backup.account.root_ca_key.clone()),
root_ca_cert: Pem(backup.account.root_ca_cert.clone()),
ssh_key: Pem(backup.account.ssh_key.clone()),
tor_key: backup.account.tor_key.clone(),
tor_keys: backup.account.tor_keys.clone(),
compat_s9pk_key: Pem(backup.account.compat_s9pk_key.clone()),
ui: backup.ui.clone(),
}

View File

@@ -18,7 +18,7 @@ use crate::db::model::Database;
use crate::disk::mount::backup::BackupMountGuard;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
use crate::init::{init, InitResult};
use crate::init::init;
use crate::prelude::*;
use crate::s9pk::S9pk;
use crate::service::service_map::DownloadInstallFuture;
@@ -109,12 +109,13 @@ pub async fn recover_full_embassy(
db.put(&ROOT, &Database::init(&os_backup.account)?).await?;
drop(db);
let InitResult { net_ctrl } = init(&ctx.config, init_phases).await?;
let init_result = init(&ctx.webserver, &ctx.config, init_phases).await?;
let rpc_ctx = RpcContext::init(
&ctx.webserver,
&ctx.config,
disk_guid.clone(),
Some(net_ctrl),
Some(init_result),
rpc_ctx_phases,
)
.await?;

View File

@@ -4,7 +4,7 @@ use rpc_toolkit::CliApp;
use serde_json::Value;
use crate::service::cli::{ContainerCliContext, ContainerClientConfig};
use crate::util::logger::EmbassyLogger;
use crate::util::logger::LOGGER;
use crate::version::{Current, VersionT};
lazy_static::lazy_static! {
@@ -12,7 +12,7 @@ lazy_static::lazy_static! {
}
pub fn main(args: impl IntoIterator<Item = OsString>) {
EmbassyLogger::init();
LOGGER.enable();
if let Err(e) = CliApp::new(
|cfg: ContainerClientConfig| Ok(ContainerCliContext::init(cfg)),
crate::service::effects::handler(),

View File

@@ -5,16 +5,16 @@ use futures::FutureExt;
use tokio::signal::unix::signal;
use tracing::instrument;
use crate::net::web_server::WebServer;
use crate::net::web_server::{Acceptor, WebServer};
use crate::prelude::*;
use crate::registry::context::{RegistryConfig, RegistryContext};
use crate::util::logger::EmbassyLogger;
use crate::util::logger::LOGGER;
#[instrument(skip_all)]
async fn inner_main(config: &RegistryConfig) -> Result<(), Error> {
let server = async {
let ctx = RegistryContext::init(config).await?;
let mut server = WebServer::new(ctx.listen);
let mut server = WebServer::new(Acceptor::bind([ctx.listen]).await?);
server.serve_registry(ctx.clone());
let mut shutdown_recv = ctx.shutdown.subscribe();
@@ -63,7 +63,7 @@ async fn inner_main(config: &RegistryConfig) -> Result<(), Error> {
}
pub fn main(args: impl IntoIterator<Item = OsString>) {
EmbassyLogger::init();
LOGGER.enable();
let config = RegistryConfig::parse_from(args).load().unwrap();

View File

@@ -5,7 +5,7 @@ use serde_json::Value;
use crate::context::config::ClientConfig;
use crate::context::CliContext;
use crate::util::logger::EmbassyLogger;
use crate::util::logger::LOGGER;
use crate::version::{Current, VersionT};
lazy_static::lazy_static! {
@@ -13,7 +13,8 @@ lazy_static::lazy_static! {
}
pub fn main(args: impl IntoIterator<Item = OsString>) {
EmbassyLogger::init();
LOGGER.enable();
if let Err(e) = CliApp::new(
|cfg: ClientConfig| Ok(CliContext::init(cfg.load()?)?),
crate::expanded_api(),

View File

@@ -1,3 +1,4 @@
use std::path::Path;
use std::sync::Arc;
use tokio::process::Command;
@@ -10,17 +11,17 @@ use crate::disk::fsck::RepairStrategy;
use crate::disk::main::DEFAULT_PASSWORD;
use crate::disk::REPAIR_DISK_PATH;
use crate::firmware::{check_for_firmware_update, update_firmware};
use crate::init::{InitPhases, InitResult, STANDBY_MODE_PATH};
use crate::net::web_server::WebServer;
use crate::init::{InitPhases, STANDBY_MODE_PATH};
use crate::net::web_server::{UpgradableListener, WebServer};
use crate::prelude::*;
use crate::progress::FullProgressTracker;
use crate::shutdown::Shutdown;
use crate::util::Invoke;
use crate::PLATFORM;
use crate::{DATA_DIR, PLATFORM};
#[instrument(skip_all)]
async fn setup_or_init(
server: &mut WebServer,
server: &mut WebServer<UpgradableListener>,
config: &ServerConfig,
) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> {
if let Some(firmware) = check_for_firmware_update()
@@ -111,7 +112,7 @@ async fn setup_or_init(
.await
.is_err()
{
let ctx = SetupContext::init(config)?;
let ctx = SetupContext::init(server, config)?;
server.serve_setup(ctx.clone());
@@ -156,7 +157,7 @@ async fn setup_or_init(
let disk_guid = Arc::new(String::from(guid_string.trim()));
let requires_reboot = crate::disk::main::import(
&**disk_guid,
config.datadir(),
DATA_DIR,
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
RepairStrategy::Aggressive
} else {
@@ -178,18 +179,26 @@ async fn setup_or_init(
tracing::info!("Loaded Disk");
if requires_reboot.0 {
tracing::info!("Rebooting...");
let mut reboot_phase = handle.add_phase("Rebooting".into(), Some(1));
reboot_phase.start();
return Ok(Err(Shutdown {
export_args: Some((disk_guid, config.datadir().to_owned())),
export_args: Some((disk_guid, Path::new(DATA_DIR).to_owned())),
restart: true,
}));
}
let InitResult { net_ctrl } = crate::init::init(config, init_phases).await?;
let init_result =
crate::init::init(&server.acceptor_setter(), config, init_phases).await?;
let rpc_ctx =
RpcContext::init(config, disk_guid, Some(net_ctrl), rpc_ctx_phases).await?;
let rpc_ctx = RpcContext::init(
&server.acceptor_setter(),
config,
disk_guid,
Some(init_result),
rpc_ctx_phases,
)
.await?;
Ok::<_, Error>(Ok((rpc_ctx, handle)))
}
@@ -203,7 +212,7 @@ async fn setup_or_init(
#[instrument(skip_all)]
pub async fn main(
server: &mut WebServer,
server: &mut WebServer<UpgradableListener>,
config: &ServerConfig,
) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> {
if &*PLATFORM == "raspberrypi" && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {

View File

@@ -1,6 +1,6 @@
use std::cmp::max;
use std::ffi::OsString;
use std::net::{Ipv6Addr, SocketAddr};
use std::net::IpAddr;
use std::sync::Arc;
use clap::Parser;
@@ -12,21 +12,27 @@ use tracing::instrument;
use crate::context::config::ServerConfig;
use crate::context::rpc::InitRpcContextPhases;
use crate::context::{DiagnosticContext, InitContext, RpcContext};
use crate::net::web_server::WebServer;
use crate::net::network_interface::SelfContainedNetworkInterfaceListener;
use crate::net::utils::ipv6_is_local;
use crate::net::web_server::{Acceptor, UpgradableListener, WebServer};
use crate::shutdown::Shutdown;
use crate::system::launch_metrics_task;
use crate::util::logger::EmbassyLogger;
use crate::util::io::append_file;
use crate::util::logger::LOGGER;
use crate::{Error, ErrorKind, ResultExt};
#[instrument(skip_all)]
async fn inner_main(
server: &mut WebServer,
server: &mut WebServer<UpgradableListener>,
config: &ServerConfig,
) -> Result<Option<Shutdown>, Error> {
let rpc_ctx = if !tokio::fs::metadata("/run/startos/initialized")
.await
.is_ok()
{
LOGGER.set_logfile(Some(
append_file("/run/startos/init.log").await?.into_std().await,
));
let (ctx, handle) = match super::start_init::main(server, &config).await? {
Err(s) => return Ok(Some(s)),
Ok(ctx) => ctx,
@@ -34,6 +40,7 @@ async fn inner_main(
tokio::fs::write("/run/startos/initialized", "").await?;
server.serve_main(ctx.clone());
LOGGER.set_logfile(None);
handle.complete();
ctx
@@ -44,6 +51,7 @@ async fn inner_main(
server.serve_init(init_ctx);
let ctx = RpcContext::init(
&server.acceptor_setter(),
config,
Arc::new(
tokio::fs::read_to_string("/media/startos/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
@@ -131,7 +139,7 @@ async fn inner_main(
}
pub fn main(args: impl IntoIterator<Item = OsString>) {
EmbassyLogger::init();
LOGGER.enable();
let config = ServerConfig::parse_from(args).load().unwrap();
@@ -142,7 +150,10 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
.build()
.expect("failed to initialize runtime");
rt.block_on(async {
let mut server = WebServer::new(SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80));
let addrs = crate::net::utils::all_socket_addrs_for(80).await?;
let mut server = WebServer::new(Acceptor::bind_upgradable(
SelfContainedNetworkInterfaceListener::bind(80),
));
match inner_main(&mut server, &config).await {
Ok(a) => {
server.shutdown().await;

View File

@@ -13,6 +13,7 @@ use crate::disk::OsPartitionInfo;
use crate::init::init_postgres;
use crate::prelude::*;
use crate::util::serde::IoFormat;
use crate::MAIN_DATA;
pub const DEVICE_CONFIG_PATH: &str = "/media/startos/config/config.yaml"; // "/media/startos/config/config.yaml";
pub const CONFIG_PATH: &str = "/etc/startos/config.yaml";
@@ -103,17 +104,11 @@ pub struct ServerConfig {
#[arg(skip)]
pub os_partitions: Option<OsPartitionInfo>,
#[arg(long)]
pub bind_rpc: Option<SocketAddr>,
#[arg(long)]
pub tor_control: Option<SocketAddr>,
#[arg(long)]
pub tor_socks: Option<SocketAddr>,
#[arg(long)]
pub dns_bind: Option<Vec<SocketAddr>>,
#[arg(long)]
pub revision_cache_size: Option<usize>,
#[arg(short, long)]
pub datadir: Option<PathBuf>,
#[arg(long)]
pub disable_encryption: Option<bool>,
#[arg(long)]
@@ -126,15 +121,12 @@ impl ContextConfig for ServerConfig {
fn merge_with(&mut self, other: Self) {
self.ethernet_interface = self.ethernet_interface.take().or(other.ethernet_interface);
self.os_partitions = self.os_partitions.take().or(other.os_partitions);
self.bind_rpc = self.bind_rpc.take().or(other.bind_rpc);
self.tor_control = self.tor_control.take().or(other.tor_control);
self.tor_socks = self.tor_socks.take().or(other.tor_socks);
self.dns_bind = self.dns_bind.take().or(other.dns_bind);
self.revision_cache_size = self
.revision_cache_size
.take()
.or(other.revision_cache_size);
self.datadir = self.datadir.take().or(other.datadir);
self.disable_encryption = self.disable_encryption.take().or(other.disable_encryption);
self.multi_arch_s9pks = self.multi_arch_s9pks.take().or(other.multi_arch_s9pks);
}
@@ -148,13 +140,8 @@ impl ServerConfig {
self.load_path_rec(Some(CONFIG_PATH))?;
Ok(self)
}
pub fn datadir(&self) -> &Path {
self.datadir
.as_deref()
.unwrap_or_else(|| Path::new("/embassy-data"))
}
pub async fn db(&self) -> Result<PatchDb, Error> {
let db_path = self.datadir().join("main").join("embassy.db");
let db_path = Path::new(MAIN_DATA).join("embassy.db");
let db = PatchDb::open(&db_path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
@@ -163,7 +150,7 @@ impl ServerConfig {
}
#[instrument(skip_all)]
pub async fn secret_store(&self) -> Result<PgPool, Error> {
init_postgres(self.datadir()).await?;
init_postgres("/media/startos/data").await?;
let secret_store =
PgPool::connect_with(PgConnectOptions::new().database("secrets").username("root"))
.await?;

View File

@@ -1,5 +1,4 @@
use std::ops::Deref;
use std::path::PathBuf;
use std::sync::Arc;
use rpc_toolkit::yajrc::RpcError;
@@ -13,7 +12,6 @@ use crate::shutdown::Shutdown;
use crate::Error;
pub struct DiagnosticContextSeed {
pub datadir: PathBuf,
pub shutdown: Sender<Shutdown>,
pub error: Arc<RpcError>,
pub disk_guid: Option<Arc<String>>,
@@ -25,7 +23,7 @@ pub struct DiagnosticContext(Arc<DiagnosticContextSeed>);
impl DiagnosticContext {
#[instrument(skip_all)]
pub fn init(
config: &ServerConfig,
_config: &ServerConfig,
disk_guid: Option<Arc<String>>,
error: Error,
) -> Result<Self, Error> {
@@ -35,7 +33,6 @@ impl DiagnosticContext {
let (shutdown, _) = tokio::sync::broadcast::channel(1);
Ok(Self(Arc::new(DiagnosticContextSeed {
datadir: config.datadir().to_owned(),
shutdown,
disk_guid,
error: Arc::new(error.into()),

View File

@@ -2,7 +2,6 @@ use std::collections::{BTreeMap, BTreeSet};
use std::future::Future;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::ops::Deref;
use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
@@ -27,10 +26,11 @@ use crate::auth::Sessions;
use crate::context::config::ServerConfig;
use crate::db::model::Database;
use crate::disk::OsPartitionInfo;
use crate::init::check_time_is_synchronized;
use crate::init::{check_time_is_synchronized, InitResult};
use crate::lxc::{ContainerId, LxcContainer, LxcManager};
use crate::net::net_controller::{NetController, PreInitNetController};
use crate::net::net_controller::{NetController, NetService};
use crate::net::utils::{find_eth_iface, find_wifi_iface};
use crate::net::web_server::{UpgradableListener, WebServerAcceptorSetter};
use crate::net::wifi::WpaCli;
use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle};
@@ -47,13 +47,13 @@ pub struct RpcContextSeed {
pub os_partitions: OsPartitionInfo,
pub wifi_interface: Option<String>,
pub ethernet_interface: String,
pub datadir: PathBuf,
pub disk_guid: Arc<String>,
pub ephemeral_sessions: SyncMutex<Sessions>,
pub db: TypedPatchDb<Database>,
pub sync_db: watch::Sender<u64>,
pub account: RwLock<AccountInfo>,
pub net_controller: Arc<NetController>,
pub os_net_service: NetService,
pub s9pk_arch: Option<&'static str>,
pub services: ServiceMap,
pub metrics_cache: RwLock<Option<crate::system::Metrics>>,
@@ -85,7 +85,7 @@ pub struct InitRpcContextPhases {
load_db: PhaseProgressTrackerHandle,
init_net_ctrl: PhaseProgressTrackerHandle,
cleanup_init: CleanupInitPhases,
// TODO: migrations
run_migrations: PhaseProgressTrackerHandle,
}
impl InitRpcContextPhases {
pub fn new(handle: &FullProgressTracker) -> Self {
@@ -93,6 +93,7 @@ impl InitRpcContextPhases {
load_db: handle.add_phase("Loading database".into(), Some(5)),
init_net_ctrl: handle.add_phase("Initializing network".into(), Some(1)),
cleanup_init: CleanupInitPhases::new(handle),
run_migrations: handle.add_phase("Running migrations".into(), Some(10)),
}
}
}
@@ -117,13 +118,15 @@ pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext {
#[instrument(skip_all)]
pub async fn init(
webserver: &WebServerAcceptorSetter<UpgradableListener>,
config: &ServerConfig,
disk_guid: Arc<String>,
net_ctrl: Option<PreInitNetController>,
init_result: Option<InitResult>,
InitRpcContextPhases {
mut load_db,
mut init_net_ctrl,
cleanup_init,
run_migrations,
}: InitRpcContextPhases,
) -> Result<Self, Error> {
let tor_proxy = config.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
@@ -133,7 +136,7 @@ impl RpcContext {
let (shutdown, _) = tokio::sync::broadcast::channel(1);
load_db.start();
let db = if let Some(net_ctrl) = &net_ctrl {
let db = if let Some(InitResult { net_ctrl, .. }) = &init_result {
net_ctrl.db.clone()
} else {
TypedPatchDb::<Database>::load(config.db().await?).await?
@@ -144,29 +147,28 @@ impl RpcContext {
tracing::info!("Opened PatchDB");
init_net_ctrl.start();
let net_controller = Arc::new(
NetController::init(
if let Some(net_ctrl) = net_ctrl {
net_ctrl
} else {
PreInitNetController::init(
db.clone(),
config
.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
tor_proxy,
&account.hostname,
account.tor_key.clone(),
)
.await?
},
config
.dns_bind
.as_deref()
.unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
)
.await?,
);
let (net_controller, os_net_service) = if let Some(InitResult {
net_ctrl,
os_net_service,
}) = init_result
{
(net_ctrl, os_net_service)
} else {
let net_ctrl = Arc::new(
NetController::init(
db.clone(),
config
.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
tor_proxy,
&account.hostname,
)
.await?,
);
webserver.try_upgrade(|a| net_ctrl.net_iface.upgrade_listener(a))?;
let os_net_service = net_ctrl.os_bindings().await?;
(net_ctrl, os_net_service)
};
init_net_ctrl.complete();
tracing::info!("Initialized Net Controller");
@@ -210,7 +212,6 @@ impl RpcContext {
let seed = Arc::new(RpcContextSeed {
is_closed: AtomicBool::new(false),
datadir: config.datadir().to_path_buf(),
os_partitions: config.os_partitions.clone().ok_or_else(|| {
Error::new(
eyre!("OS Partition Information Missing"),
@@ -229,6 +230,7 @@ impl RpcContext {
db,
account: RwLock::new(account),
net_controller,
os_net_service,
s9pk_arch: if config.multi_arch_s9pks.unwrap_or(false) {
None
} else {
@@ -276,7 +278,9 @@ impl RpcContext {
let res = Self(seed.clone());
res.cleanup_and_initialize(cleanup_init).await?;
tracing::info!("Cleaned up transient states");
crate::version::post_init(&res).await?;
crate::version::post_init(&res, run_migrations).await?;
tracing::info!("Completed migrations");
Ok(res)
}
@@ -286,7 +290,6 @@ impl RpcContext {
self.services.shutdown_all().await?;
self.is_closed.store(true, Ordering::SeqCst);
tracing::info!("RPC Context is shutdown");
// TODO: shutdown http servers
Ok(())
}

View File

@@ -1,5 +1,5 @@
use std::ops::Deref;
use std::path::PathBuf;
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
@@ -10,8 +10,6 @@ use josekit::jwk::Jwk;
use patch_db::PatchDb;
use rpc_toolkit::Context;
use serde::{Deserialize, Serialize};
use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool;
use tokio::sync::broadcast::Sender;
use tokio::sync::OnceCell;
use tracing::instrument;
@@ -22,12 +20,13 @@ use crate::context::config::ServerConfig;
use crate::context::RpcContext;
use crate::disk::OsPartitionInfo;
use crate::hostname::Hostname;
use crate::init::init_postgres;
use crate::net::web_server::{UpgradableListener, WebServer, WebServerAcceptorSetter};
use crate::prelude::*;
use crate::progress::FullProgressTracker;
use crate::rpc_continuations::{Guid, RpcContinuation, RpcContinuations};
use crate::setup::SetupProgress;
use crate::util::net::WebSocketExt;
use crate::MAIN_DATA;
lazy_static::lazy_static! {
pub static ref CURRENT_SECRET: Jwk = Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).unwrap_or_else(|e| {
@@ -41,7 +40,7 @@ lazy_static::lazy_static! {
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct SetupResult {
pub tor_address: String,
pub tor_addresses: Vec<String>,
#[ts(type = "string")]
pub hostname: Hostname,
#[ts(type = "string")]
@@ -52,7 +51,11 @@ impl TryFrom<&AccountInfo> for SetupResult {
type Error = Error;
fn try_from(value: &AccountInfo) -> Result<Self, Self::Error> {
Ok(Self {
tor_address: format!("https://{}", value.tor_key.public().get_onion_address()),
tor_addresses: value
.tor_keys
.iter()
.map(|tor_key| format!("https://{}", tor_key.public().get_onion_address()))
.collect(),
hostname: value.hostname.clone(),
lan_address: value.hostname.lan_address(),
root_ca: String::from_utf8(value.root_ca_cert.to_pem()?)?,
@@ -61,6 +64,7 @@ impl TryFrom<&AccountInfo> for SetupResult {
}
pub struct SetupContextSeed {
pub webserver: WebServerAcceptorSetter<UpgradableListener>,
pub config: ServerConfig,
pub os_partitions: OsPartitionInfo,
pub disable_encryption: bool,
@@ -68,7 +72,6 @@ pub struct SetupContextSeed {
pub task: OnceCell<NonDetachingJoinHandle<()>>,
pub result: OnceCell<Result<(SetupResult, RpcContext), Error>>,
pub shutdown: Sender<()>,
pub datadir: PathBuf,
pub rpc_continuations: RpcContinuations,
}
@@ -76,10 +79,13 @@ pub struct SetupContextSeed {
pub struct SetupContext(Arc<SetupContextSeed>);
impl SetupContext {
#[instrument(skip_all)]
pub fn init(config: &ServerConfig) -> Result<Self, Error> {
pub fn init(
webserver: &WebServer<UpgradableListener>,
config: &ServerConfig,
) -> Result<Self, Error> {
let (shutdown, _) = tokio::sync::broadcast::channel(1);
let datadir = config.datadir().to_owned();
Ok(Self(Arc::new(SetupContextSeed {
webserver: webserver.acceptor_setter(),
config: config.clone(),
os_partitions: config.os_partitions.clone().ok_or_else(|| {
Error::new(
@@ -92,13 +98,12 @@ impl SetupContext {
task: OnceCell::new(),
result: OnceCell::new(),
shutdown,
datadir,
rpc_continuations: RpcContinuations::new(),
})))
}
#[instrument(skip_all)]
pub async fn db(&self) -> Result<PatchDb, Error> {
let db_path = self.datadir.join("main").join("embassy.db");
let db_path = Path::new(MAIN_DATA).join("embassy.db");
let db = PatchDb::open(&db_path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
@@ -161,21 +166,30 @@ impl SetupContext {
if let Err(e) = async {
let mut stream =
progress_tracker.stream(Some(Duration::from_millis(100)));
while let Some(progress) = stream.next().await {
ws.send(ws::Message::Text(
serde_json::to_string(&progress)
.with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
if progress.overall.is_complete() {
break;
loop {
tokio::select! {
progress = stream.next() => {
if let Some(progress) = progress {
ws.send(ws::Message::Text(
serde_json::to_string(&progress)
.with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
if progress.overall.is_complete() {
return ws.normal_close("complete").await;
}
} else {
return ws.normal_close("complete").await;
}
}
msg = ws.recv() => {
if msg.transpose().with_kind(ErrorKind::Network)?.is_none() {
return Ok(())
}
}
}
}
ws.normal_close("complete").await?;
Ok::<_, Error>(())
}
.await
{

View File

@@ -198,17 +198,26 @@ pub async fn subscribe(
session,
|mut ws| async move {
if let Err(e) = async {
while let Some(rev) = sub.recv().await {
ws.send(ws::Message::Text(
serde_json::to_string(&rev).with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
loop {
tokio::select! {
rev = sub.recv() => {
if let Some(rev) = rev {
ws.send(ws::Message::Text(
serde_json::to_string(&rev).with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
} else {
return ws.normal_close("complete").await;
}
}
msg = ws.recv() => {
if msg.transpose().with_kind(ErrorKind::Network)?.is_none() {
return Ok(())
}
}
}
}
ws.normal_close("complete").await?;
Ok::<_, Error>(())
}
.await
{

View File

@@ -1,28 +1,31 @@
use std::collections::{BTreeMap, BTreeSet};
use std::net::{Ipv4Addr, Ipv6Addr};
use std::net::{IpAddr, Ipv4Addr};
use chrono::{DateTime, Utc};
use exver::{Version, VersionRange};
use imbl_value::InternedString;
use ipnet::{Ipv4Net, Ipv6Net};
use ipnet::IpNet;
use isocountry::CountryCode;
use itertools::Itertools;
use models::PackageId;
use openssl::hash::MessageDigest;
use patch_db::{HasModel, Value};
use reqwest::Url;
use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3;
use ts_rs::TS;
use crate::account::AccountInfo;
use crate::db::model::package::AllPackageData;
use crate::net::utils::{get_iface_ipv4_addr, get_iface_ipv6_addr};
use crate::net::acme::AcmeProvider;
use crate::net::host::binding::{AddSslOptions, BindInfo, BindOptions, NetInfo};
use crate::net::host::Host;
use crate::net::utils::ipv6_is_local;
use crate::net::vhost::AlpnInfo;
use crate::prelude::*;
use crate::progress::FullProgress;
use crate::system::SmtpValue;
use crate::util::cpupower::Governor;
use crate::util::lshw::LshwDevice;
use crate::util::serde::MaybeUtf8String;
use crate::version::{Current, VersionT};
use crate::{ARCH, PLATFORM};
@@ -38,7 +41,6 @@ pub struct Public {
}
impl Public {
pub fn init(account: &AccountInfo) -> Result<Self, Error> {
let lan_address = account.hostname.lan_address().parse().unwrap();
Ok(Self {
server_info: ServerInfo {
arch: get_arch(),
@@ -46,16 +48,44 @@ impl Public {
id: account.server_id.clone(),
version: Current::default().semver(),
hostname: account.hostname.no_dot_host_name(),
host: Host {
bindings: [(
80,
BindInfo {
enabled: false,
options: BindOptions {
preferred_external_port: 80,
add_ssl: Some(AddSslOptions {
preferred_external_port: 443,
alpn: Some(AlpnInfo::Specified(vec![
MaybeUtf8String("http/1.1".into()),
MaybeUtf8String("h2".into()),
])),
}),
secure: None,
},
net: NetInfo {
assigned_port: None,
assigned_ssl_port: Some(443),
public: false,
},
},
)]
.into_iter()
.collect(),
onions: account
.tor_keys
.iter()
.map(|k| k.public().get_onion_address())
.collect(),
domains: BTreeMap::new(),
hostname_info: BTreeMap::new(),
},
last_backup: None,
package_version_compat: Current::default().compat().clone(),
post_init_migration_todos: BTreeSet::new(),
lan_address,
onion_address: account.tor_key.public().get_onion_address(),
tor_address: format!("https://{}", account.tor_key.public().get_onion_address())
.parse()
.unwrap(),
ip_info: BTreeMap::new(),
acme: None,
network_interfaces: BTreeMap::new(),
acme: BTreeMap::new(),
status_info: ServerStatus {
backup_progress: None,
updated: false,
@@ -115,6 +145,7 @@ pub struct ServerInfo {
pub id: String,
#[ts(type = "string")]
pub hostname: InternedString,
pub host: Host,
#[ts(type = "string")]
pub version: Version,
#[ts(type = "string")]
@@ -123,15 +154,11 @@ pub struct ServerInfo {
pub post_init_migration_todos: BTreeSet<Version>,
#[ts(type = "string | null")]
pub last_backup: Option<DateTime<Utc>>,
#[ts(type = "string")]
pub lan_address: Url,
#[ts(type = "string")]
pub onion_address: OnionAddressV3,
/// for backwards compatibility
#[ts(type = "string")]
pub tor_address: Url,
pub ip_info: BTreeMap<String, IpInfo>,
pub acme: Option<AcmeSettings>,
#[ts(as = "BTreeMap::<String, NetworkInterfaceInfo>")]
#[serde(default)]
pub network_interfaces: BTreeMap<InternedString, NetworkInterfaceInfo>,
#[serde(default)]
pub acme: BTreeMap<AcmeProvider, AcmeSettings>,
#[serde(default)]
pub status_info: ServerStatus,
pub wifi: WifiInfo,
@@ -151,43 +178,76 @@ pub struct ServerInfo {
pub devices: Vec<LshwDevice>,
}
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct IpInfo {
#[ts(type = "string | null")]
pub ipv4_range: Option<Ipv4Net>,
pub ipv4: Option<Ipv4Addr>,
#[ts(type = "string | null")]
pub ipv6_range: Option<Ipv6Net>,
pub ipv6: Option<Ipv6Addr>,
pub struct NetworkInterfaceInfo {
pub public: Option<bool>,
pub ip_info: Option<IpInfo>,
}
impl IpInfo {
pub async fn for_interface(iface: &str) -> Result<Self, Error> {
let (ipv4, ipv4_range) = get_iface_ipv4_addr(iface).await?.unzip();
let (ipv6, ipv6_range) = get_iface_ipv6_addr(iface).await?.unzip();
Ok(Self {
ipv4_range,
ipv4,
ipv6_range,
ipv6,
impl NetworkInterfaceInfo {
pub fn public(&self) -> bool {
self.public.unwrap_or_else(|| {
!self.ip_info.as_ref().map_or(true, |ip_info| {
let ip4s = ip_info
.subnets
.iter()
.filter_map(|ipnet| {
if let IpAddr::V4(ip4) = ipnet.addr() {
Some(ip4)
} else {
None
}
})
.collect::<BTreeSet<_>>();
if !ip4s.is_empty() {
return ip4s.iter().all(|ip4| {
ip4.is_loopback()
|| (ip4.is_private() && !ip4.octets().starts_with(&[10, 59])) // reserving 10.59 for public wireguard configurations
|| ip4.is_link_local()
});
}
ip_info.subnets.iter().all(|ipnet| {
if let IpAddr::V6(ip6) = ipnet.addr() {
ipv6_is_local(ip6)
} else {
true
}
})
})
})
}
}
#[derive(Clone, Debug, Default, PartialEq, Eq, Deserialize, Serialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct IpInfo {
pub scope_id: u32,
pub device_type: Option<NetworkInterfaceType>,
#[ts(type = "string[]")]
pub subnets: BTreeSet<IpNet>,
pub wan_ip: Option<Ipv4Addr>,
#[ts(type = "string[]")]
pub ntp_servers: BTreeSet<InternedString>,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize, TS)]
#[ts(export)]
#[serde(rename_all = "kebab-case")]
pub enum NetworkInterfaceType {
Ethernet,
Wireless,
Wireguard,
}
/// Settings for acquiring TLS certificates from an ACME provider.
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct AcmeSettings {
    /// ACME directory URL of the certificate authority.
    #[ts(type = "string")]
    pub provider: Url,
    /// Contact email addresses supplied to the CA (e.g. letsencrypt).
    pub contact: Vec<String>,
    #[ts(type = "string[]")]
    /// Domains to acquire certificates for from this provider.
    pub domains: BTreeSet<InternedString>,
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]

View File

@@ -10,7 +10,7 @@ use crate::context::{CliContext, DiagnosticContext, RpcContext};
use crate::init::SYSTEM_REBUILD_PATH;
use crate::shutdown::Shutdown;
use crate::util::io::delete_file;
use crate::Error;
use crate::{Error, DATA_DIR};
pub fn diagnostic<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
@@ -71,7 +71,7 @@ pub fn restart(ctx: DiagnosticContext) -> Result<(), Error> {
export_args: ctx
.disk_guid
.clone()
.map(|guid| (guid, ctx.datadir.clone())),
.map(|guid| (guid, Path::new(DATA_DIR).to_owned())),
restart: true,
})
.expect("receiver dropped");

View File

@@ -7,7 +7,6 @@ use models::PackageId;
use tokio::io::AsyncWriteExt;
use tracing::instrument;
use super::filesystem::ecryptfs::EcryptFS;
use super::guard::{GenericMountGuard, TmpMountGuard};
use crate::auth::check_password;
use crate::backup::target::BackupInfo;

View File

@@ -1,7 +1,6 @@
use std::ffi::OsStr;
use std::fmt::{Display, Write};
use std::path::Path;
use std::time::Duration;
use digest::generic_array::GenericArray;
use digest::OutputSizeUser;

View File

@@ -3,10 +3,12 @@ use std::io::Cursor;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use axum::extract::ws::{self};
use color_eyre::eyre::eyre;
use const_format::formatcp;
use futures::{StreamExt, TryStreamExt};
use itertools::Itertools;
use models::ResultExt;
@@ -23,21 +25,24 @@ use crate::context::{CliContext, InitContext};
use crate::db::model::public::ServerStatus;
use crate::db::model::Database;
use crate::disk::mount::util::unmount;
use crate::hostname::Hostname;
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::net::net_controller::PreInitNetController;
use crate::net::net_controller::{NetController, NetService};
use crate::net::utils::find_wifi_iface;
use crate::net::web_server::{UpgradableListener, WebServerAcceptorSetter};
use crate::prelude::*;
use crate::progress::{
FullProgress, FullProgressTracker, PhaseProgressTrackerHandle, PhasedProgressBar,
};
use crate::rpc_continuations::{Guid, RpcContinuation};
use crate::s9pk::v2::pack::{CONTAINER_DATADIR, CONTAINER_TOOL};
use crate::ssh::SSH_AUTHORIZED_KEYS_FILE;
use crate::ssh::SSH_DIR;
use crate::system::get_mem_info;
use crate::util::io::{create_file, IOHook};
use crate::util::lshw::lshw;
use crate::util::net::WebSocketExt;
use crate::util::{cpupower, Invoke};
use crate::Error;
use crate::{Error, MAIN_DATA, PACKAGE_DATA};
pub const SYSTEM_REBUILD_PATH: &str = "/media/startos/config/system-rebuild";
pub const STANDBY_MODE_PATH: &str = "/media/startos/config/standby";
@@ -195,7 +200,8 @@ pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
}
pub struct InitResult {
pub net_ctrl: PreInitNetController,
pub net_ctrl: Arc<NetController>,
pub os_net_service: NetService,
}
pub struct InitPhases {
@@ -213,7 +219,6 @@ pub struct InitPhases {
enable_zram: PhaseProgressTrackerHandle,
update_server_info: PhaseProgressTrackerHandle,
launch_service_network: PhaseProgressTrackerHandle,
run_migrations: PhaseProgressTrackerHandle,
validate_db: PhaseProgressTrackerHandle,
postinit: Option<PhaseProgressTrackerHandle>,
}
@@ -238,7 +243,6 @@ impl InitPhases {
enable_zram: handle.add_phase("Enabling ZRAM".into(), Some(1)),
update_server_info: handle.add_phase("Updating server info".into(), Some(1)),
launch_service_network: handle.add_phase("Launching service intranet".into(), Some(1)),
run_migrations: handle.add_phase("Running migrations".into(), Some(10)),
validate_db: handle.add_phase("Validating database".into(), Some(1)),
postinit: if Path::new("/media/startos/config/postinit.sh").exists() {
Some(handle.add_phase("Running postinit.sh".into(), Some(5)))
@@ -274,6 +278,7 @@ pub async fn run_script<P: AsRef<Path>>(path: P, mut progress: PhaseProgressTrac
#[instrument(skip_all)]
pub async fn init(
webserver: &WebServerAcceptorSetter<UpgradableListener>,
cfg: &ServerConfig,
InitPhases {
preinit,
@@ -290,7 +295,6 @@ pub async fn init(
mut enable_zram,
mut update_server_info,
mut launch_service_network,
run_migrations,
mut validate_db,
postinit,
}: InitPhases,
@@ -317,7 +321,7 @@ pub async fn init(
})?;
tokio::fs::set_permissions(LOCAL_AUTH_COOKIE_PATH, Permissions::from_mode(0o046)).await?;
Command::new("chown")
.arg("root:embassy")
.arg("root:startos")
.arg(LOCAL_AUTH_COOKIE_PATH)
.invoke(crate::ErrorKind::Filesystem)
.await?;
@@ -334,8 +338,10 @@ pub async fn init(
load_ssh_keys.start();
crate::ssh::sync_keys(
&Hostname(peek.as_public().as_server_info().as_hostname().de()?),
&peek.as_private().as_ssh_privkey().de()?,
&peek.as_private().as_ssh_pubkeys().de()?,
SSH_AUTHORIZED_KEYS_FILE,
SSH_DIR,
)
.await?;
load_ssh_keys.complete();
@@ -344,22 +350,25 @@ pub async fn init(
let account = AccountInfo::load(&peek)?;
start_net.start();
let net_ctrl = PreInitNetController::init(
db.clone(),
cfg.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
cfg.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(127, 0, 0, 1),
9050,
))),
&account.hostname,
account.tor_key,
)
.await?;
let net_ctrl = Arc::new(
NetController::init(
db.clone(),
cfg.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
cfg.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(127, 0, 0, 1),
9050,
))),
&account.hostname,
)
.await?,
);
webserver.try_upgrade(|a| net_ctrl.net_iface.upgrade_listener(a))?;
let os_net_service = net_ctrl.os_bindings().await?;
start_net.complete();
mount_logs.start();
let log_dir = cfg.datadir().join("main/logs");
let log_dir = Path::new(MAIN_DATA).join("logs");
if tokio::fs::metadata(&log_dir).await.is_err() {
tokio::fs::create_dir_all(&log_dir).await?;
}
@@ -390,8 +399,6 @@ pub async fn init(
mount_logs.complete();
tracing::info!("Mounted Logs");
let mut server_info = peek.as_public().as_server_info().de()?;
load_ca_cert.start();
// write to ca cert store
tokio::fs::write(
@@ -402,58 +409,46 @@ pub async fn init(
Command::new("update-ca-certificates")
.invoke(crate::ErrorKind::OpenSsl)
.await?;
if tokio::fs::metadata("/home/kiosk/profile").await.is_ok() {
Command::new("certutil")
.arg("-A")
.arg("-n")
.arg("StartOS Local Root CA")
.arg("-t")
.arg("TCu,Cuw,Tuw")
.arg("-i")
.arg("/usr/local/share/ca-certificates/startos-root-ca.crt")
.arg("-d")
.arg("/home/kiosk/fx-profile")
.invoke(ErrorKind::OpenSsl)
.await?;
}
load_ca_cert.complete();
load_wifi.start();
crate::net::wifi::synchronize_wpa_supplicant_conf(
&cfg.datadir().join("main"),
&mut server_info.wifi,
)
.await?;
let wifi_interface = find_wifi_iface().await?;
let wifi = db
.mutate(|db| {
let wifi = db.as_public_mut().as_server_info_mut().as_wifi_mut();
wifi.as_interface_mut().ser(&wifi_interface)?;
wifi.de()
})
.await?;
crate::net::wifi::synchronize_network_manager(MAIN_DATA, &wifi).await?;
load_wifi.complete();
tracing::info!("Synchronized WiFi");
init_tmp.start();
let tmp_dir = cfg.datadir().join("package-data/tmp");
let tmp_dir = Path::new(PACKAGE_DATA).join("tmp");
if tokio::fs::metadata(&tmp_dir).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_dir).await?;
}
if tokio::fs::metadata(&tmp_dir).await.is_err() {
tokio::fs::create_dir_all(&tmp_dir).await?;
}
let tmp_var = cfg.datadir().join(format!("package-data/tmp/var"));
let tmp_var = Path::new(PACKAGE_DATA).join("tmp/var");
if tokio::fs::metadata(&tmp_var).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_var).await?;
}
crate::disk::mount::util::bind(&tmp_var, "/var/tmp", false).await?;
let downloading = cfg
.datadir()
.join(format!("package-data/archive/downloading"));
let downloading = Path::new(PACKAGE_DATA).join("archive/downloading");
if tokio::fs::metadata(&downloading).await.is_ok() {
tokio::fs::remove_dir_all(&downloading).await?;
}
let tmp_docker = cfg
.datadir()
.join(format!("package-data/tmp/{CONTAINER_TOOL}"));
let tmp_docker = Path::new(PACKAGE_DATA).join(formatcp!("tmp/{CONTAINER_TOOL}"));
crate::disk::mount::util::bind(&tmp_docker, CONTAINER_DATADIR, false).await?;
init_tmp.complete();
let server_info = db.peek().await.into_public().into_server_info();
set_governor.start();
let governor = if let Some(governor) = &server_info.governor {
let selected_governor = server_info.as_governor().de()?;
let governor = if let Some(governor) = &selected_governor {
if cpupower::get_available_governors()
.await?
.contains(governor)
@@ -474,11 +469,11 @@ pub async fn init(
set_governor.complete();
sync_clock.start();
server_info.ntp_synced = false;
let mut ntp_synced = false;
let mut not_made_progress = 0u32;
for _ in 0..1800 {
if check_time_is_synchronized().await? {
server_info.ntp_synced = true;
ntp_synced = true;
break;
}
let t = SystemTime::now();
@@ -495,7 +490,7 @@ pub async fn init(
break;
}
}
if !server_info.ntp_synced {
if !ntp_synced {
tracing::warn!("Timed out waiting for system time to synchronize");
} else {
tracing::info!("Syncronized system clock");
@@ -503,16 +498,16 @@ pub async fn init(
sync_clock.complete();
enable_zram.start();
if server_info.zram {
crate::system::enable_zram().await?
if server_info.as_zram().de()? {
crate::system::enable_zram().await?;
tracing::info!("Enabled ZRAM");
}
enable_zram.complete();
update_server_info.start();
server_info.ip_info = crate::net::dhcp::init_ips().await?;
server_info.ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
server_info.devices = lshw().await?;
server_info.status_info = ServerStatus {
let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
let devices = lshw().await?;
let status_info = ServerStatus {
updated: false,
update_progress: None,
backup_progress: None,
@@ -520,10 +515,15 @@ pub async fn init(
restarting: false,
};
db.mutate(|v| {
v.as_public_mut().as_server_info_mut().ser(&server_info)?;
let server_info = v.as_public_mut().as_server_info_mut();
server_info.as_ntp_synced_mut().ser(&ntp_synced)?;
server_info.as_ram_mut().ser(&ram)?;
server_info.as_devices_mut().ser(&devices)?;
server_info.as_status_info_mut().ser(&status_info)?;
Ok(())
})
.await?;
tracing::info!("Updated server info");
update_server_info.complete();
launch_service_network.start();
@@ -532,6 +532,7 @@ pub async fn init(
.arg("lxc-net.service")
.invoke(ErrorKind::Lxc)
.await?;
tracing::info!("Launched service intranet");
launch_service_network.complete();
validate_db.start();
@@ -540,6 +541,7 @@ pub async fn init(
d.ser(&model)
})
.await?;
tracing::info!("Validated database");
validate_db.complete();
if let Some(progress) = postinit {
@@ -548,7 +550,10 @@ pub async fn init(
tracing::info!("System initialized.");
Ok(InitResult { net_ctrl })
Ok(InitResult {
net_ctrl,
os_net_service,
})
}
pub fn init_api<C: Context>() -> ParentHandler<C> {

View File

@@ -2,6 +2,7 @@ use std::ops::Deref;
use std::path::PathBuf;
use std::time::Duration;
use axum::extract::ws;
use clap::builder::ValueParserFactory;
use clap::{value_parser, CommandFactory, FromArgMatches, Parser};
use color_eyre::eyre::eyre;
@@ -12,7 +13,7 @@ use itertools::Itertools;
use models::{FromStrParser, VersionString};
use reqwest::header::{HeaderMap, CONTENT_LENGTH};
use reqwest::Url;
use rpc_toolkit::yajrc::{GenericRpcMethod, RpcError};
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::HandlerArgs;
use rustyline_async::ReadlineEvent;
use serde::{Deserialize, Serialize};
@@ -188,7 +189,7 @@ pub async fn sideload(
SideloadParams { session }: SideloadParams,
) -> Result<SideloadResponse, Error> {
let (upload, file) = upload(&ctx, session.clone()).await?;
let (err_send, err_recv) = oneshot::channel::<Error>();
let (err_send, mut err_recv) = oneshot::channel::<Error>();
let progress = Guid::new();
let progress_tracker = FullProgressTracker::new();
let mut progress_listener = progress_tracker.stream(Some(Duration::from_millis(200)));
@@ -198,43 +199,44 @@ pub async fn sideload(
RpcContinuation::ws_authed(
&ctx,
session,
|mut ws| {
use axum::extract::ws::Message;
async move {
if let Err(e) = async {
type RpcResponse = rpc_toolkit::yajrc::RpcResponse<
GenericRpcMethod<&'static str, (), FullProgress>,
>;
|mut ws| async move {
if let Err(e) = async {
loop {
tokio::select! {
res = async {
while let Some(progress) = progress_listener.next().await {
ws.send(Message::Text(
progress = progress_listener.next() => {
if let Some(progress) = progress {
ws.send(ws::Message::Text(
serde_json::to_string(&progress)
.with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
if progress.overall.is_complete() {
return ws.normal_close("complete").await;
}
} else {
return ws.normal_close("complete").await;
}
Ok::<_, Error>(())
} => res?,
err = err_recv => {
}
msg = ws.recv() => {
if msg.transpose().with_kind(ErrorKind::Network)?.is_none() {
return Ok(())
}
}
err = (&mut err_recv) => {
if let Ok(e) = err {
ws.close_result(Err::<&str, _>(e.clone_output())).await?;
return Err(e)
}
}
}
ws.normal_close("complete").await?;
Ok::<_, Error>(())
}
.await
{
tracing::error!("Error tracking sideload progress: {e}");
tracing::debug!("{e:?}");
}
}
.await
{
tracing::error!("Error tracking sideload progress: {e}");
tracing::debug!("{e:?}");
}
},
Duration::from_secs(600),
),
@@ -258,9 +260,9 @@ pub async fn sideload(
}
.await
{
let _ = err_send.send(e.clone_output());
tracing::error!("Error sideloading package: {e}");
tracing::debug!("{e:?}");
let _ = err_send.send(e);
}
});
Ok(SideloadResponse { upload, progress })

View File

@@ -1,6 +1,11 @@
use const_format::formatcp;
pub const DATA_DIR: &str = "/media/startos/data";
pub const MAIN_DATA: &str = formatcp!("{DATA_DIR}/main");
pub const PACKAGE_DATA: &str = formatcp!("{DATA_DIR}/package-data");
pub const DEFAULT_REGISTRY: &str = "https://registry.start9.com";
// pub const COMMUNITY_MARKETPLACE: &str = "https://community-registry.start9.com";
pub const HOST_IP: [u8; 4] = [172, 18, 0, 1];
pub const HOST_IP: [u8; 4] = [10, 0, 3, 1];
pub use std::env::consts::ARCH;
lazy_static::lazy_static! {
pub static ref PLATFORM: String = {
@@ -82,6 +87,7 @@ use crate::context::{
CliContext, DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext,
};
use crate::disk::fsck::RequiresReboot;
use crate::net::net;
use crate::registry::context::{RegistryContext, RegistryUrlParams};
use crate::util::serde::HandlerExtSerde;
@@ -295,13 +301,20 @@ pub fn server<C: Context>() -> ParentHandler<C> {
.with_about("Set system smtp server and credentials")
.with_call_remote::<CliContext>()
)
.subcommand(
"test-smtp",
from_fn_async(system::test_smtp)
.no_display()
.with_about("Send test email using provided smtp server and credentials")
.with_call_remote::<CliContext>()
)
.subcommand(
"clear-smtp",
from_fn_async(system::clear_system_smtp)
.no_display()
.with_about("Remove system smtp server and credentials")
.with_call_remote::<CliContext>()
)
).subcommand("host", net::host::server_host_api::<C>().with_about("Commands for modifying the host for the system ui"))
}
pub fn package<C: Context>() -> ParentHandler<C> {
@@ -415,7 +428,7 @@ pub fn package<C: Context>() -> ParentHandler<C> {
.subcommand("attach", from_fn_async(service::cli_attach).no_display())
.subcommand(
"host",
net::host::host::<C>().with_about("Manage network hosts for a package"),
net::host::host_api::<C>().with_about("Manage network hosts for a package"),
)
}

View File

@@ -30,6 +30,7 @@ use crate::error::ResultExt;
use crate::lxc::ContainerId;
use crate::prelude::*;
use crate::rpc_continuations::{Guid, RpcContinuation, RpcContinuations};
use crate::util::net::WebSocketExt;
use crate::util::serde::Reversible;
use crate::util::Invoke;
@@ -80,34 +81,28 @@ async fn ws_handler(
.with_kind(ErrorKind::Network)?;
}
let mut ws_closed = false;
while let Some(entry) = tokio::select! {
a = logs.try_next() => Some(a?),
a = stream.try_next() => { a.with_kind(crate::ErrorKind::Network)?; ws_closed = true; None }
} {
if let Some(entry) = entry {
let (_, log_entry) = entry.log_entry()?;
stream
.send(ws::Message::Text(
serde_json::to_string(&log_entry).with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
loop {
tokio::select! {
entry = logs.try_next() => {
if let Some(entry) = entry? {
let (_, log_entry) = entry.log_entry()?;
stream
.send(ws::Message::Text(
serde_json::to_string(&log_entry).with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
} else {
return stream.normal_close("complete").await;
}
},
msg = stream.try_next() => {
if msg.with_kind(crate::ErrorKind::Network)?.is_none() {
return Ok(())
}
}
}
}
if !ws_closed {
stream
.send(ws::Message::Close(Some(ws::CloseFrame {
code: ws::close_code::NORMAL,
reason: "Log Stream Finished".into(),
})))
.await
.with_kind(ErrorKind::Network)?;
drop(stream);
}
Ok(())
}
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]

View File

@@ -8,13 +8,11 @@ use rpc_toolkit::{
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::lxc::{ContainerId, LxcConfig};
use crate::prelude::*;
use crate::rpc_continuations::Guid;
use crate::{
context::{CliContext, RpcContext},
service::ServiceStats,
};
use crate::service::ServiceStats;
pub fn lxc<C: Context>() -> ParentHandler<C> {
ParentHandler::new()

View File

@@ -1,8 +1,8 @@
use std::collections::BTreeSet;
use std::net::Ipv4Addr;
use std::path::Path;
use std::sync::{Arc, Weak};
use std::time::Duration;
use std::{collections::BTreeSet, ffi::OsString};
use clap::builder::ValueParserFactory;
use futures::{AsyncWriteExt, StreamExt};

View File

@@ -1,6 +1,7 @@
use axum::body::Body;
use axum::extract::Request;
use axum::response::Response;
use http::{HeaderMap, HeaderValue};
use http::{HeaderMap, HeaderValue, Method};
use rpc_toolkit::{Empty, Middleware};
#[derive(Clone)]
@@ -52,6 +53,13 @@ impl<Context: Send + Sync + 'static> Middleware<Context> for Cors {
request: &mut Request,
) -> Result<(), Response> {
self.get_cors_headers(request);
if request.method() == Method::OPTIONS {
let mut response = Response::new(Body::empty());
response
.headers_mut()
.extend(std::mem::take(&mut self.headers));
return Err(response);
}
Ok(())
}
async fn process_http_response(&mut self, _: &Context, response: &mut Response) {

View File

@@ -1,6 +1,7 @@
use std::collections::{BTreeMap, BTreeSet};
use std::str::FromStr;
use async_acme::acme::Identifier;
use clap::builder::ValueParserFactory;
use clap::Parser;
use imbl_value::InternedString;
@@ -10,6 +11,7 @@ use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use url::Url;
use crate::context::{CliContext, RpcContext};
@@ -78,10 +80,18 @@ impl<'a> async_acme::cache::AcmeCache for AcmeCertCache<'a> {
async fn read_certificate(
&self,
domains: &[String],
identifiers: &[Identifier],
directory_url: &str,
) -> Result<Option<(String, String)>, Self::Error> {
let domains = JsonKey::new(domains.into_iter().map(InternedString::intern).collect());
let identifiers = JsonKey::new(
identifiers
.into_iter()
.map(|d| match d {
Identifier::Dns(d) => d.into(),
Identifier::Ip(ip) => InternedString::from_display(ip),
})
.collect(),
);
let directory_url = directory_url
.parse::<Url>()
.with_kind(ErrorKind::ParseUrl)?;
@@ -94,7 +104,7 @@ impl<'a> async_acme::cache::AcmeCache for AcmeCertCache<'a> {
.into_acme()
.into_certs()
.into_idx(&directory_url)
.and_then(|a| a.into_idx(&domains))
.and_then(|a| a.into_idx(&identifiers))
else {
return Ok(None);
};
@@ -120,13 +130,21 @@ impl<'a> async_acme::cache::AcmeCache for AcmeCertCache<'a> {
async fn write_certificate(
&self,
domains: &[String],
identifiers: &[Identifier],
directory_url: &str,
key_pem: &str,
certificate_pem: &str,
) -> Result<(), Self::Error> {
tracing::info!("Saving new certificate for {domains:?}");
let domains = JsonKey::new(domains.into_iter().map(InternedString::intern).collect());
tracing::info!("Saving new certificate for {identifiers:?}");
let identifiers = JsonKey::new(
identifiers
.into_iter()
.map(|d| match d {
Identifier::Dns(d) => d.into(),
Identifier::Ip(ip) => InternedString::from_display(ip),
})
.collect(),
);
let directory_url = directory_url
.parse::<Url>()
.with_kind(ErrorKind::ParseUrl)?;
@@ -146,7 +164,7 @@ impl<'a> async_acme::cache::AcmeCache for AcmeCertCache<'a> {
.as_acme_mut()
.as_certs_mut()
.upsert(&directory_url, || Ok(BTreeMap::new()))?
.insert(&domains, &cert)
.insert(&identifiers, &cert)
})
.await?;
@@ -159,18 +177,23 @@ pub fn acme<C: Context>() -> ParentHandler<C> {
.subcommand(
"init",
from_fn_async(init)
.with_metadata("sync_db", Value::Bool(true))
.no_display()
.with_about("Setup ACME certificate acquisition")
.with_call_remote::<CliContext>(),
)
.subcommand(
"domain",
domain::<C>()
.with_about("Add, remove, or view domains for which to acquire ACME certificates"),
"remove",
from_fn_async(remove)
.with_metadata("sync_db", Value::Bool(true))
.no_display()
.with_about("Setup ACME certificate acquisition")
.with_call_remote::<CliContext>(),
)
}
#[derive(Clone, Deserialize, Serialize)]
/// Newtype around an ACME directory URL; serialized as a plain string.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, TS)]
#[ts(type = "string")]
pub struct AcmeProvider(pub Url);
impl FromStr for AcmeProvider {
type Err = <Url as FromStr>::Err;
@@ -180,9 +203,36 @@ impl FromStr for AcmeProvider {
"letsencrypt-staging" => async_acme::acme::LETS_ENCRYPT_STAGING_DIRECTORY.parse(),
s => s.parse(),
}
.map(|mut u: Url| {
let path = u
.path_segments()
.into_iter()
.flatten()
.filter(|p| !p.is_empty())
.map(|p| p.to_owned())
.collect::<Vec<_>>();
if let Ok(mut path_mut) = u.path_segments_mut() {
path_mut.clear();
path_mut.extend(path);
}
u
})
.map(Self)
}
}
impl<'de> Deserialize<'de> for AcmeProvider {
    /// Deserializes from the string form by delegating to this type's
    /// `FromStr` impl (which also accepts the `letsencrypt` /
    /// `letsencrypt-staging` shorthands and normalizes the URL path).
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        crate::util::serde::deserialize_from_str(deserializer)
    }
}
impl AsRef<str> for AcmeProvider {
fn as_ref(&self) -> &str {
self.0.as_str()
}
}
impl ValueParserFactory for AcmeProvider {
type Parser = FromStrParser<Self>;
fn value_parser() -> Self::Parser {
@@ -200,125 +250,36 @@ pub struct InitAcmeParams {
pub async fn init(
ctx: RpcContext,
InitAcmeParams {
provider: AcmeProvider(provider),
contact,
}: InitAcmeParams,
InitAcmeParams { provider, contact }: InitAcmeParams,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_acme_mut()
.map_mutate(|acme| {
Ok(Some(AcmeSettings {
provider,
contact,
domains: acme.map(|acme| acme.domains).unwrap_or_default(),
}))
})
.insert(&provider, &AcmeSettings { contact })
})
.await?;
Ok(())
}
pub fn domain<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"add",
from_fn_async(add_domain)
.no_display()
.with_about("Add a domain for which to acquire ACME certificates")
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove_domain)
.no_display()
.with_about("Remove a domain for which to acquire ACME certificates")
.with_call_remote::<CliContext>(),
)
.subcommand(
"list",
from_fn_async(list_domains)
.with_custom_display_fn(|_, res| {
for domain in res {
println!("{domain}")
}
Ok(())
})
.with_about("List domains for which to acquire ACME certificates")
.with_call_remote::<CliContext>(),
)
}
#[derive(Deserialize, Serialize, Parser)]
pub struct DomainParams {
pub domain: InternedString,
pub struct RemoveAcmeParams {
#[arg(long)]
pub provider: AcmeProvider,
}
pub async fn add_domain(
pub async fn remove(
ctx: RpcContext,
DomainParams { domain }: DomainParams,
RemoveAcmeParams { provider }: RemoveAcmeParams,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_acme_mut()
.transpose_mut()
.ok_or_else(|| {
Error::new(
eyre!("Please call `start-cli net acme init` before adding a domain"),
ErrorKind::InvalidRequest,
)
})?
.as_domains_mut()
.mutate(|domains| {
domains.insert(domain);
Ok(())
})
.remove(&provider)
})
.await?;
Ok(())
}
pub async fn remove_domain(
ctx: RpcContext,
DomainParams { domain }: DomainParams,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
if let Some(acme) = db
.as_public_mut()
.as_server_info_mut()
.as_acme_mut()
.transpose_mut()
{
acme.as_domains_mut().mutate(|domains| {
domains.remove(&domain);
Ok(())
})
} else {
Ok(())
}
})
.await?;
Ok(())
}
pub async fn list_domains(ctx: RpcContext) -> Result<BTreeSet<InternedString>, Error> {
if let Some(acme) = ctx
.db
.peek()
.await
.into_public()
.into_server_info()
.into_acme()
.transpose()
{
acme.into_domains().de()
} else {
Ok(BTreeSet::new())
}
}

View File

@@ -1,99 +0,0 @@
use std::collections::{BTreeMap, BTreeSet};
use std::net::IpAddr;
use clap::Parser;
use futures::TryStreamExt;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tokio::sync::RwLock;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::db::model::public::IpInfo;
use crate::net::utils::{iface_is_physical, list_interfaces};
use crate::prelude::*;
use crate::Error;
lazy_static::lazy_static! {
static ref CACHED_IPS: RwLock<BTreeSet<IpAddr>> = RwLock::new(BTreeSet::new());
}
async fn _ips() -> Result<BTreeSet<IpAddr>, Error> {
Ok(init_ips()
.await?
.values()
.flat_map(|i| {
std::iter::empty()
.chain(i.ipv4.map(IpAddr::from))
.chain(i.ipv6.map(IpAddr::from))
})
.collect())
}
pub async fn ips() -> Result<BTreeSet<IpAddr>, Error> {
let ips = CACHED_IPS.read().await.clone();
if !ips.is_empty() {
return Ok(ips);
}
let ips = _ips().await?;
*CACHED_IPS.write().await = ips.clone();
Ok(ips)
}
pub async fn init_ips() -> Result<BTreeMap<String, IpInfo>, Error> {
let mut res = BTreeMap::new();
let mut ifaces = list_interfaces();
while let Some(iface) = ifaces.try_next().await? {
if iface_is_physical(&iface).await {
let ip_info = IpInfo::for_interface(&iface).await?;
res.insert(iface, ip_info);
}
}
Ok(res)
}
// #[command(subcommands(update))]
pub fn dhcp<C: Context>() -> ParentHandler<C> {
ParentHandler::new().subcommand(
"update",
from_fn_async::<_, _, (), Error, (RpcContext, UpdateParams)>(update)
.no_display()
.with_about("Update IP assigned by dhcp")
.with_call_remote::<CliContext>(),
)
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct UpdateParams {
interface: String,
}
pub async fn update(
ctx: RpcContext,
UpdateParams { interface }: UpdateParams,
) -> Result<(), Error> {
if iface_is_physical(&interface).await {
let ip_info = IpInfo::for_interface(&interface).await?;
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_ip_info_mut()
.insert(&interface, &ip_info)
})
.await?;
let mut cached = CACHED_IPS.write().await;
if cached.is_empty() {
*cached = _ips().await?;
} else {
cached.extend(
std::iter::empty()
.chain(ip_info.ipv4.map(IpAddr::from))
.chain(ip_info.ipv6.map(IpAddr::from)),
);
}
}
Ok(())
}

View File

@@ -1,6 +1,6 @@
use std::borrow::Borrow;
use std::collections::BTreeMap;
use std::net::{Ipv4Addr, SocketAddr};
use std::net::Ipv4Addr;
use std::sync::{Arc, Weak};
use std::time::Duration;
@@ -19,6 +19,7 @@ use trust_dns_server::server::{Request, RequestHandler, ResponseHandler, Respons
use trust_dns_server::ServerFuture;
use crate::net::forward::START9_BRIDGE_IFACE;
use crate::util::sync::Watch;
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt};
@@ -140,38 +141,46 @@ impl RequestHandler for Resolver {
impl DnsController {
#[instrument(skip_all)]
pub async fn init(bind: &[SocketAddr]) -> Result<Self, Error> {
pub async fn init(mut lxcbr_status: Watch<bool>) -> Result<Self, Error> {
let services = Arc::new(RwLock::new(BTreeMap::new()));
let mut server = ServerFuture::new(Resolver {
services: services.clone(),
});
server.register_listener(
TcpListener::bind(bind)
.await
.with_kind(ErrorKind::Network)?,
Duration::from_secs(30),
);
server.register_socket(UdpSocket::bind(bind).await.with_kind(ErrorKind::Network)?);
Command::new("resolvectl")
.arg("dns")
.arg(START9_BRIDGE_IFACE)
.arg("127.0.0.1")
.invoke(ErrorKind::Network)
.await?;
Command::new("resolvectl")
.arg("domain")
.arg(START9_BRIDGE_IFACE)
.arg("embassy")
.invoke(ErrorKind::Network)
.await?;
let dns_server = tokio::spawn(async move {
server.register_listener(
TcpListener::bind((Ipv4Addr::LOCALHOST, 53))
.await
.with_kind(ErrorKind::Network)?,
Duration::from_secs(30),
);
server.register_socket(
UdpSocket::bind((Ipv4Addr::LOCALHOST, 53))
.await
.with_kind(ErrorKind::Network)?,
);
lxcbr_status.wait_for(|a| *a).await;
Command::new("resolvectl")
.arg("dns")
.arg(START9_BRIDGE_IFACE)
.arg("127.0.0.1")
.invoke(ErrorKind::Network)
.await?;
Command::new("resolvectl")
.arg("domain")
.arg(START9_BRIDGE_IFACE)
.arg("embassy")
.invoke(ErrorKind::Network)
.await?;
let dns_server = tokio::spawn(
server
.block_until_done()
.map_err(|e| Error::new(e, ErrorKind::Network)),
)
.await
.map_err(|e| Error::new(e, ErrorKind::Network))
})
.into();
Ok(Self {

View File

@@ -1,13 +1,18 @@
use std::collections::BTreeMap;
use std::collections::{BTreeMap, BTreeSet};
use std::net::SocketAddr;
use std::sync::{Arc, Weak};
use futures::channel::oneshot;
use helpers::NonDetachingJoinHandle;
use id_pool::IdPool;
use imbl_value::InternedString;
use serde::{Deserialize, Serialize};
use tokio::process::Command;
use tokio::sync::Mutex;
use tokio::sync::mpsc;
use crate::db::model::public::NetworkInterfaceInfo;
use crate::prelude::*;
use crate::util::sync::Watch;
use crate::util::Invoke;
pub const START9_BRIDGE_IFACE: &str = "lxcbr0";
@@ -34,144 +39,269 @@ impl AvailablePorts {
}
}
/// One requested port forward (keyed in `ForwardState::requested` by the
/// external port number).
#[derive(Debug)]
struct ForwardRequest {
    /// Forward on all interfaces (`true`) or only on private ones (`false`).
    public: bool,
    /// Destination socket the external port is forwarded to.
    target: SocketAddr,
    /// Liveness handle: `sync` prunes requests once no strong refs remain.
    rc: Weak<()>,
}
/// Desired vs. currently-applied state of the port-forwarding controller.
#[derive(Debug, Default)]
struct ForwardState {
    /// Desired forwards: external port -> request.
    requested: BTreeMap<u16, ForwardRequest>,
    /// Applied forwards: external port -> (interface name -> target socket).
    current: BTreeMap<u16, BTreeMap<InternedString, SocketAddr>>,
}
impl ForwardState {
    /// Reconcile the applied forwards (`self.current`) with the desired ones
    /// (`self.requested`), given the current interface map
    /// (`interface name -> is_public`).
    ///
    /// Dead requests (no strong refs left on `rc`) are pruned first. For each
    /// external port present in either map, rules are added/removed via the
    /// side-effecting `forward`/`unforward` helpers so that public requests
    /// cover all interfaces and non-public requests cover only private ones.
    ///
    /// Errors from `forward`/`unforward` abort mid-reconcile; `self.current`
    /// is updated entry-by-entry, so it stays accurate for the work already
    /// performed.
    async fn sync(&mut self, interfaces: &BTreeMap<InternedString, bool>) -> Result<(), Error> {
        // Interfaces NOT flagged public — the target set for private forwards.
        let private_interfaces = interfaces
            .iter()
            .filter(|(_, public)| !*public)
            .map(|(i, _)| i)
            .collect::<BTreeSet<_>>();
        let all_interfaces = interfaces.keys().collect::<BTreeSet<_>>();
        // Drop requests whose owner released the Arc returned by `add`.
        self.requested.retain(|_, req| req.rc.strong_count() > 0);
        // Visit every port that is desired, applied, or both. Collected into
        // an owned set up-front so we can mutate the maps while iterating.
        for external in self
            .requested
            .keys()
            .chain(self.current.keys())
            .copied()
            .collect::<BTreeSet<_>>()
        {
            match (
                self.requested.get(&external),
                self.current.get_mut(&external),
            ) {
                // Desired AND applied: diff the interface sets.
                (Some(req), Some(cur)) => {
                    let expected = if req.public {
                        &all_interfaces
                    } else {
                        &private_interfaces
                    };
                    let actual = cur.keys().collect::<BTreeSet<_>>();
                    // Applied on an interface that should no longer have it.
                    let mut to_rm = actual
                        .difference(expected)
                        .copied()
                        .cloned()
                        .collect::<BTreeSet<_>>();
                    // Expected on an interface that doesn't have it yet.
                    let mut to_add = expected
                        .difference(&actual)
                        .copied()
                        .cloned()
                        .collect::<BTreeSet<_>>();
                    // Interfaces where the forward exists but points at a
                    // stale target: tear down and re-create.
                    for interface in actual.intersection(expected).copied() {
                        if cur[interface] != req.target {
                            to_rm.insert(interface.clone());
                            to_add.insert(interface.clone());
                        }
                    }
                    // Removals first so a re-created rule never coexists with
                    // the stale one.
                    for interface in to_rm {
                        unforward(external, &*interface, cur[&interface]).await?;
                        cur.remove(&interface);
                    }
                    for interface in to_add {
                        forward(external, &*interface, req.target).await?;
                        cur.insert(interface, req.target);
                    }
                }
                // Desired but not yet applied anywhere: create on the full
                // expected interface set.
                (Some(req), None) => {
                    let cur = self.current.entry(external).or_default();
                    for interface in if req.public {
                        &all_interfaces
                    } else {
                        &private_interfaces
                    }
                    .into_iter()
                    .copied()
                    .cloned()
                    {
                        forward(external, &*interface, req.target).await?;
                        cur.insert(interface, req.target);
                    }
                }
                // No longer desired: tear everything down for this port.
                (None, Some(cur)) => {
                    let to_rm = cur.keys().cloned().collect::<BTreeSet<_>>();
                    for interface in to_rm {
                        unforward(external, &*interface, cur[&interface]).await?;
                        cur.remove(&interface);
                    }
                    self.current.remove(&external);
                }
                // Neither desired nor applied (can only happen transiently).
                _ => (),
            }
        }
        Ok(())
    }
}
fn err_has_exited<T>(_: T) -> Error {
Error::new(
eyre!("PortForwardController thread has exited"),
ErrorKind::Unknown,
)
}
pub struct LanPortForwardController {
forwards: Mutex<BTreeMap<u16, BTreeMap<SocketAddr, Weak<()>>>>,
req: mpsc::UnboundedSender<(
Option<(u16, ForwardRequest)>,
oneshot::Sender<Result<(), Error>>,
)>,
_thread: NonDetachingJoinHandle<()>,
}
impl LanPortForwardController {
pub fn new() -> Self {
/// Spawns the background task that keeps iptables forwarding rules in sync
/// with both the requested forwards and the current network-interface map.
pub fn new(mut ip_info: Watch<BTreeMap<InternedString, NetworkInterfaceInfo>>) -> Self {
    let (req_send, mut req_recv) = mpsc::unbounded_channel();
    let thread = NonDetachingJoinHandle::from(tokio::spawn(async move {
        let mut state = ForwardState::default();
        // Snapshot of iface name -> whether it is public; refreshed whenever
        // `ip_info` changes.
        let mut interfaces = ip_info.peek_and_mark_seen(|ip_info| {
            ip_info
                .iter()
                .map(|(iface, info)| (iface.clone(), info.public()))
                .collect()
        });
        let mut reply: Option<oneshot::Sender<Result<(), Error>>> = None;
        loop {
            tokio::select! {
                msg = req_recv.recv() => {
                    if let Some((msg, re)) = msg {
                        // `None` msg means "just resync" (used by gc).
                        if let Some((external, req)) = msg {
                            state.requested.insert(external, req);
                        }
                        reply = Some(re);
                    } else {
                        // All senders dropped: controller is gone; stop.
                        break;
                    }
                }
                _ = ip_info.changed() => {
                    interfaces = ip_info.peek(|ip_info| {
                        ip_info
                            .iter()
                            .map(|(iface, info)| (iface.clone(), info.public()))
                            .collect()
                    });
                }
            }
            // Reconcile iptables state after every wakeup; errors are logged
            // and also reported to the waiting caller (if any).
            let res = state.sync(&interfaces).await;
            if let Err(e) = &res {
                tracing::error!("Error in PortForwardController: {e}");
                tracing::debug!("{e:?}");
            }
            if let Some(re) = reply.take() {
                let _ = re.send(res);
            }
        }
    }));
    Self {
        forwards: Mutex::new(BTreeMap::new()),
        req: req_send,
        _thread: thread,
    }
}
pub async fn add(&self, port: u16, addr: SocketAddr) -> Result<Arc<()>, Error> {
let mut writable = self.forwards.lock().await;
let (prev, mut forward) = if let Some(forward) = writable.remove(&port) {
(
forward.keys().next().cloned(),
forward
.into_iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.collect(),
)
} else {
(None, BTreeMap::new())
};
pub async fn add(&self, port: u16, public: bool, target: SocketAddr) -> Result<Arc<()>, Error> {
let rc = Arc::new(());
forward.insert(addr, Arc::downgrade(&rc));
let next = forward.keys().next().cloned();
if !forward.is_empty() {
writable.insert(port, forward);
}
let (send, recv) = oneshot::channel();
self.req
.send((
Some((
port,
ForwardRequest {
public,
target,
rc: Arc::downgrade(&rc),
},
)),
send,
))
.map_err(err_has_exited)?;
update_forward(port, prev, next).await?;
Ok(rc)
recv.await.map_err(err_has_exited)?.map(|_| rc)
}
pub async fn gc(&self, external: u16) -> Result<(), Error> {
let mut writable = self.forwards.lock().await;
let (prev, forward) = if let Some(forward) = writable.remove(&external) {
(
forward.keys().next().cloned(),
forward
.into_iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.collect(),
)
} else {
(None, BTreeMap::new())
};
let next = forward.keys().next().cloned();
if !forward.is_empty() {
writable.insert(external, forward);
}
pub async fn gc(&self) -> Result<(), Error> {
let (send, recv) = oneshot::channel();
self.req.send((None, send)).map_err(err_has_exited)?;
update_forward(external, prev, next).await
recv.await.map_err(err_has_exited)?
}
}
/// Reconciles one external port mapping on the start9 bridge: removes the old
/// DNAT target (if any), installs the new one (if any), and does nothing when
/// the target is unchanged.
async fn update_forward(
    external: u16,
    prev: Option<SocketAddr>,
    next: Option<SocketAddr>,
) -> Result<(), Error> {
    if prev != next {
        // NOTE(review): not transactional — if `forward` fails after
        // `unforward` succeeded, the old rule is already gone; confirm callers
        // tolerate a dropped mapping on error.
        if let Some(prev) = prev {
            unforward(START9_BRIDGE_IFACE, external, prev).await?;
        }
        if let Some(next) = next {
            forward(START9_BRIDGE_IFACE, external, next).await?;
        }
    }
    Ok(())
}
// iptables -I FORWARD -o br-start9 -p tcp -d 172.18.0.2 --dport 8333 -j ACCEPT
// iptables -t nat -I PREROUTING -p tcp --dport 32768 -j DNAT --to 172.18.0.2:8333
async fn forward(iface: &str, external: u16, addr: SocketAddr) -> Result<(), Error> {
Command::new("iptables")
.arg("-I")
.arg("FORWARD")
.arg("-o")
.arg(iface)
.arg("-p")
.arg("tcp")
.arg("-d")
.arg(addr.ip().to_string())
.arg("--dport")
.arg(addr.port().to_string())
.arg("-j")
.arg("ACCEPT")
.invoke(crate::ErrorKind::Network)
.await?;
Command::new("iptables")
.arg("-t")
.arg("nat")
.arg("-I")
.arg("PREROUTING")
.arg("-p")
.arg("tcp")
.arg("--dport")
.arg(external.to_string())
.arg("-j")
.arg("DNAT")
.arg("--to")
.arg(addr.to_string())
.invoke(crate::ErrorKind::Network)
.await?;
/// Installs the iptables rule pair that forwards `external` (arriving on
/// `interface`, tcp and udp) into the start9 bridge toward `target`.
async fn forward(external: u16, interface: &str, target: SocketAddr) -> Result<(), Error> {
    for proto in ["tcp", "udp"] {
        // Permit forwarded traffic from `interface` into the bridge toward
        // the target address/port.
        Command::new("iptables")
            .args(["-I", "FORWARD", "-i", interface])
            .args(["-o", START9_BRIDGE_IFACE, "-p", proto, "-d"])
            .arg(target.ip().to_string())
            .arg("--dport")
            .arg(target.port().to_string())
            .args(["-j", "ACCEPT"])
            .invoke(crate::ErrorKind::Network)
            .await?;
        // Rewrite the destination of packets hitting `external` to `target`.
        Command::new("iptables")
            .args(["-t", "nat", "-I", "PREROUTING", "-i", interface])
            .args(["-p", proto, "--dport"])
            .arg(external.to_string())
            .args(["-j", "DNAT", "--to"])
            .arg(target.to_string())
            .invoke(crate::ErrorKind::Network)
            .await?;
    }
    Ok(())
}
// iptables -D FORWARD -o br-start9 -p tcp -d 172.18.0.2 --dport 8333 -j ACCEPT
// iptables -t nat -D PREROUTING -p tcp --dport 32768 -j DNAT --to 172.18.0.2:8333
async fn unforward(iface: &str, external: u16, addr: SocketAddr) -> Result<(), Error> {
Command::new("iptables")
.arg("-D")
.arg("FORWARD")
.arg("-o")
.arg(iface)
.arg("-p")
.arg("tcp")
.arg("-d")
.arg(addr.ip().to_string())
.arg("--dport")
.arg(addr.port().to_string())
.arg("-j")
.arg("ACCEPT")
.invoke(crate::ErrorKind::Network)
.await?;
Command::new("iptables")
.arg("-t")
.arg("nat")
.arg("-D")
.arg("PREROUTING")
.arg("-p")
.arg("tcp")
.arg("--dport")
.arg(external.to_string())
.arg("-j")
.arg("DNAT")
.arg("--to")
.arg(addr.to_string())
.invoke(crate::ErrorKind::Network)
.await?;
/// Removes the iptables rule pair installed by `forward` for `external` on
/// `interface` (tcp and udp); arguments must match the original insertion.
async fn unforward(external: u16, interface: &str, target: SocketAddr) -> Result<(), Error> {
    for proto in ["tcp", "udp"] {
        // Delete the FORWARD ACCEPT rule for this target.
        Command::new("iptables")
            .args(["-D", "FORWARD", "-i", interface])
            .args(["-o", START9_BRIDGE_IFACE, "-p", proto, "-d"])
            .arg(target.ip().to_string())
            .arg("--dport")
            .arg(target.port().to_string())
            .args(["-j", "ACCEPT"])
            .invoke(crate::ErrorKind::Network)
            .await?;
        // Delete the matching PREROUTING DNAT rule.
        Command::new("iptables")
            .args(["-t", "nat", "-D", "PREROUTING", "-i", interface])
            .args(["-p", proto, "--dport"])
            .arg(external.to_string())
            .args(["-j", "DNAT", "--to"])
            .arg(target.to_string())
            .invoke(crate::ErrorKind::Network)
            .await?;
    }
    Ok(())
}

View File

@@ -1,17 +1,22 @@
use std::fmt;
use std::str::FromStr;
use std::collections::BTreeSet;
use clap::builder::ValueParserFactory;
use clap::Parser;
use imbl_value::InternedString;
use models::FromStrParser;
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::db::model::DatabaseModel;
use crate::net::acme::AcmeProvider;
use crate::net::host::{all_hosts, HostApiKind};
use crate::prelude::*;
use crate::util::serde::{display_serializable, HandlerExtSerde};
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord, TS)]
#[serde(rename_all = "camelCase")]
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "kebab-case")]
#[serde(rename_all_fields = "camelCase")]
#[serde(tag = "kind")]
#[ts(export)]
pub enum HostAddress {
@@ -22,36 +27,278 @@ pub enum HostAddress {
Domain {
#[ts(type = "string")]
address: InternedString,
public: bool,
acme: Option<AcmeProvider>,
},
}
impl FromStr for HostAddress {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if let Some(addr) = s.strip_suffix(".onion") {
Ok(HostAddress::Onion {
address: addr
.parse::<OnionAddressV3>()
.with_kind(ErrorKind::ParseUrl)?,
})
} else {
Ok(HostAddress::Domain { address: s.into() })
}
}
/// Per-domain settings attached to a host's clearnet address.
#[derive(Debug, Deserialize, Serialize, TS)]
pub struct DomainConfig {
    // Whether the domain is reachable on public (non-private) interfaces.
    pub public: bool,
    // ACME provider used to acquire certificates; `None` = self-signed only.
    pub acme: Option<AcmeProvider>,
}
impl fmt::Display for HostAddress {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::Onion { address } => write!(f, "{address}"),
Self::Domain { address } => write!(f, "{address}"),
/// Verifies that no onion address or domain is claimed by more than one host
/// across the whole database; returns `InvalidRequest` on the first conflict.
fn check_duplicates(db: &DatabaseModel) -> Result<(), Error> {
    let mut onions = BTreeSet::<OnionAddressV3>::new();
    let mut domains = BTreeSet::<InternedString>::new();
    // contains-then-insert (rather than `!insert(..)`) so the value is still
    // available for the error message.
    let mut check_onion = |onion: OnionAddressV3| {
        if onions.contains(&onion) {
            return Err(Error::new(
                eyre!("onion address {onion} is already in use"),
                ErrorKind::InvalidRequest,
            ));
        }
        onions.insert(onion);
        Ok(())
    };
    let mut check_domain = |domain: InternedString| {
        if domains.contains(&domain) {
            return Err(Error::new(
                eyre!("domain {domain} is already in use"),
                ErrorKind::InvalidRequest,
            ));
        }
        domains.insert(domain);
        Ok(())
    };
    // Walk every host (server host + all package hosts).
    for host in all_hosts(db) {
        let host = host?;
        for onion in host.as_onions().de()? {
            check_onion(onion)?;
        }
        for domain in host.as_domains().keys()? {
            check_domain(domain)?;
        }
    }
    Ok(())
}
impl ValueParserFactory for HostAddress {
type Parser = FromStrParser<Self>;
fn value_parser() -> Self::Parser {
Self::Parser::new()
}
/// Builds the `address` RPC/CLI subtree (domain add/remove, onion add/remove,
/// list) for either a package host or the server host, selected by `Kind`.
pub fn address_api<C: Context, Kind: HostApiKind>(
) -> ParentHandler<C, Kind::Params, Kind::InheritedParams> {
    ParentHandler::<C, Kind::Params, Kind::InheritedParams>::new()
        .subcommand(
            "domain",
            ParentHandler::<C, Empty, Kind::Inheritance>::new()
                .subcommand(
                    "add",
                    from_fn_async(add_domain::<Kind>)
                        // marks this call as mutating the db so clients resync
                        .with_metadata("sync_db", Value::Bool(true))
                        .with_inherited(|_, a| a)
                        .no_display()
                        .with_about("Add an address to this host")
                        .with_call_remote::<CliContext>(),
                )
                .subcommand(
                    "remove",
                    from_fn_async(remove_domain::<Kind>)
                        .with_metadata("sync_db", Value::Bool(true))
                        .with_inherited(|_, a| a)
                        .no_display()
                        .with_about("Remove an address from this host")
                        .with_call_remote::<CliContext>(),
                )
                .with_inherited(Kind::inheritance),
        )
        .subcommand(
            "onion",
            ParentHandler::<C, Empty, Kind::Inheritance>::new()
                .subcommand(
                    "add",
                    from_fn_async(add_onion::<Kind>)
                        .with_metadata("sync_db", Value::Bool(true))
                        .with_inherited(|_, a| a)
                        .no_display()
                        .with_about("Add an address to this host")
                        .with_call_remote::<CliContext>(),
                )
                .subcommand(
                    "remove",
                    from_fn_async(remove_onion::<Kind>)
                        .with_metadata("sync_db", Value::Bool(true))
                        .with_inherited(|_, a| a)
                        .no_display()
                        .with_about("Remove an address from this host")
                        .with_call_remote::<CliContext>(),
                )
                .with_inherited(Kind::inheritance),
        )
        .subcommand(
            "list",
            from_fn_async(list_addresses::<Kind>)
                .with_inherited(Kind::inheritance)
                .with_display_serializable()
                .with_custom_display_fn(|HandlerArgs { params, .. }, res| {
                    use prettytable::*;
                    // Machine-readable output requested: skip the table.
                    if let Some(format) = params.format {
                        display_serializable(format, res);
                        return Ok(());
                    }
                    let mut table = Table::new();
                    table.add_row(row![bc => "ADDRESS", "PUBLIC", "ACME PROVIDER"]);
                    for address in &res {
                        match address {
                            // Onion addresses are always reachable and never use ACME.
                            HostAddress::Onion { address } => {
                                table.add_row(row![address, true, "N/A"]);
                            }
                            HostAddress::Domain {
                                address,
                                public,
                                acme,
                            } => {
                                table.add_row(row![
                                    address,
                                    *public,
                                    acme.as_ref().map(|a| a.0.as_str()).unwrap_or("NONE")
                                ]);
                            }
                        }
                    }
                    table.print_tty(false)?;
                    Ok(())
                })
                .with_about("List addresses for this host")
                .with_call_remote::<CliContext>(),
        )
}
/// Parameters for `address domain add`.
#[derive(Deserialize, Serialize, Parser)]
pub struct AddDomainParams {
    // Domain name to attach to the host.
    pub domain: InternedString,
    // If set, the domain is only served on private interfaces.
    #[arg(long)]
    pub private: bool,
    // ACME provider for certificate acquisition (must be initialized first).
    #[arg(long)]
    pub acme: Option<AcmeProvider>,
}
/// Attaches a clearnet domain to the host selected by `Kind`, validating that
/// any requested ACME provider is configured and that the domain is not
/// already in use, then re-syncs the host's network services.
pub async fn add_domain<Kind: HostApiKind>(
    ctx: RpcContext,
    AddDomainParams {
        domain,
        private,
        acme,
    }: AddDomainParams,
    inheritance: Kind::Inheritance,
) -> Result<(), Error> {
    ctx.db
        .mutate(|db| {
            // Reject ACME providers that haven't been set up via acme.init.
            if let Some(acme) = &acme {
                if !db.as_public().as_server_info().as_acme().contains_key(&acme)? {
                    return Err(Error::new(eyre!("unknown acme provider {}, please run acme.init for this provider first", acme.0), ErrorKind::InvalidRequest));
                }
            }
            Kind::host_for(&inheritance, db)?
                .as_domains_mut()
                .insert(
                    &domain,
                    &DomainConfig {
                        public: !private,
                        acme,
                    },
                )?;
            // Validate after insertion so the new domain is included.
            check_duplicates(db)
        })
        .await?;
    Kind::sync_host(&ctx, inheritance).await?;
    Ok(())
}
/// Parameters for `address domain remove`.
#[derive(Deserialize, Serialize, Parser)]
pub struct RemoveDomainParams {
    // Domain name to detach from the host.
    pub domain: InternedString,
}
/// Detaches a clearnet domain from the host selected by `Kind` and re-syncs
/// the host's network services. Removing an absent domain is not an error.
pub async fn remove_domain<Kind: HostApiKind>(
    ctx: RpcContext,
    RemoveDomainParams { domain }: RemoveDomainParams,
    inheritance: Kind::Inheritance,
) -> Result<(), Error> {
    ctx.db
        .mutate(|db| {
            Kind::host_for(&inheritance, db)?
                .as_domains_mut()
                .remove(&domain)
        })
        .await?;
    Kind::sync_host(&ctx, inheritance).await?;
    Ok(())
}
/// Parameters for `address onion add`/`remove`: a full `<addr>.onion` name.
#[derive(Deserialize, Serialize, Parser)]
pub struct OnionParams {
    pub onion: String,
}
/// Attaches an onion address to the host selected by `Kind`. The address must
/// end in `.onion`, its private key must already exist in the key store, and
/// it must not be claimed by another host.
pub async fn add_onion<Kind: HostApiKind>(
    ctx: RpcContext,
    OnionParams { onion }: OnionParams,
    inheritance: Kind::Inheritance,
) -> Result<(), Error> {
    let onion = onion
        .strip_suffix(".onion")
        .ok_or_else(|| {
            Error::new(
                eyre!("onion hostname must end in .onion"),
                ErrorKind::InvalidOnionAddress,
            )
        })?
        .parse::<OnionAddressV3>()?;
    ctx.db
        .mutate(|db| {
            // Fails if no key exists for this onion address.
            db.as_private().as_key_store().as_onion().get_key(&onion)?;
            Kind::host_for(&inheritance, db)?
                .as_onions_mut()
                .mutate(|a| Ok(a.insert(onion)))?;
            check_duplicates(db)
        })
        .await?;
    Kind::sync_host(&ctx, inheritance).await?;
    Ok(())
}
/// Detaches an onion address from the host selected by `Kind` and re-syncs
/// the host. No key-store check here: removal of an unknown address is a
/// harmless no-op.
pub async fn remove_onion<Kind: HostApiKind>(
    ctx: RpcContext,
    OnionParams { onion }: OnionParams,
    inheritance: Kind::Inheritance,
) -> Result<(), Error> {
    let onion = onion
        .strip_suffix(".onion")
        .ok_or_else(|| {
            Error::new(
                eyre!("onion hostname must end in .onion"),
                ErrorKind::InvalidOnionAddress,
            )
        })?
        .parse::<OnionAddressV3>()?;
    ctx.db
        .mutate(|db| {
            Kind::host_for(&inheritance, db)?
                .as_onions_mut()
                .mutate(|a| Ok(a.remove(&onion)))
        })
        .await?;
    Kind::sync_host(&ctx, inheritance).await?;
    Ok(())
}
/// Returns every address (onions first, then domains) configured for the host
/// selected by `Kind`.
pub async fn list_addresses<Kind: HostApiKind>(
    ctx: RpcContext,
    _: Empty,
    inheritance: Kind::Inheritance,
) -> Result<Vec<HostAddress>, Error> {
    Ok(Kind::host_for(&inheritance, &mut ctx.db.peek().await)?
        .de()?
        .addresses()
        .collect())
}

View File

@@ -1,13 +1,19 @@
use std::collections::BTreeMap;
use std::str::FromStr;
use clap::builder::ValueParserFactory;
use clap::Parser;
use models::{FromStrParser, HostId};
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::net::forward::AvailablePorts;
use crate::net::host::HostApiKind;
use crate::net::vhost::AlpnInfo;
use crate::prelude::*;
use crate::util::serde::{display_serializable, HandlerExtSerde};
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, TS)]
#[ts(export)]
@@ -41,12 +47,14 @@ impl FromStr for BindId {
pub struct BindInfo {
pub enabled: bool,
pub options: BindOptions,
pub lan: LanInfo,
pub net: NetInfo,
}
#[derive(Clone, Copy, Debug, Deserialize, Serialize, TS, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct LanInfo {
pub struct NetInfo {
pub public: bool,
pub assigned_port: Option<u16>,
pub assigned_ssl_port: Option<u16>,
}
@@ -63,7 +71,8 @@ impl BindInfo {
Ok(Self {
enabled: true,
options,
lan: LanInfo {
net: NetInfo {
public: false,
assigned_port,
assigned_ssl_port,
},
@@ -74,7 +83,7 @@ impl BindInfo {
available_ports: &mut AvailablePorts,
options: BindOptions,
) -> Result<Self, Error> {
let Self { mut lan, .. } = self;
let Self { net: mut lan, .. } = self;
if options
.secure
.map_or(false, |s| !(s.ssl && options.add_ssl.is_some()))
@@ -104,7 +113,7 @@ impl BindInfo {
Ok(Self {
enabled: true,
options,
lan,
net: lan,
})
}
pub fn disable(&mut self) {
@@ -137,3 +146,99 @@ pub struct AddSslOptions {
// pub add_x_forwarded_headers: bool, // TODO
pub alpn: Option<AlpnInfo>,
}
/// Builds the `binding` RPC/CLI subtree (list, set-public) for either a
/// package host or the server host, selected by `Kind`.
pub fn binding<C: Context, Kind: HostApiKind>(
) -> ParentHandler<C, Kind::Params, Kind::InheritedParams> {
    ParentHandler::<C, Kind::Params, Kind::InheritedParams>::new()
        .subcommand(
            "list",
            from_fn_async(list_bindings::<Kind>)
                .with_inherited(Kind::inheritance)
                .with_display_serializable()
                .with_custom_display_fn(|HandlerArgs { params, .. }, res| {
                    use prettytable::*;
                    // Machine-readable output requested: skip the table.
                    if let Some(format) = params.format {
                        return Ok(display_serializable(format, res));
                    }
                    let mut table = Table::new();
                    table.add_row(row![bc => "INTERNAL PORT", "ENABLED", "PUBLIC", "EXTERNAL PORT", "EXTERNAL SSL PORT"]);
                    for (internal, info) in res {
                        table.add_row(row![
                            internal,
                            info.enabled,
                            info.net.public,
                            if let Some(port) = info.net.assigned_port {
                                port.to_string()
                            } else {
                                "N/A".to_owned()
                            },
                            if let Some(port) = info.net.assigned_ssl_port {
                                port.to_string()
                            } else {
                                "N/A".to_owned()
                            },
                        ]);
                    }
                    // `?` instead of `.unwrap()`: consistent with address_api
                    // and avoids panicking on a broken output stream.
                    table.print_tty(false)?;
                    Ok(())
                })
                // fixed typo: "bindinges" -> "bindings"
                .with_about("List bindings for this host")
                .with_call_remote::<CliContext>(),
        )
        .subcommand(
            "set-public",
            from_fn_async(set_public::<Kind>)
                .with_metadata("sync_db", Value::Bool(true))
                .with_inherited(Kind::inheritance)
                .no_display()
                // fixed help text: this subcommand toggles publicity, it does
                // not add a binding
                .with_about("Set whether a binding on this host is public")
                .with_call_remote::<CliContext>(),
        )
}
/// Returns the full internal-port -> binding-info map for the host selected
/// by `Kind`.
pub async fn list_bindings<Kind: HostApiKind>(
    ctx: RpcContext,
    _: Empty,
    inheritance: Kind::Inheritance,
) -> Result<BTreeMap<u16, BindInfo>, Error> {
    Kind::host_for(&inheritance, &mut ctx.db.peek().await)?
        .as_bindings()
        .de()
}
/// Parameters for `binding set-public`.
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct BindingSetPublicParams {
    // Internal (service-side) port whose binding is being modified.
    internal_port: u16,
    // Desired publicity; omitted means "make public".
    #[arg(long)]
    public: Option<bool>,
}
/// Sets the `public` flag on one binding of the host selected by `Kind`
/// (defaulting to `true` when unspecified), then re-syncs the host.
/// Fails with NotFound if the internal port has no binding.
pub async fn set_public<Kind: HostApiKind>(
    ctx: RpcContext,
    BindingSetPublicParams {
        internal_port,
        public,
    }: BindingSetPublicParams,
    inheritance: Kind::Inheritance,
) -> Result<(), Error> {
    ctx.db
        .mutate(|db| {
            Kind::host_for(&inheritance, db)?
                .as_bindings_mut()
                .mutate(|b| {
                    b.get_mut(&internal_port)
                        .or_not_found(internal_port)?
                        .net
                        .public = public.unwrap_or(true);
                    Ok(())
                })
        })
        .await?;
    Kind::sync_host(&ctx, inheritance).await
}

View File

@@ -1,31 +1,37 @@
use std::collections::{BTreeMap, BTreeSet};
use std::future::Future;
use std::panic::RefUnwindSafe;
use clap::Parser;
use imbl_value::InternedString;
use itertools::Itertools;
use models::{HostId, PackageId};
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, ParentHandler};
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, OrEmpty, ParentHandler};
use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::context::RpcContext;
use crate::db::model::DatabaseModel;
use crate::net::forward::AvailablePorts;
use crate::net::host::address::HostAddress;
use crate::net::host::binding::{BindInfo, BindOptions};
use crate::net::host::address::{address_api, DomainConfig, HostAddress};
use crate::net::host::binding::{binding, BindInfo, BindOptions};
use crate::net::service_interface::HostnameInfo;
use crate::prelude::*;
pub mod address;
pub mod binding;
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct Host {
pub kind: HostKind,
pub bindings: BTreeMap<u16, BindInfo>,
pub addresses: BTreeSet<HostAddress>,
#[ts(type = "string[]")]
pub onions: BTreeSet<OnionAddressV3>,
#[ts(as = "BTreeMap::<String, DomainConfig>")]
pub domains: BTreeMap<InternedString, DomainConfig>,
/// COMPUTED: NetService::update
pub hostname_info: BTreeMap<u16, Vec<HostnameInfo>>, // internal port -> Hostnames
}
@@ -35,29 +41,28 @@ impl AsRef<Host> for Host {
}
}
impl Host {
pub fn new(kind: HostKind) -> Self {
Self {
kind,
bindings: BTreeMap::new(),
addresses: BTreeSet::new(),
hostname_info: BTreeMap::new(),
}
pub fn new() -> Self {
Self::default()
}
pub fn addresses(&self) -> impl Iterator<Item = &HostAddress> {
// TODO: handle primary
self.addresses.iter()
pub fn addresses<'a>(&'a self) -> impl Iterator<Item = HostAddress> + 'a {
self.onions
.iter()
.cloned()
.map(|address| HostAddress::Onion { address })
.chain(
self.domains
.iter()
.map(
|(address, DomainConfig { public, acme })| HostAddress::Domain {
address: address.clone(),
public: *public,
acme: acme.clone(),
},
),
)
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub enum HostKind {
Multi,
// Single,
// Static,
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[model = "Model<Self>"]
#[ts(export)]
@@ -76,10 +81,12 @@ impl Map for Hosts {
pub fn host_for<'a>(
db: &'a mut DatabaseModel,
package_id: &PackageId,
package_id: Option<&PackageId>,
host_id: &HostId,
host_kind: HostKind,
) -> Result<&'a mut Model<Host>, Error> {
let Some(package_id) = package_id else {
return Ok(db.as_public_mut().as_server_info_mut().as_host_mut());
};
fn host_info<'a>(
db: &'a mut DatabaseModel,
package_id: &PackageId,
@@ -103,23 +110,31 @@ pub fn host_for<'a>(
None
};
host_info(db, package_id)?.upsert(host_id, || {
let mut h = Host::new(host_kind);
h.addresses.insert(HostAddress::Onion {
address: tor_key
let mut h = Host::new();
h.onions.insert(
tor_key
.or_not_found("generated tor key")?
.public()
.get_onion_address(),
});
);
Ok(h)
})
}
/// Iterates over every host in the database: the server's own host first,
/// followed by each host of each installed package.
pub fn all_hosts(db: &DatabaseModel) -> impl Iterator<Item = Result<&Model<Host>, Error>> {
    [Ok(db.as_public().as_server_info().as_host())]
        .into_iter()
        .chain(
            // flatten package -> hosts, propagating any model errors
            [db.as_public().as_package_data().as_entries()]
                .into_iter()
                .flatten_ok()
                .map(|entry| entry.and_then(|(_, v)| v.as_hosts().as_entries()))
                .flatten_ok()
                .map_ok(|(_, v)| v),
        )
}
impl Model<Host> {
pub fn set_kind(&mut self, kind: HostKind) -> Result<(), Error> {
match (self.as_kind().de()?, kind) {
(HostKind::Multi, HostKind::Multi) => Ok(()),
}
}
pub fn add_binding(
&mut self,
available_ports: &mut AvailablePorts,
@@ -139,16 +154,78 @@ impl Model<Host> {
}
#[derive(Deserialize, Serialize, Parser)]
pub struct HostParams {
pub struct RequiresPackageId {
package: PackageId,
}
pub fn host<C: Context>() -> ParentHandler<C, HostParams> {
ParentHandler::<C, HostParams>::new()
#[derive(Deserialize, Serialize, Parser)]
pub struct RequiresHostId {
host: HostId,
}
/// Abstracts over the two host-API targets (a package's host vs the server
/// host) so `address_api`/`binding` can be written once for both.
pub trait HostApiKind: 'static {
    // CLI/RPC params declared at this level of the handler tree.
    type Params: Send + Sync + 'static;
    // Params inherited from the parent handler.
    type InheritedParams: Send + Sync + 'static;
    // Combined identity threaded down to the leaf handlers.
    type Inheritance: RefUnwindSafe + OrEmpty<Self::Inheritance> + Send + Sync + 'static;
    // Combines own and inherited params into the inheritance value.
    fn inheritance(params: Self::Params, inherited: Self::InheritedParams) -> Self::Inheritance;
    // Resolves the mutable host model this API instance operates on.
    fn host_for<'a>(
        inheritance: &Self::Inheritance,
        db: &'a mut DatabaseModel,
    ) -> Result<&'a mut Model<Host>, Error>;
    // Pushes the (possibly changed) host config out to the network stack.
    fn sync_host(
        ctx: &RpcContext,
        inheritance: Self::Inheritance,
    ) -> impl Future<Output = Result<(), Error>> + Send;
}
/// `HostApiKind` for a host belonging to an installed package.
pub struct ForPackage;
impl HostApiKind for ForPackage {
    type Params = RequiresHostId;
    type InheritedParams = PackageId;
    type Inheritance = (PackageId, HostId);
    fn inheritance(
        RequiresHostId { host }: Self::Params,
        package: Self::InheritedParams,
    ) -> Self::Inheritance {
        (package, host)
    }
    fn host_for<'a>(
        (package, host): &Self::Inheritance,
        db: &'a mut DatabaseModel,
    ) -> Result<&'a mut Model<Host>, Error> {
        host_for(db, Some(package), host)
    }
    // Delegates the sync to the running service for this package.
    async fn sync_host(ctx: &RpcContext, (package, host): Self::Inheritance) -> Result<(), Error> {
        let service = ctx.services.get(&package).await;
        let service_ref = service.as_ref().or_not_found(&package)?;
        service_ref.sync_host(host).await?;
        Ok(())
    }
}
/// `HostApiKind` for the server's own (OS-level) host.
pub struct ForServer;
impl HostApiKind for ForServer {
    type Params = Empty;
    type InheritedParams = Empty;
    type Inheritance = Empty;
    fn inheritance(_: Self::Params, _: Self::InheritedParams) -> Self::Inheritance {
        Empty {}
    }
    fn host_for<'a>(
        _: &Self::Inheritance,
        db: &'a mut DatabaseModel,
    ) -> Result<&'a mut Model<Host>, Error> {
        // `None` package selects the server-info host.
        host_for(db, None, &HostId::default())
    }
    async fn sync_host(ctx: &RpcContext, _: Self::Inheritance) -> Result<(), Error> {
        ctx.os_net_service.sync_host(HostId::default()).await
    }
}
/// Builds the per-package `host` API subtree (list, address, binding); the
/// package id is inherited by every subcommand.
pub fn host_api<C: Context>() -> ParentHandler<C, RequiresPackageId> {
    ParentHandler::<C, RequiresPackageId>::new()
        .subcommand(
            "list",
            from_fn_async(list_hosts)
                .with_inherited(|RequiresPackageId { package }, _| package)
                .with_custom_display_fn(|_, ids| {
                    for id in ids {
                        println!("{id}")
                    }
                    Ok(())
                })
                .with_about("List hosts for this package")
                .with_call_remote::<CliContext>(),
        )
        .subcommand(
            "address",
            address_api::<C, ForPackage>()
                .with_inherited(|RequiresPackageId { package }, _| package),
        )
        .subcommand(
            "binding",
            binding::<C, ForPackage>().with_inherited(|RequiresPackageId { package }, _| package),
        )
}
/// Builds the server-host API subtree (address, binding) — same handlers as
/// the package variant, targeting the OS host via `ForServer`.
pub fn server_host_api<C: Context>() -> ParentHandler<C> {
    ParentHandler::<C>::new()
        .subcommand("address", address_api::<C, ForServer>())
        .subcommand("binding", binding::<C, ForServer>())
}
pub async fn list_hosts(
@@ -178,122 +266,3 @@ pub async fn list_hosts(
.into_hosts()
.keys()
}
#[derive(Deserialize, Serialize, Parser)]
pub struct AddressApiParams {
host: HostId,
}
pub fn address<C: Context>() -> ParentHandler<C, AddressApiParams, PackageId> {
ParentHandler::<C, AddressApiParams, PackageId>::new()
.subcommand(
"add",
from_fn_async(add_address)
.with_inherited(|AddressApiParams { host }, package| (package, host))
.no_display()
.with_about("Add an address to this host")
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove_address)
.with_inherited(|AddressApiParams { host }, package| (package, host))
.no_display()
.with_about("Remove an address from this host")
.with_call_remote::<CliContext>(),
)
.subcommand(
"list",
from_fn_async(list_addresses)
.with_inherited(|AddressApiParams { host }, package| (package, host))
.with_custom_display_fn(|_, res| {
for address in res {
println!("{address}")
}
Ok(())
})
.with_about("List addresses for this host")
.with_call_remote::<CliContext>(),
)
}
#[derive(Deserialize, Serialize, Parser)]
pub struct AddressParams {
pub address: HostAddress,
}
pub async fn add_address(
ctx: RpcContext,
AddressParams { address }: AddressParams,
(package, host): (PackageId, HostId),
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
if let HostAddress::Onion { address } = address {
db.as_private()
.as_key_store()
.as_onion()
.get_key(&address)?;
}
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package)
.or_not_found(&package)?
.as_hosts_mut()
.as_idx_mut(&host)
.or_not_found(&host)?
.as_addresses_mut()
.mutate(|a| Ok(a.insert(address)))
})
.await?;
let service = ctx.services.get(&package).await;
let service_ref = service.as_ref().or_not_found(&package)?;
service_ref.update_host(host).await?;
Ok(())
}
pub async fn remove_address(
ctx: RpcContext,
AddressParams { address }: AddressParams,
(package, host): (PackageId, HostId),
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package)
.or_not_found(&package)?
.as_hosts_mut()
.as_idx_mut(&host)
.or_not_found(&host)?
.as_addresses_mut()
.mutate(|a| Ok(a.remove(&address)))
})
.await?;
let service = ctx.services.get(&package).await;
let service_ref = service.as_ref().or_not_found(&package)?;
service_ref.update_host(host).await?;
Ok(())
}
pub async fn list_addresses(
ctx: RpcContext,
_: Empty,
(package, host): (PackageId, HostId),
) -> Result<BTreeSet<HostAddress>, Error> {
ctx.db
.peek()
.await
.into_public()
.into_package_data()
.into_idx(&package)
.or_not_found(&package)?
.into_hosts()
.into_idx(&host)
.or_not_found(&host)?
.into_addresses()
.de()
}

View File

@@ -21,7 +21,9 @@ impl KeyStore {
local_certs: CertStore::new(account)?,
acme: AcmeCertStore::new(),
};
res.onion.insert(account.tor_key.clone());
for tor_key in account.tor_keys.iter().cloned() {
res.onion.insert(tor_key);
}
Ok(res)
}
}

View File

@@ -1,13 +1,13 @@
use rpc_toolkit::{Context, HandlerExt, ParentHandler};
pub mod acme;
pub mod dhcp;
pub mod dns;
pub mod forward;
pub mod host;
pub mod keys;
pub mod mdns;
pub mod net_controller;
pub mod network_interface;
pub mod service_interface;
pub mod ssl;
pub mod static_server;
@@ -17,20 +17,23 @@ pub mod vhost;
pub mod web_server;
pub mod wifi;
pub const PACKAGE_CERT_PATH: &str = "/var/lib/embassy/ssl";
pub fn net<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"tor",
tor::tor::<C>().with_about("Tor commands such as list-services, logs, and reset"),
)
.subcommand(
"dhcp",
dhcp::dhcp::<C>().with_about("Command to update IP assigned from dhcp"),
)
.subcommand(
"acme",
acme::acme::<C>().with_about("Setup automatic clearnet certificate acquisition"),
)
.subcommand(
"network-interface",
network_interface::network_interface_api::<C>()
.with_about("View and edit network interface configurations"),
)
.subcommand(
"vhost",
vhost::vhost_api::<C>().with_about("Manage ssl virtual host proxy"),
)
}

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,8 @@ use ts_rs::TS;
#[serde(tag = "kind")]
pub enum HostnameInfo {
Ip {
network_interface_id: String,
#[ts(type = "string")]
network_interface_id: InternedString,
public: bool,
hostname: IpHostname,
},
@@ -43,6 +44,8 @@ pub enum IpHostname {
},
Ipv6 {
value: Ipv6Addr,
#[serde(default)]
scope_id: u32,
port: Option<u16>,
ssl_port: Option<u16>,
},
@@ -69,7 +72,6 @@ pub struct ServiceInterface {
pub id: ServiceInterfaceId,
pub name: String,
pub description: String,
pub has_primary: bool,
pub masked: bool,
pub address_info: AddressInfo,
#[serde(rename = "type")]

View File

@@ -17,7 +17,6 @@ use openssl::x509::{X509Builder, X509Extension, X509NameBuilder, X509};
use openssl::*;
use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use tokio::time::Instant;
use tracing::instrument;
use crate::account::AccountInfo;

View File

@@ -8,15 +8,15 @@ use std::time::UNIX_EPOCH;
use async_compression::tokio::bufread::GzipEncoder;
use axum::body::Body;
use axum::extract::{self as x, Request};
use axum::response::Response;
use axum::routing::{any, get, post};
use axum::response::{Redirect, Response};
use axum::routing::{any, get};
use axum::Router;
use base64::display::Base64Display;
use digest::Digest;
use futures::future::ready;
use http::header::{
ACCEPT_ENCODING, ACCEPT_RANGES, CACHE_CONTROL, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH,
CONTENT_RANGE, CONTENT_TYPE, ETAG, RANGE,
CONTENT_RANGE, CONTENT_TYPE, ETAG, HOST, RANGE,
};
use http::request::Parts as RequestParts;
use http::{HeaderValue, Method, StatusCode};
@@ -26,7 +26,6 @@ use new_mime_guess::MimeGuess;
use openssl::hash::MessageDigest;
use openssl::x509::X509;
use rpc_toolkit::{Context, HttpServer, Server};
use sqlx::query;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeekExt, BufReader};
use tokio_util::io::ReaderStream;
use url::Url;
@@ -47,7 +46,7 @@ use crate::s9pk::S9pk;
use crate::util::io::open_file;
use crate::util::net::SyncBody;
use crate::util::serde::BASE64;
use crate::{diagnostic_api, init_api, install_api, main_api, setup_api};
use crate::{diagnostic_api, init_api, install_api, main_api, setup_api, DATA_DIR};
const NOT_FOUND: &[u8] = b"Not Found";
const METHOD_NOT_ALLOWED: &[u8] = b"Method Not Allowed";
@@ -230,6 +229,20 @@ pub fn refresher() -> Router {
}))
}
/// Router that 307-redirects every request to the same host and path over
/// https; used to bounce plain-http traffic to the TLS listener.
pub fn redirecter() -> Router {
    Router::new().fallback(get(|request: Request| async move {
        Redirect::temporary(&format!(
            "https://{}{}",
            // Preserve the Host header so the redirect stays on the same
            // vhost; "localhost" only if the client sent no/invalid Host.
            request
                .headers()
                .get(HOST)
                .and_then(|s| s.to_str().ok())
                .unwrap_or("localhost"),
            request.uri()
        ))
    }))
}
async fn proxy_request(ctx: RpcContext, request: Request, url: String) -> Result<Response, Error> {
if_authorized(&ctx, request, |mut request| async {
for header in PROXY_STRIP_HEADERS {
@@ -253,7 +266,7 @@ fn s9pk_router(ctx: RpcContext) -> Router {
let (parts, _) = request.into_parts();
match FileData::from_path(
&parts,
&ctx.datadir
&Path::new(DATA_DIR)
.join(PKG_ARCHIVE_DIR)
.join("installed")
.join(s9pk),
@@ -279,7 +292,7 @@ fn s9pk_router(ctx: RpcContext) -> Router {
let s9pk = S9pk::deserialize(
&MultiCursorFile::from(
open_file(
ctx.datadir
Path::new(DATA_DIR)
.join(PKG_ARCHIVE_DIR)
.join("installed")
.join(s9pk),

View File

@@ -1,16 +1,32 @@
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};
use std::collections::BTreeMap;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV6};
use std::path::Path;
use async_stream::try_stream;
use color_eyre::eyre::eyre;
use futures::stream::BoxStream;
use futures::{StreamExt, TryStreamExt};
use ipnet::{Ipv4Net, Ipv6Net};
use helpers::NonDetachingJoinHandle;
use imbl_value::InternedString;
use ipnet::{IpNet, Ipv4Net, Ipv6Net};
use nix::net::if_::if_nametoindex;
use tokio::net::{TcpListener, TcpStream};
use tokio::process::Command;
use crate::db::model::public::NetworkInterfaceInfo;
use crate::net::network_interface::NetworkInterfaceListener;
use crate::net::web_server::Accept;
use crate::prelude::*;
use crate::util::sync::Watch;
use crate::util::Invoke;
use crate::Error;
/// Returns `true` if `addr` falls in the IPv6 link-local unicast range
/// (`fe80::/10`), i.e. its top 10 bits are `1111 1110 10`.
pub fn ipv6_is_link_local(addr: Ipv6Addr) -> bool {
    let first_segment = addr.segments()[0];
    first_segment & 0xffc0 == 0xfe80
}
/// Returns `true` if `addr` is not globally routable: loopback (`::1`),
/// unique-local (`fc00::/7`), or link-local (`fe80::/10`).
pub fn ipv6_is_local(addr: Ipv6Addr) -> bool {
    if addr.is_loopback() {
        return true;
    }
    // fc00::/7 covers the unique-local address space.
    addr.segments()[0] & 0xfe00 == 0xfc00 || ipv6_is_link_local(addr)
}
fn parse_iface_ip(output: &str) -> Result<Vec<&str>, Error> {
let output = output.trim();
@@ -112,6 +128,55 @@ pub async fn find_eth_iface() -> Result<String, Error> {
))
}
/// Returns one `SocketAddr` per IP address currently assigned on this machine,
/// each paired with the name of the interface carrying it, using `port` as the
/// port for every entry.
///
/// Implemented by shelling out to `ip -o addr show` and parsing its
/// one-record-per-line output. For IPv6 addresses, the socket address's
/// scope_id is set to the interface index (via `if_nametoindex`) so that
/// link-local addresses remain bindable.
///
/// # Errors
/// `ErrorKind::ParseSysInfo` if invoking `ip` fails or a line of its output
/// cannot be parsed.
pub async fn all_socket_addrs_for(port: u16) -> Result<Vec<(InternedString, SocketAddr)>, Error> {
    let mut res = Vec::new();
    // `ip -o` prints each address record on a single line, roughly:
    //   <idx>: <ifname> <family> <addr>/<prefix> ...
    // (assumed iproute2 output format — TODO confirm across target distros)
    let raw = String::from_utf8(
        Command::new("ip")
            .arg("-o")
            .arg("addr")
            .arg("show")
            .invoke(ErrorKind::ParseSysInfo)
            .await?,
    )?;
    // Uniform parse-error constructor naming the item, line number, and
    // offending text.
    let err = |item: &str, lineno: usize, line: &str| {
        Error::new(
            eyre!("failed to parse ip info ({item}[line:{lineno}]) from {line:?}"),
            ErrorKind::ParseSysInfo,
        )
    };
    for (idx, line) in raw
        .lines()
        .map(|l| l.trim())
        .enumerate()
        .filter(|(_, l)| !l.is_empty())
    {
        let mut split = line.split_whitespace();
        let _num = split.next(); // interface index column — unused
        let ifname = split.next().ok_or_else(|| err("ifname", idx, line))?;
        let _kind = split.next(); // address family column ("inet"/"inet6") — unused
        let ipnet_str = split.next().ok_or_else(|| err("ipnet", idx, line))?;
        let ipnet = ipnet_str
            .parse::<IpNet>()
            .with_ctx(|_| (ErrorKind::ParseSysInfo, err("ipnet", idx, ipnet_str)))?;
        match ipnet.addr() {
            IpAddr::V4(ip4) => res.push((ifname.into(), SocketAddr::new(ip4.into(), port))),
            IpAddr::V6(ip6) => res.push((
                ifname.into(),
                // flowinfo = 0; scope_id = interface index, required for
                // link-local IPv6 addresses to be usable.
                SocketAddr::V6(SocketAddrV6::new(
                    ip6,
                    port,
                    0,
                    if_nametoindex(ifname)
                        .with_ctx(|_| (ErrorKind::ParseSysInfo, "reading scope_id"))?,
                )),
            )),
        }
    }
    Ok(res)
}
pub struct TcpListeners {
listeners: Vec<TcpListener>,
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,134 +1,278 @@
use std::convert::Infallible;
use std::future::Future;
use std::net::SocketAddr;
use std::ops::Deref;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::task::Poll;
use std::time::Duration;
use axum::extract::Request;
use axum::Router;
use axum_server::Handle;
use bytes::Bytes;
use futures::future::{ready, BoxFuture};
use futures::future::Either;
use futures::FutureExt;
use helpers::NonDetachingJoinHandle;
use tokio::sync::{oneshot, watch};
use hyper_util::rt::{TokioIo, TokioTimer};
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::oneshot;
use crate::context::{DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext};
use crate::net::network_interface::{
NetworkInterfaceListener, SelfContainedNetworkInterfaceListener,
};
use crate::net::static_server::{
diagnostic_ui_router, init_ui_router, install_ui_router, main_ui_router, refresher,
diagnostic_ui_router, init_ui_router, install_ui_router, main_ui_router, redirecter, refresher,
setup_ui_router,
};
use crate::prelude::*;
use crate::util::actor::background::BackgroundJobQueue;
use crate::util::sync::Watch;
#[derive(Clone)]
pub struct SwappableRouter(watch::Sender<Router>);
impl SwappableRouter {
pub fn new(router: Router) -> Self {
Self(watch::channel(router).0)
}
pub fn swap(&self, router: Router) {
let _ = self.0.send_replace(router);
}
/// A TCP connection handed out by an [`Accept`] implementation.
pub struct Accepted {
    // When true, the serving layer answers this connection with an HTTPS
    // redirect instead of the real router (set by listeners for connections
    // they consider public — see the `NetworkInterfaceListener` impl).
    pub https_redirect: bool,
    // The accepted connection itself.
    pub stream: TcpStream,
}
pub struct SwappableRouterService {
router: watch::Receiver<Router>,
changed: Option<BoxFuture<'static, ()>>,
/// A pollable source of inbound TCP connections for the web server.
pub trait Accept {
    /// Polls for the next inbound connection; `Poll::Pending` when no
    /// connection is currently available.
    fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>>;
}
impl SwappableRouterService {
fn router(&self) -> Router {
self.router.borrow().clone()
}
fn changed(&mut self, cx: &mut std::task::Context<'_>) -> Poll<()> {
let mut changed = if let Some(changed) = self.changed.take() {
changed
} else {
let mut router = self.router.clone();
async move {
router.changed().await;
impl Accept for Vec<TcpListener> {
fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
for listener in &*self {
if let Poll::Ready((stream, _)) = listener.poll_accept(cx)? {
return Poll::Ready(Ok(Accepted {
https_redirect: false,
stream,
}));
}
.boxed()
};
if changed.poll_unpin(cx).is_ready() {
return Poll::Ready(());
}
self.changed = Some(changed);
Poll::Pending
}
}
impl Clone for SwappableRouterService {
fn clone(&self) -> Self {
impl Accept for NetworkInterfaceListener {
    /// Accepts from the underlying network-interface listener, mapping its
    /// `is_public` flag onto [`Accepted::https_redirect`] so connections on
    /// public interfaces are upgraded to HTTPS.
    // NOTE(review): the positional `true` argument's meaning isn't visible
    // here — confirm against `NetworkInterfaceListener::poll_accept`.
    fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
        NetworkInterfaceListener::poll_accept(self, cx, true).map(|res| {
            res.map(|a| Accepted {
                https_redirect: a.is_public,
                stream: a.stream,
            })
        })
    }
}
/// Delegates to whichever side of the `Either` this acceptor currently holds.
impl<A: Accept, B: Accept> Accept for Either<A, B> {
    fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
        match self {
            Either::Left(a) => a.poll_accept(cx),
            Either::Right(b) => b.poll_accept(cx),
        }
    }
}
/// Delegates to the inner acceptor when present; a `None` acceptor never
/// yields a connection (always `Poll::Pending`).
impl<A: Accept> Accept for Option<A> {
    fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
        if let Some(inner) = self {
            inner.poll_accept(cx)
        } else {
            Poll::Pending
        }
    }
}
/// Wraps an [`Accept`] source in a [`Watch`] so the acceptor can be swapped
/// out at runtime (e.g. upgraded) while the server keeps polling it.
#[pin_project::pin_project]
pub struct Acceptor<A: Accept> {
    acceptor: Watch<A>,
}
impl<A: Accept + Send + Sync + 'static> Acceptor<A> {
pub fn new(acceptor: A) -> Self {
Self {
router: self.router.clone(),
changed: None,
acceptor: Watch::new(acceptor),
}
}
fn poll_changed(&mut self, cx: &mut std::task::Context<'_>) -> Poll<()> {
self.acceptor.poll_changed(cx)
}
fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
let _ = self.poll_changed(cx);
self.acceptor.peek_mut(|a| a.poll_accept(cx))
}
async fn accept(&mut self) -> Result<Accepted, Error> {
std::future::poll_fn(|cx| self.poll_accept(cx)).await
}
}
impl<B> tower_service::Service<Request<B>> for SwappableRouterService
where
B: axum::body::HttpBody<Data = Bytes> + Send + 'static,
B::Error: Into<axum::BoxError>,
{
type Response = <Router as tower_service::Service<Request<B>>>::Response;
type Error = <Router as tower_service::Service<Request<B>>>::Error;
type Future = <Router as tower_service::Service<Request<B>>>::Future;
#[inline]
fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.changed(cx).is_ready() {
return Poll::Ready(Ok(()));
}
tower_service::Service::<Request<B>>::poll_ready(&mut self.router(), cx)
}
fn call(&mut self, req: Request<B>) -> Self::Future {
self.router().call(req)
impl Acceptor<Vec<TcpListener>> {
    /// Binds a TCP listener on every address in `listen`, failing if any
    /// single bind fails, and wraps them all as one acceptor.
    pub async fn bind(listen: impl IntoIterator<Item = SocketAddr>) -> Result<Self, Error> {
        Ok(Self::new(
            futures::future::try_join_all(listen.into_iter().map(TcpListener::bind)).await?,
        ))
    }
}
impl<T> tower_service::Service<T> for SwappableRouter {
type Response = SwappableRouterService;
type Error = Infallible;
type Future = futures::future::Ready<Result<Self::Response, Self::Error>>;
#[inline]
fn poll_ready(
&mut self,
_: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, _: T) -> Self::Future {
ready(Ok(SwappableRouterService {
router: self.0.subscribe(),
changed: None,
}))
/// A listener that starts in a self-contained form and can later be upgraded
/// (via [`WebServerAcceptorSetter::try_upgrade`]) to a full
/// `NetworkInterfaceListener`; `None` means no listener (never accepts).
pub type UpgradableListener =
    Option<Either<SelfContainedNetworkInterfaceListener, NetworkInterfaceListener>>;
impl Acceptor<UpgradableListener> {
    /// Creates an acceptor starting in the self-contained (pre-upgrade) state.
    pub fn bind_upgradable(listener: SelfContainedNetworkInterfaceListener) -> Self {
        Self::new(Some(Either::Left(listener)))
    }
}
pub struct WebServer {
/// Handle for replacing a running web server's acceptor after startup;
/// shares the server's `Watch` on the acceptor.
pub struct WebServerAcceptorSetter<A: Accept> {
    acceptor: Watch<A>,
}
impl<A: Accept, B: Accept> WebServerAcceptorSetter<Option<Either<A, B>>> {
    /// Upgrades the acceptor from its `Left` (pre-upgrade) form to its
    /// `Right` form by running `f` inside the watch's modify lock.
    ///
    /// If the acceptor is `None` or already `Right`, it is left untouched and
    /// `Ok(())` is returned.
    ///
    /// NOTE(review): if `f` fails, the acceptor is set to `None` — the old
    /// listener is dropped rather than restored, so the upgrade cannot be
    /// retried afterwards. Confirm this is intended.
    pub fn try_upgrade<F: FnOnce(A) -> Result<B, Error>>(&self, f: F) -> Result<(), Error> {
        // `send_modify` cannot return a value, so the closure's result is
        // smuggled out through this local.
        let mut res = Ok(());
        self.acceptor.send_modify(|a| {
            *a = match a.take() {
                Some(Either::Left(a)) => match f(a) {
                    Ok(b) => Some(Either::Right(b)),
                    Err(e) => {
                        res = Err(e);
                        None
                    }
                },
                x => x,
            }
        });
        res
    }
}
/// Derefs to the underlying [`Watch`] so callers can use its send/modify
/// methods directly on the setter.
impl<A: Accept> Deref for WebServerAcceptorSetter<A> {
    type Target = Watch<A>;
    fn deref(&self) -> &Self::Target {
        &self.acceptor
    }
}
pub struct WebServer<A: Accept> {
shutdown: oneshot::Sender<()>,
router: SwappableRouter,
router: Watch<Option<Router>>,
acceptor: Watch<A>,
thread: NonDetachingJoinHandle<()>,
}
impl WebServer {
pub fn new(bind: SocketAddr) -> Self {
let router = SwappableRouter::new(refresher());
let thread_router = router.clone();
impl<A: Accept + Send + Sync + 'static> WebServer<A> {
pub fn acceptor_setter(&self) -> WebServerAcceptorSetter<A> {
WebServerAcceptorSetter {
acceptor: self.acceptor.clone(),
}
}
pub fn new(mut acceptor: Acceptor<A>) -> Self {
let acceptor_send = acceptor.acceptor.clone();
let router = Watch::<Option<Router>>::new(None);
let service = router.clone_unseen();
let (shutdown, shutdown_recv) = oneshot::channel();
let thread = NonDetachingJoinHandle::from(tokio::spawn(async move {
let handle = Handle::new();
let mut server = axum_server::bind(bind).handle(handle.clone());
server.http_builder().http1().preserve_header_case(true);
server.http_builder().http1().title_case_headers(true);
#[derive(Clone)]
struct QueueRunner {
queue: Arc<RwLock<Option<BackgroundJobQueue>>>,
}
impl<Fut> hyper::rt::Executor<Fut> for QueueRunner
where
Fut: Future + Send + 'static,
{
fn execute(&self, fut: Fut) {
if let Some(q) = &*self.queue.read().unwrap() {
q.add_job(fut);
} else {
tracing::warn!("job queued after shutdown");
}
}
}
if let (Err(e), _) = tokio::join!(server.serve(thread_router), async {
let _ = shutdown_recv.await;
handle.graceful_shutdown(Some(Duration::from_secs(0)));
}) {
tracing::error!("Spawning hyper server error: {}", e);
struct SwappableRouter(Watch<Option<Router>>, bool);
impl hyper::service::Service<hyper::Request<hyper::body::Incoming>> for SwappableRouter {
type Response = <Router as tower_service::Service<
hyper::Request<hyper::body::Incoming>,
>>::Response;
type Error = <Router as tower_service::Service<
hyper::Request<hyper::body::Incoming>,
>>::Error;
type Future = <Router as tower_service::Service<
hyper::Request<hyper::body::Incoming>,
>>::Future;
fn call(&self, req: hyper::Request<hyper::body::Incoming>) -> Self::Future {
use tower_service::Service;
if self.1 {
redirecter().call(req)
} else {
let router = self.0.read();
if let Some(mut router) = router {
router.call(req)
} else {
refresher().call(req)
}
}
}
}
let accept = AtomicBool::new(true);
let queue_cell = Arc::new(RwLock::new(None));
let graceful = hyper_util::server::graceful::GracefulShutdown::new();
let mut server = hyper_util::server::conn::auto::Builder::new(QueueRunner {
queue: queue_cell.clone(),
});
server
.http1()
.timer(TokioTimer::new())
.title_case_headers(true)
.preserve_header_case(true)
.http2()
.timer(TokioTimer::new())
.enable_connect_protocol()
.keep_alive_interval(Duration::from_secs(60))
.keep_alive_timeout(Duration::from_secs(300));
let (queue, mut runner) = BackgroundJobQueue::new();
*queue_cell.write().unwrap() = Some(queue.clone());
let handler = async {
loop {
if let Err(e) = async {
let accepted = acceptor.accept().await?;
queue.add_job(
graceful.watch(
server
.serve_connection_with_upgrades(
TokioIo::new(accepted.stream),
SwappableRouter(service.clone(), accepted.https_redirect),
)
.into_owned(),
),
);
Ok::<_, Error>(())
}
.await
{
tracing::error!("Error accepting HTTP connection: {e}");
tracing::debug!("{e:?}");
}
}
}
.boxed();
tokio::select! {
_ = shutdown_recv => (),
_ = handler => (),
_ = &mut runner => (),
}
accept.store(false, std::sync::atomic::Ordering::SeqCst);
drop(queue);
drop(queue_cell.write().unwrap().take());
if !runner.is_empty() {
runner.await;
}
}));
Self {
shutdown,
router,
thread,
acceptor: acceptor_send,
}
}
@@ -138,7 +282,7 @@ impl WebServer {
}
pub fn serve_router(&mut self, router: Router) {
self.router.swap(router)
self.router.send(Some(router))
}
pub fn serve_main(&mut self, ctx: RpcContext) {

View File

@@ -298,7 +298,7 @@ fn display_wifi_info(params: WithIoFormat<Empty>, info: WifiListInfo) {
let mut table_global = Table::new();
table_global.add_row(row![bc =>
"CONNECTED",
"SIGNAL_STRENGTH",
"SIGNAL STRENGTH",
"COUNTRY",
"ETHERNET",
]);
@@ -306,12 +306,12 @@ fn display_wifi_info(params: WithIoFormat<Empty>, info: WifiListInfo) {
&info
.connected
.as_ref()
.map_or("[N/A]".to_owned(), |c| c.0.clone()),
.map_or("N/A".to_owned(), |c| c.0.clone()),
&info
.connected
.as_ref()
.and_then(|x| info.ssids.get(x))
.map_or("[N/A]".to_owned(), |ss| format!("{}", ss.0)),
.map_or("N/A".to_owned(), |ss| format!("{}", ss.0)),
info.country.as_ref().map(|c| c.alpha2()).unwrap_or("00"),
&format!("{}", info.ethernet)
]);
@@ -897,32 +897,28 @@ impl TypedValueParser for CountryCodeParser {
}
#[instrument(skip_all)]
pub async fn synchronize_wpa_supplicant_conf<P: AsRef<Path>>(
pub async fn synchronize_network_manager<P: AsRef<Path>>(
main_datadir: P,
wifi: &mut WifiInfo,
wifi: &WifiInfo,
) -> Result<(), Error> {
wifi.interface = find_wifi_iface().await?;
let Some(wifi_iface) = &wifi.interface else {
return Ok(());
};
let persistent = main_datadir.as_ref().join("system-connections");
tracing::debug!("persistent: {:?}", persistent);
// let supplicant = Path::new("/etc/wpa_supplicant.conf");
if tokio::fs::metadata(&persistent).await.is_err() {
tokio::fs::create_dir_all(&persistent).await?;
}
crate::disk::mount::util::bind(&persistent, "/etc/NetworkManager/system-connections", false)
.await?;
// if tokio::fs::metadata(&supplicant).await.is_err() {
// tokio::fs::write(&supplicant, include_str!("wpa_supplicant.conf.base")).await?;
// }
Command::new("systemctl")
.arg("restart")
.arg("NetworkManager")
.invoke(ErrorKind::Wifi)
.await?;
let Some(wifi_iface) = &wifi.interface else {
return Ok(());
};
Command::new("ifconfig")
.arg(wifi_iface)
.arg("up")

View File

@@ -13,11 +13,11 @@ use serde::{Deserialize, Serialize};
use tracing::instrument;
use ts_rs::TS;
use crate::backup::BackupReport;
use crate::context::{CliContext, RpcContext};
use crate::db::model::DatabaseModel;
use crate::db::model::{Database, DatabaseModel};
use crate::prelude::*;
use crate::util::serde::HandlerExtSerde;
use crate::{backup::BackupReport, db::model::Database};
// #[command(subcommands(list, delete, delete_before, create))]
pub fn notification<C: Context>() -> ParentHandler<C> {

View File

@@ -50,7 +50,7 @@ pub async fn partition(disk: &DiskInfo, overwrite: bool) -> Result<OsPartitionIn
if part_info.guid.is_some() {
if entry.first_lba < if use_efi { 33759266 } else { 33570850 } {
return Err(Error::new(
eyre!("Not enough space before embassy data"),
eyre!("Not enough space before StartOS data"),
crate::ErrorKind::InvalidRequest,
));
}

View File

@@ -6,3 +6,20 @@ pub use tracing::instrument;
pub use crate::db::prelude::*;
pub use crate::ensure_code;
pub use crate::error::{Error, ErrorCollection, ErrorKind, ResultExt};
/// Drop-in replacement for [`std::dbg!`] that routes through `tracing::debug!`
/// instead of stderr, so debug values land in the structured log.
///
/// Like `std::dbg!`: with no arguments it logs only the source location; with
/// one expression it logs `expr = value` and returns the value (moving it);
/// with several expressions it returns a tuple of their values.
#[macro_export]
macro_rules! dbg {
    () => {{
        tracing::debug!("[{}:{}:{}]", file!(), line!(), column!());
    }};
    ($e:expr) => {{
        let e = $e;
        tracing::debug!("[{}:{}:{}] {} = {e:?}", file!(), line!(), column!(), stringify!($e));
        e
    }};
    ($($e:expr),+) => {
        // Recurse via `$crate` so the expansion resolves correctly when the
        // exported macro is invoked from another crate (`crate::` would
        // resolve in the *caller's* crate root and fail there).
        ($(
            $crate::dbg!($e)
        ),+)
    }
}

View File

@@ -19,7 +19,6 @@ use crate::context::config::{ContextConfig, CONFIG_PATH};
use crate::context::{CliContext, RpcContext};
use crate::prelude::*;
use crate::registry::auth::{SignatureHeader, AUTH_SIG_HEADER};
use crate::registry::device_info::{DeviceInfo, DEVICE_INFO_HEADER};
use crate::registry::signer::sign::AnySigningKey;
use crate::registry::RegistryDatabase;
use crate::rpc_continuations::RpcContinuations;

View File

@@ -2,7 +2,6 @@ use std::collections::{BTreeMap, BTreeSet};
use axum::Router;
use futures::future::ready;
use imbl_value::InternedString;
use models::DataUrl;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler, Server};
use serde::{Deserialize, Serialize};
@@ -11,13 +10,13 @@ use ts_rs::TS;
use crate::context::CliContext;
use crate::middleware::cors::Cors;
use crate::net::static_server::{bad_request, not_found, server_error};
use crate::net::web_server::WebServer;
use crate::net::web_server::{Accept, WebServer};
use crate::prelude::*;
use crate::registry::auth::Auth;
use crate::registry::context::RegistryContext;
use crate::registry::device_info::DeviceInfoMiddleware;
use crate::registry::os::index::OsIndex;
use crate::registry::package::index::{Category, PackageIndex};
use crate::registry::package::index::PackageIndex;
use crate::registry::signer::SignerInfo;
use crate::rpc_continuations::Guid;
use crate::util::serde::HandlerExtSerde;
@@ -144,7 +143,7 @@ pub fn registry_router(ctx: RegistryContext) -> Router {
)
}
impl WebServer {
impl<A: Accept + Send + Sync + 'static> WebServer<A> {
pub fn serve_registry(&mut self, ctx: RegistryContext) {
self.serve_router(registry_router(ctx))
}

View File

@@ -72,7 +72,6 @@ pub struct PackageVersionInfo {
pub icon: DataUrl<'static>,
pub description: Description,
pub release_notes: String,
#[ts(type = "string")]
pub git_hash: GitHash,
#[ts(type = "string")]
pub license: InternedString,

View File

@@ -24,10 +24,10 @@ impl MerkleArchiveCommitment {
pub fn from_query(query: &str) -> Result<Option<Self>, Error> {
let mut root_sighash = None;
let mut root_maxsize = None;
for (k, v) in form_urlencoded::parse(dbg!(query).as_bytes()) {
for (k, v) in form_urlencoded::parse(query.as_bytes()) {
match &*k {
"rootSighash" => {
root_sighash = Some(dbg!(v).parse()?);
root_sighash = Some(v.parse()?);
}
"rootMaxsize" => {
root_maxsize = Some(v.parse()?);

View File

@@ -1,11 +1,13 @@
use std::path::Path;
use tokio::process::Command;
use ts_rs::TS;
use crate::prelude::*;
use crate::util::Invoke;
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, TS)]
#[ts(type = "string")]
pub struct GitHash(String);
impl GitHash {
@@ -31,6 +33,31 @@ impl GitHash {
}
Ok(GitHash(hash))
}
pub fn load_sync() -> Option<GitHash> {
let mut hash = String::from_utf8(
std::process::Command::new("git")
.arg("rev-parse")
.arg("HEAD")
.output()
.ok()?
.stdout,
)
.ok()?;
if !std::process::Command::new("git")
.arg("diff-index")
.arg("--quiet")
.arg("HEAD")
.arg("--")
.output()
.ok()?
.status
.success()
{
hash += "-modified";
}
Some(GitHash(hash))
}
}
impl AsRef<str> for GitHash {

View File

@@ -3,7 +3,6 @@ use std::path::Path;
use color_eyre::eyre::eyre;
use exver::{Version, VersionRange};
use helpers::const_true;
use imbl_value::InternedString;
pub use models::PackageId;
use models::{mime, ImageId, VolumeId};
@@ -62,8 +61,8 @@ pub struct Manifest {
pub dependencies: Dependencies,
#[serde(default)]
pub hardware_requirements: HardwareRequirements,
#[serde(default)]
#[ts(type = "string | null")]
#[ts(optional)]
#[serde(default = "GitHash::load_sync")]
pub git_hash: Option<GitHash>,
#[serde(default = "current_version")]
#[ts(type = "string")]

View File

@@ -294,7 +294,7 @@ impl CallbackHandler {
}
}
pub async fn call(mut self, args: Vector<Value>) -> Result<(), Error> {
dbg!(eyre!("callback fired: {}", self.handle.is_active()));
crate::dbg!(eyre!("callback fired: {}", self.handle.is_active()));
if let Some(seed) = self.seed.upgrade() {
seed.persistent_container
.callback(self.handle.take(), args)

View File

@@ -17,11 +17,11 @@ use crate::db::model::package::{
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::idmapped::IdMapped;
use crate::disk::mount::filesystem::{FileSystem, MountType};
use crate::rpc_continuations::Guid;
use crate::service::effects::prelude::*;
use crate::status::health_check::NamedHealthCheckResult;
use crate::util::Invoke;
use crate::volume::data_dir;
use crate::DATA_DIR;
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[ts(export)]
@@ -55,7 +55,7 @@ pub async fn mount(
let context = context.deref()?;
let subpath = subpath.unwrap_or_default();
let subpath = subpath.strip_prefix("/").unwrap_or(&subpath);
let source = data_dir(&context.seed.ctx.datadir, &package_id, &volume_id).join(subpath);
let source = data_dir(DATA_DIR, &package_id, &volume_id).join(subpath);
if tokio::fs::metadata(&source).await.is_err() {
tokio::fs::create_dir_all(&source).await?;
}

View File

@@ -130,10 +130,6 @@ pub fn handler<C: Context>() -> ParentHandler<C> {
"get-host-info",
from_fn_async(net::host::get_host_info).no_cli(),
)
.subcommand(
"get-primary-url",
from_fn_async(net::host::get_primary_url).no_cli(),
)
.subcommand(
"get-container-ip",
from_fn_async(net::info::get_container_ip).no_cli(),

View File

@@ -1,14 +1,12 @@
use models::{HostId, PackageId};
use crate::net::host::binding::{BindId, BindOptions, LanInfo};
use crate::net::host::HostKind;
use crate::net::host::binding::{BindId, BindOptions, NetInfo};
use crate::service::effects::prelude::*;
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct BindParams {
kind: HostKind,
id: HostId,
internal_port: u16,
#[serde(flatten)]
@@ -17,15 +15,18 @@ pub struct BindParams {
pub async fn bind(
context: EffectContext,
BindParams {
kind,
id,
internal_port,
options,
}: BindParams,
) -> Result<(), Error> {
let context = context.deref()?;
let mut svc = context.seed.persistent_container.net_service.lock().await;
svc.bind(kind, id, internal_port, options).await
context
.seed
.persistent_container
.net_service
.bind(id, internal_port, options)
.await
}
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
@@ -41,8 +42,12 @@ pub async fn clear_bindings(
ClearBindingsParams { except }: ClearBindingsParams,
) -> Result<(), Error> {
let context = context.deref()?;
let mut svc = context.seed.persistent_container.net_service.lock().await;
svc.clear_bindings(except.into_iter().collect()).await?;
context
.seed
.persistent_container
.net_service
.clear_bindings(except.into_iter().collect())
.await?;
Ok(())
}
@@ -53,15 +58,36 @@ pub struct GetServicePortForwardParams {
#[ts(optional)]
package_id: Option<PackageId>,
host_id: HostId,
internal_port: u32,
internal_port: u16,
}
pub async fn get_service_port_forward(
context: EffectContext,
data: GetServicePortForwardParams,
) -> Result<LanInfo, Error> {
let internal_port = data.internal_port as u16;
GetServicePortForwardParams {
package_id,
host_id,
internal_port,
}: GetServicePortForwardParams,
) -> Result<NetInfo, Error> {
let context = context.deref()?;
let net_service = context.seed.persistent_container.net_service.lock().await;
net_service.get_lan_port(data.host_id, internal_port)
let package_id = package_id.unwrap_or_else(|| context.seed.id.clone());
Ok(context
.seed
.ctx
.db
.peek()
.await
.as_public()
.as_package_data()
.as_idx(&package_id)
.or_not_found(&package_id)?
.as_hosts()
.as_idx(&host_id)
.or_not_found(&host_id)?
.as_bindings()
.de()?
.get(&internal_port)
.or_not_found(lazy_format!("binding for port {internal_port}"))?
.net)
}

View File

@@ -1,35 +1,10 @@
use models::{HostId, PackageId};
use crate::net::host::address::HostAddress;
use crate::net::host::Host;
use crate::service::effects::callbacks::CallbackHandler;
use crate::service::effects::prelude::*;
use crate::service::rpc::CallbackId;
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct GetPrimaryUrlParams {
#[ts(optional)]
package_id: Option<PackageId>,
host_id: HostId,
#[ts(optional)]
callback: Option<CallbackId>,
}
pub async fn get_primary_url(
context: EffectContext,
GetPrimaryUrlParams {
package_id,
host_id,
callback,
}: GetPrimaryUrlParams,
) -> Result<Option<HostAddress>, Error> {
let context = context.deref()?;
let package_id = package_id.unwrap_or_else(|| context.seed.id.clone());
Ok(None) // TODO
}
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]

View File

@@ -4,6 +4,5 @@ use crate::service::effects::prelude::*;
pub async fn get_container_ip(context: EffectContext) -> Result<Ipv4Addr, Error> {
let context = context.deref()?;
let net_service = context.seed.persistent_container.net_service.lock().await;
Ok(net_service.get_ip())
Ok(context.seed.persistent_container.net_service.get_ip().await)
}

View File

@@ -15,7 +15,6 @@ pub struct ExportServiceInterfaceParams {
id: ServiceInterfaceId,
name: String,
description: String,
has_primary: bool,
masked: bool,
address_info: AddressInfo,
r#type: ServiceInterfaceType,
@@ -26,7 +25,6 @@ pub async fn export_service_interface(
id,
name,
description,
has_primary,
masked,
address_info,
r#type,
@@ -39,7 +37,6 @@ pub async fn export_service_interface(
id: id.clone(),
name,
description,
has_primary,
masked,
address_info,
interface_type: r#type,

View File

@@ -51,10 +51,16 @@ pub async fn get_ssl_certificate(
.iter()
.map(|(_, m)| m.as_hosts().as_entries())
.flatten_ok()
.map_ok(|(_, m)| m.as_addresses().de())
.map_ok(|(_, m)| {
Ok(m.as_onions()
.de()?
.iter()
.map(InternedString::from_display)
.chain(m.as_domains().keys()?)
.collect::<Vec<_>>())
})
.map(|a| a.and_then(|a| a))
.flatten_ok()
.map_ok(|a| InternedString::from_display(&a))
.try_collect::<_, BTreeSet<_>, _>()?;
for hostname in &hostnames {
if let Some(internal) = hostname
@@ -135,10 +141,16 @@ pub async fn get_ssl_key(
.into_iter()
.map(|m| m.as_hosts().as_entries())
.flatten_ok()
.map_ok(|(_, m)| m.as_addresses().de())
.map_ok(|(_, m)| {
Ok(m.as_onions()
.de()?
.iter()
.map(InternedString::from_display)
.chain(m.as_domains().keys()?)
.collect::<Vec<_>>())
})
.map(|a| a.and_then(|a| a))
.flatten_ok()
.map_ok(|a| InternedString::from_display(&a))
.try_collect::<_, BTreeSet<_>, _>()?;
for hostname in &hostnames {
if let Some(internal) = hostname

View File

@@ -26,7 +26,7 @@ pub async fn get_store(
callback,
}: GetStoreParams,
) -> Result<Value, Error> {
dbg!(&callback);
crate::dbg!(&callback);
let context = context.deref()?;
let peeked = context.seed.ctx.db.peek().await;
let package_id = package_id.unwrap_or(context.seed.id.clone());

View File

@@ -4,12 +4,11 @@ use imbl_value::InternedString;
use models::ImageId;
use tokio::process::Command;
use crate::disk::mount::filesystem::overlayfs::OverlayGuard;
use crate::rpc_continuations::Guid;
use crate::service::effects::prelude::*;
use crate::service::persistent_container::Subcontainer;
use crate::util::Invoke;
use crate::{
disk::mount::filesystem::overlayfs::OverlayGuard, service::persistent_container::Subcontainer,
};
#[cfg(feature = "container-runtime")]
mod sync;

View File

@@ -48,7 +48,7 @@ use crate::util::net::WebSocketExt;
use crate::util::serde::{NoOutput, Pem};
use crate::util::Never;
use crate::volume::data_dir;
use crate::CAP_1_KiB;
use crate::{CAP_1_KiB, DATA_DIR, PACKAGE_DATA};
pub mod action;
pub mod cli;
@@ -149,10 +149,10 @@ impl ServiceRef {
.values()
.flat_map(|h| h.bindings.values())
.flat_map(|b| {
b.lan
b.net
.assigned_port
.into_iter()
.chain(b.lan.assigned_ssl_port)
.chain(b.net.assigned_ssl_port)
}),
);
Ok(())
@@ -167,17 +167,18 @@ impl ServiceRef {
{
let state = pde.state_info.expect_removing()?;
for volume_id in &state.manifest.volumes {
let path = data_dir(&ctx.datadir, &state.manifest.id, volume_id);
let path = data_dir(DATA_DIR, &state.manifest.id, volume_id);
if tokio::fs::metadata(&path).await.is_ok() {
tokio::fs::remove_dir_all(&path).await?;
}
}
let logs_dir = ctx.datadir.join("logs").join(&state.manifest.id);
let logs_dir = Path::new(PACKAGE_DATA)
.join("logs")
.join(&state.manifest.id);
if tokio::fs::metadata(&logs_dir).await.is_ok() {
tokio::fs::remove_dir_all(&logs_dir).await?;
}
let archive_path = ctx
.datadir
let archive_path = Path::new(PACKAGE_DATA)
.join("archive")
.join("installed")
.join(&state.manifest.id);
@@ -278,7 +279,7 @@ impl Service {
let ctx = ctx.clone();
move |s9pk: S9pk, i: Model<PackageDataEntry>| async move {
for volume_id in &s9pk.as_manifest().volumes {
let path = data_dir(&ctx.datadir, &s9pk.as_manifest().id, volume_id);
let path = data_dir(DATA_DIR, &s9pk.as_manifest().id, volume_id);
if tokio::fs::metadata(&path).await.is_err() {
tokio::fs::create_dir_all(&path).await?;
}
@@ -291,7 +292,7 @@ impl Service {
Self::new(ctx, s9pk, start_stop).await.map(Some)
}
};
let s9pk_dir = ctx.datadir.join(PKG_ARCHIVE_DIR).join("installed"); // TODO: make this based on hash
let s9pk_dir = Path::new(DATA_DIR).join(PKG_ARCHIVE_DIR).join("installed"); // TODO: make this based on hash
let s9pk_path = s9pk_dir.join(id).with_extension("s9pk");
let Some(entry) = ctx
.db
@@ -604,27 +605,11 @@ impl Service {
})
}
pub async fn update_host(&self, host_id: HostId) -> Result<(), Error> {
let host = self
.seed
.ctx
.db
.peek()
.await
.as_public()
.as_package_data()
.as_idx(&self.seed.id)
.or_not_found(&self.seed.id)?
.as_hosts()
.as_idx(&host_id)
.or_not_found(&host_id)?
.de()?;
pub async fn sync_host(&self, host_id: HostId) -> Result<(), Error> {
self.seed
.persistent_container
.net_service
.lock()
.await
.update(host_id, host)
.sync_host(host_id)
.await
}
}
@@ -934,7 +919,6 @@ pub async fn attach(
.with_kind(ErrorKind::Network)?;
current_out = "stdout";
}
dbg!(&current_out);
ws.send(Message::Binary(out))
.await
.with_kind(ErrorKind::Network)?;
@@ -948,7 +932,6 @@ pub async fn attach(
.with_kind(ErrorKind::Network)?;
current_out = "stderr";
}
dbg!(&current_out);
ws.send(Message::Binary(err))
.await
.with_kind(ErrorKind::Network)?;

View File

@@ -39,7 +39,7 @@ use crate::util::io::create_file;
use crate::util::rpc_client::UnixRpcClient;
use crate::util::Invoke;
use crate::volume::data_dir;
use crate::ARCH;
use crate::{ARCH, DATA_DIR, PACKAGE_DATA};
const RPC_CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
@@ -110,7 +110,7 @@ pub struct PersistentContainer {
pub(super) images: BTreeMap<ImageId, Arc<MountGuard>>,
pub(super) subcontainers: Arc<Mutex<BTreeMap<Guid, Subcontainer>>>,
pub(super) state: Arc<watch::Sender<ServiceState>>,
pub(super) net_service: Mutex<NetService>,
pub(super) net_service: NetService,
destroyed: bool,
}
@@ -121,8 +121,8 @@ impl PersistentContainer {
.lxc_manager
.create(
Some(
&ctx.datadir
.join("package-data/logs")
&Path::new(PACKAGE_DATA)
.join("logs")
.join(&s9pk.as_manifest().id),
),
LxcConfig::default(),
@@ -157,7 +157,7 @@ impl PersistentContainer {
.await?;
let mount = MountGuard::mount(
&IdMapped::new(
Bind::new(data_dir(&ctx.datadir, &s9pk.as_manifest().id, volume)),
Bind::new(data_dir(DATA_DIR, &s9pk.as_manifest().id, volume)),
0,
100000,
65536,
@@ -285,7 +285,7 @@ impl PersistentContainer {
images,
subcontainers: Arc::new(Mutex::new(BTreeMap::new())),
state: Arc::new(watch::channel(ServiceState::new(start)).0),
net_service: Mutex::new(net_service),
net_service,
destroyed: false,
})
}
@@ -452,7 +452,7 @@ impl PersistentContainer {
#[instrument(skip_all)]
pub async fn exit(mut self) -> Result<(), Error> {
if let Some(destroy) = self.destroy(false) {
dbg!(destroy.await)?;
destroy.await?;
}
tracing::info!("Service for {} exited", self.s9pk.as_manifest().id);

View File

@@ -155,7 +155,7 @@ impl serde::Serialize for Sandbox {
pub struct CallbackId(u64);
impl CallbackId {
pub fn register(self, container: &PersistentContainer) -> CallbackHandle {
dbg!(eyre!(
crate::dbg!(eyre!(
"callback {} registered for {}",
self.0,
container.s9pk.as_manifest().id

View File

@@ -36,7 +36,7 @@ impl Actor for ServiceActor {
ServiceActorLoopNext::DontWait => (),
}
}
})
});
}
}
@@ -92,7 +92,6 @@ async fn service_actor_loop(
..
} => MainStatus::Stopped,
};
let previous = i.as_status().de()?;
i.as_status_mut().ser(&main_status)?;
return Ok(previous
.major_changes(&main_status)

View File

@@ -1,9 +1,11 @@
use std::path::Path;
use std::sync::Arc;
use std::time::Duration;
use color_eyre::eyre::eyre;
use futures::future::BoxFuture;
use futures::{Future, FutureExt};
use futures::stream::FuturesUnordered;
use futures::{Future, FutureExt, StreamExt};
use helpers::NonDetachingJoinHandle;
use imbl::OrdMap;
use imbl_value::InternedString;
@@ -27,6 +29,7 @@ use crate::service::start_stop::StartStop;
use crate::service::{LoadDisposition, Service, ServiceRef};
use crate::status::MainStatus;
use crate::util::serde::Pem;
use crate::DATA_DIR;
pub type DownloadInstallFuture = BoxFuture<'static, Result<InstallFuture, Error>>;
pub type InstallFuture = BoxFuture<'static, Result<(), Error>>;
@@ -66,8 +69,12 @@ impl ServiceMap {
progress.start();
let ids = ctx.db.peek().await.as_public().as_package_data().keys()?;
progress.set_total(ids.len() as u64);
for id in ids {
if let Err(e) = self.load(ctx, &id, LoadDisposition::Retry).await {
let mut jobs = FuturesUnordered::new();
for id in &ids {
jobs.push(self.load(ctx, id, LoadDisposition::Retry));
}
while let Some(res) = jobs.next().await {
if let Err(e) = res {
tracing::error!("Error loading installed package as service: {e}");
tracing::debug!("{e:?}");
}
@@ -220,8 +227,7 @@ impl ServiceMap {
Ok(async move {
let (installed_path, sync_progress_task) = reload_guard
.handle(async {
let download_path = ctx
.datadir
let download_path = Path::new(DATA_DIR)
.join(PKG_ARCHIVE_DIR)
.join("downloading")
.join(&id)
@@ -251,8 +257,7 @@ impl ServiceMap {
file.sync_all().await?;
download_progress.complete();
let installed_path = ctx
.datadir
let installed_path = Path::new(DATA_DIR)
.join(PKG_ARCHIVE_DIR)
.join("installed")
.join(&id)

View File

@@ -15,6 +15,7 @@ use crate::service::ServiceActor;
use crate::util::actor::background::BackgroundJobQueue;
use crate::util::actor::{ConflictBuilder, Handler};
use crate::util::future::RemoteCancellable;
use crate::util::serde::NoOutput;
pub(in crate::service) struct Backup {
pub path: PathBuf,
@@ -48,7 +49,7 @@ impl Handler<Backup> for ServiceActor {
.mount_backup(path, ReadWrite)
.await?;
seed.persistent_container
.execute(id, ProcedureName::CreateBackup, Value::Null, None)
.execute::<NoOutput>(id, ProcedureName::CreateBackup, Value::Null, None)
.await?;
backup_guard.unmount(true).await?;

View File

@@ -11,6 +11,7 @@ use crate::service::ServiceActor;
use crate::util::actor::background::BackgroundJobQueue;
use crate::util::actor::{ConflictBuilder, Handler};
use crate::util::future::RemoteCancellable;
use crate::util::serde::NoOutput;
pub(in crate::service) struct Restore {
pub path: PathBuf,
@@ -38,7 +39,7 @@ impl Handler<Restore> for ServiceActor {
.mount_backup(path, ReadOnly)
.await?;
seed.persistent_container
.execute(id, ProcedureName::RestoreBackup, Value::Null, None)
.execute::<NoOutput>(id, ProcedureName::RestoreBackup, Value::Null, None)
.await?;
backup_guard.unmount(true).await?;
@@ -48,7 +49,7 @@ impl Handler<Restore> for ServiceActor {
Ok::<_, Error>(())
}
.map(|x| {
if let Err(err) = dbg!(x) {
if let Err(err) = x {
tracing::debug!("{:?}", err);
tracing::warn!("{}", err);
}

View File

@@ -4,6 +4,7 @@ use std::sync::Arc;
use std::time::Duration;
use color_eyre::eyre::eyre;
use const_format::formatcp;
use josekit::jwk::Jwk;
use patch_db::json_ptr::ROOT;
use rpc_toolkit::yajrc::RpcError;
@@ -30,7 +31,7 @@ use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
use crate::disk::util::{pvscan, recovery_info, DiskInfo, StartOsRecoveryInfo};
use crate::disk::REPAIR_DISK_PATH;
use crate::init::{init, InitPhases, InitResult};
use crate::net::net_controller::PreInitNetController;
use crate::net::net_controller::NetController;
use crate::net::ssl::root_ca_start_time;
use crate::prelude::*;
use crate::progress::{FullProgress, PhaseProgressTrackerHandle};
@@ -38,7 +39,7 @@ use crate::rpc_continuations::Guid;
use crate::util::crypto::EncryptedWire;
use crate::util::io::{create_file, dir_copy, dir_size, Counter};
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt};
use crate::{Error, ErrorKind, ResultExt, DATA_DIR, MAIN_DATA, PACKAGE_DATA};
pub fn setup<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
@@ -79,10 +80,11 @@ async fn setup_init(
ctx: &SetupContext,
password: Option<String>,
init_phases: InitPhases,
) -> Result<(AccountInfo, PreInitNetController), Error> {
let InitResult { net_ctrl } = init(&ctx.config, init_phases).await?;
) -> Result<(AccountInfo, InitResult), Error> {
let init_result = init(&ctx.webserver, &ctx.config, init_phases).await?;
let account = net_ctrl
let account = init_result
.net_ctrl
.db
.mutate(|m| {
let mut account = AccountInfo::load(m)?;
@@ -98,7 +100,7 @@ async fn setup_init(
})
.await?;
Ok((account, net_ctrl))
Ok((account, init_result))
}
#[derive(Deserialize, Serialize, TS)]
@@ -140,7 +142,7 @@ pub async fn attach(
disk_phase.start();
let requires_reboot = crate::disk::main::import(
&*disk_guid,
&setup_ctx.datadir,
DATA_DIR,
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
RepairStrategy::Aggressive
} else {
@@ -155,7 +157,7 @@ pub async fn attach(
.with_ctx(|_| (ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
}
if requires_reboot.0 {
crate::disk::main::export(&*disk_guid, &setup_ctx.datadir).await?;
crate::disk::main::export(&*disk_guid, DATA_DIR).await?;
return Err(Error::new(
eyre!(
"Errors were corrected with your disk, but the server must be restarted in order to proceed"
@@ -167,7 +169,7 @@ pub async fn attach(
let (account, net_ctrl) = setup_init(&setup_ctx, password, init_phases).await?;
let rpc_ctx = RpcContext::init(&setup_ctx.config, disk_guid, Some(net_ctrl), rpc_ctx_phases).await?;
let rpc_ctx = RpcContext::init(&setup_ctx.webserver, &setup_ctx.config, disk_guid, Some(net_ctrl), rpc_ctx_phases).await?;
Ok(((&account).try_into()?, rpc_ctx))
})?;
@@ -391,18 +393,13 @@ pub async fn execute_inner(
crate::disk::main::create(
&[start_os_logicalname],
&pvscan().await?,
&ctx.datadir,
DATA_DIR,
encryption_password,
)
.await?,
);
let _ = crate::disk::main::import(
&*guid,
&ctx.datadir,
RepairStrategy::Preen,
encryption_password,
)
.await?;
let _ = crate::disk::main::import(&*guid, DATA_DIR, RepairStrategy::Preen, encryption_password)
.await?;
disk_phase.complete();
let progress = SetupExecuteProgress {
@@ -456,9 +453,16 @@ async fn fresh_setup(
db.put(&ROOT, &Database::init(&account)?).await?;
drop(db);
let InitResult { net_ctrl } = init(&ctx.config, init_phases).await?;
let init_result = init(&ctx.webserver, &ctx.config, init_phases).await?;
let rpc_ctx = RpcContext::init(&ctx.config, guid, Some(net_ctrl), rpc_ctx_phases).await?;
let rpc_ctx = RpcContext::init(
&ctx.webserver,
&ctx.config,
guid,
Some(init_result),
rpc_ctx_phases,
)
.await?;
Ok(((&account).try_into()?, rpc_ctx))
}
@@ -513,10 +517,10 @@ async fn migrate(
)
.await?;
let main_transfer_args = ("/media/startos/migrate/main/", "/embassy-data/main/");
let main_transfer_args = ("/media/startos/migrate/main/", formatcp!("{MAIN_DATA}/"));
let package_data_transfer_args = (
"/media/startos/migrate/package-data/",
"/embassy-data/package-data/",
formatcp!("{PACKAGE_DATA}/"),
);
let tmpdir = Path::new(package_data_transfer_args.0).join("tmp");
@@ -571,7 +575,14 @@ async fn migrate(
let (account, net_ctrl) = setup_init(&ctx, Some(start_os_password), init_phases).await?;
let rpc_ctx = RpcContext::init(&ctx.config, guid, Some(net_ctrl), rpc_ctx_phases).await?;
let rpc_ctx = RpcContext::init(
&ctx.webserver,
&ctx.config,
guid,
Some(net_ctrl),
rpc_ctx_phases,
)
.await?;
Ok(((&account).try_into()?, rpc_ctx))
}

View File

@@ -1,4 +1,4 @@
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use crate::context::RpcContext;
@@ -7,7 +7,7 @@ use crate::init::{STANDBY_MODE_PATH, SYSTEM_REBUILD_PATH};
use crate::prelude::*;
use crate::sound::SHUTDOWN;
use crate::util::Invoke;
use crate::PLATFORM;
use crate::{DATA_DIR, PLATFORM};
#[derive(Debug, Clone)]
pub struct Shutdown {
@@ -87,7 +87,7 @@ pub async fn shutdown(ctx: RpcContext) -> Result<(), Error> {
.await?;
ctx.shutdown
.send(Some(Shutdown {
export_args: Some((ctx.disk_guid.clone(), ctx.datadir.clone())),
export_args: Some((ctx.disk_guid.clone(), Path::new(DATA_DIR).to_owned())),
restart: false,
}))
.map_err(|_| ())
@@ -107,7 +107,7 @@ pub async fn restart(ctx: RpcContext) -> Result<(), Error> {
.await?;
ctx.shutdown
.send(Some(Shutdown {
export_args: Some((ctx.disk_guid.clone(), ctx.datadir.clone())),
export_args: Some((ctx.disk_guid.clone(), Path::new(DATA_DIR).to_owned())),
restart: true,
}))
.map_err(|_| ())

View File

@@ -3,20 +3,23 @@ use std::path::Path;
use clap::builder::ValueParserFactory;
use clap::Parser;
use color_eyre::eyre::eyre;
use imbl_value::InternedString;
use models::FromStrParser;
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tokio::fs::OpenOptions;
use tokio::process::Command;
use tracing::instrument;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::hostname::Hostname;
use crate::prelude::*;
use crate::util::io::create_file;
use crate::util::serde::{display_serializable, HandlerExtSerde, WithIoFormat};
use crate::util::serde::{display_serializable, HandlerExtSerde, Pem, WithIoFormat};
use crate::util::Invoke;
pub const SSH_AUTHORIZED_KEYS_FILE: &str = "/home/start9/.ssh/authorized_keys";
pub const SSH_DIR: &str = "/home/start9/.ssh";
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct SshKeys(BTreeMap<InternedString, WithTimeData<SshPubKey>>);
@@ -143,7 +146,7 @@ pub async fn add(ctx: RpcContext, AddParams { key }: AddParams) -> Result<SshKey
))
})
.await?;
sync_keys(&keys, SSH_AUTHORIZED_KEYS_FILE).await?;
sync_pubkeys(&keys, SSH_DIR).await?;
Ok(res)
}
@@ -175,7 +178,7 @@ pub async fn delete(
}
})
.await?;
sync_keys(&keys, SSH_AUTHORIZED_KEYS_FILE).await
sync_pubkeys(&keys, SSH_DIR).await
}
fn display_all_ssh_keys(params: WithIoFormat<Empty>, result: Vec<SshKeyResponse>) {
@@ -226,23 +229,90 @@ pub async fn list(ctx: RpcContext) -> Result<Vec<SshKeyResponse>, Error> {
}
#[instrument(skip_all)]
pub async fn sync_keys<P: AsRef<Path>>(keys: &SshKeys, dest: P) -> Result<(), Error> {
pub async fn sync_keys<P: AsRef<Path>>(
hostname: &Hostname,
privkey: &Pem<ssh_key::PrivateKey>,
pubkeys: &SshKeys,
ssh_dir: P,
) -> Result<(), Error> {
use tokio::io::AsyncWriteExt;
let dest = dest.as_ref();
let ssh_dir = dest.parent().ok_or_else(|| {
Error::new(
eyre!("SSH Key File cannot be \"/\""),
crate::ErrorKind::Filesystem,
)
})?;
let ssh_dir = ssh_dir.as_ref();
if tokio::fs::metadata(ssh_dir).await.is_err() {
tokio::fs::create_dir_all(ssh_dir).await?;
}
let mut f = create_file(dest).await?;
for key in keys.0.values() {
let id_alg = if privkey.0.algorithm().is_ed25519() {
"id_ed25519"
} else if privkey.0.algorithm().is_ecdsa() {
"id_ecdsa"
} else if privkey.0.algorithm().is_rsa() {
"id_rsa"
} else {
"id_unknown"
};
let privkey_path = ssh_dir.join(id_alg);
let mut f = OpenOptions::new()
.create(true)
.write(true)
.mode(0o600)
.open(&privkey_path)
.await
.with_ctx(|_| {
(
ErrorKind::Filesystem,
lazy_format!("create {privkey_path:?}"),
)
})?;
f.write_all(privkey.to_string().as_bytes()).await?;
f.write_all(b"\n").await?;
f.sync_all().await?;
let mut f = create_file(ssh_dir.join(id_alg).with_extension("pub")).await?;
f.write_all(
(privkey
.0
.public_key()
.to_openssh()
.with_kind(ErrorKind::OpenSsh)?
+ " start9@"
+ &*hostname.0)
.as_bytes(),
)
.await?;
f.write_all(b"\n").await?;
f.sync_all().await?;
let mut f = create_file(ssh_dir.join("authorized_keys")).await?;
for key in pubkeys.0.values() {
f.write_all(key.0.to_key_format().as_bytes()).await?;
f.write_all(b"\n").await?;
}
Command::new("chown")
.arg("-R")
.arg("start9:startos")
.arg(ssh_dir)
.invoke(ErrorKind::Filesystem)
.await?;
Ok(())
}
/// Write the full set of authorized public keys to `<ssh_dir>/authorized_keys`.
///
/// Creates `ssh_dir` if it does not yet exist, then rewrites the
/// `authorized_keys` file from scratch, one key per line.
#[instrument(skip_all)]
pub async fn sync_pubkeys<P: AsRef<Path>>(pubkeys: &SshKeys, ssh_dir: P) -> Result<(), Error> {
    use tokio::io::AsyncWriteExt;

    let dir = ssh_dir.as_ref();
    // Make sure the destination directory exists before opening the file.
    if tokio::fs::metadata(dir).await.is_err() {
        tokio::fs::create_dir_all(dir).await?;
    }

    let mut authorized_keys = create_file(dir.join("authorized_keys")).await?;
    for pubkey in pubkeys.0.values() {
        authorized_keys
            .write_all(pubkey.0.to_key_format().as_bytes())
            .await?;
        authorized_keys.write_all(b"\n").await?;
    }

    Ok(())
}

View File

@@ -80,7 +80,7 @@ impl MainStatus {
}
}
pub fn backing_up(self) -> Self {
pub fn backing_up(&self) -> Self {
MainStatus::BackingUp {
on_complete: if self.running() {
StartStop::Start

View File

@@ -1,12 +1,18 @@
use std::collections::BTreeSet;
use std::fmt;
use std::sync::Arc;
use chrono::Utc;
use clap::Parser;
use color_eyre::eyre::eyre;
use futures::FutureExt;
use imbl::vector;
use mail_send::mail_builder::{self, MessageBuilder};
use mail_send::SmtpClientBuilder;
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, ParentHandler};
use rustls::crypto::CryptoProvider;
use rustls::RootCertStore;
use rustls_pki_types::CertificateDer;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use tokio::process::Command;
use tokio::sync::broadcast::Receiver;
@@ -24,6 +30,7 @@ use crate::util::cpupower::{get_available_governors, set_governor, Governor};
use crate::util::io::open_file;
use crate::util::serde::{display_serializable, HandlerExtSerde, WithIoFormat};
use crate::util::Invoke;
use crate::{MAIN_DATA, PACKAGE_DATA};
pub fn experimental<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
@@ -802,10 +809,10 @@ pub async fn get_mem_info() -> Result<MetricsMemory, Error> {
#[instrument(skip_all)]
async fn get_disk_info() -> Result<MetricsDisk, Error> {
let package_used_task = get_used("/embassy-data/package-data");
let package_available_task = get_available("/embassy-data/package-data");
let os_used_task = get_used("/embassy-data/main");
let os_available_task = get_available("/embassy-data/main");
let package_used_task = get_used(PACKAGE_DATA);
let package_available_task = get_available(PACKAGE_DATA);
let os_used_task = get_used(MAIN_DATA);
let os_available_task = get_available(MAIN_DATA);
let (package_used, package_available, os_used, os_available) = futures::try_join!(
package_used_task,
@@ -871,6 +878,93 @@ pub async fn clear_system_smtp(ctx: RpcContext) -> Result<(), Error> {
}
Ok(())
}
/// Parameters for the `test_smtp` RPC: connection and credential details for
/// sending a one-off test email.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, Parser, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct TestSmtpParams {
    /// SMTP relay hostname.
    #[arg(long)]
    pub server: String,
    /// SMTP relay port.
    #[arg(long)]
    pub port: u16,
    /// Sender address; may be in "Display Name <addr>" form.
    #[arg(long)]
    pub from: String,
    /// Recipient address; may be in "Display Name <addr>" form.
    #[arg(long)]
    pub to: String,
    /// Login for the relay; only the part before '@' is used as the username.
    #[arg(long)]
    pub login: String,
    /// Password for the relay. Optional in the type, but `test_smtp` rejects
    /// `None` with an InvalidRequest error.
    #[arg(long)]
    pub password: Option<String>,
}
/// Send a test email through the given SMTP relay to verify the user's
/// settings. Connects in plaintext and upgrades via STARTTLS, trusting the
/// system CA bundle.
pub async fn test_smtp(
    _: RpcContext,
    TestSmtpParams {
        server,
        port,
        from,
        to,
        login,
        password,
    }: TestSmtpParams,
) -> Result<(), Error> {
    use rustls_pki_types::pem::PemObject;

    // A password is mandatory for authenticating with the relay.
    let password = match password {
        Some(p) => p,
        None => {
            return Err(Error::new(
                eyre!("mail-send requires a password"),
                ErrorKind::InvalidRequest,
            ))
        }
    };

    // Build the TLS config from the system CA bundle.
    let mut roots = RootCertStore::empty();
    let ca_bundle = tokio::fs::read("/etc/ssl/certs/ca-certificates.crt").await?;
    for cert in CertificateDer::pem_slice_iter(&ca_bundle) {
        let cert = cert.with_kind(ErrorKind::OpenSsl)?;
        roots.add_parsable_certificates([cert]);
    }
    let provider = Arc::new(rustls::crypto::ring::default_provider());
    let tls_config = rustls::ClientConfig::builder_with_provider(provider)
        .with_safe_default_protocol_versions()?
        .with_root_certificates(roots)
        .with_no_client_auth();

    // `implicit_tls(false)` => plaintext connect, then STARTTLS upgrade.
    // Only the local part of the login (before '@') is used as the username.
    let username = login.split("@").next().unwrap().to_owned();
    let client = SmtpClientBuilder::new_with_tls_config(server, port, Arc::new(tls_config))
        .implicit_tls(false)
        .credentials((username, password));

    // Parse "Display Name <addr>" into a (name, addr) pair; otherwise treat
    // the whole string as a bare address.
    fn parse_address<'a>(addr: &'a str) -> mail_builder::headers::address::Address<'a> {
        let bracketed = match (addr.find("<"), addr.find(">")) {
            (Some(start), Some(end)) => start < end,
            _ => false,
        };
        if bracketed {
            let (name, rest) = addr.split_once("<").unwrap();
            (name.trim(), rest.strip_suffix(">").unwrap_or(rest)).into()
        } else {
            addr.into()
        }
    }

    let message = MessageBuilder::new()
        .from(parse_address(&from))
        .to(parse_address(&to))
        .subject("StartOS Test Email")
        .text_body("This is a test email sent from your StartOS Server");

    let mut connection = client.connect().await.map_err(|e| {
        Error::new(
            eyre!("mail-send connection error: {:?}", e),
            ErrorKind::Unknown,
        )
    })?;
    connection
        .send(message)
        .await
        .map_err(|e| Error::new(eyre!("mail-send send error: {:?}", e), ErrorKind::Unknown))?;
    Ok(())
}
#[tokio::test]
#[ignore]

View File

@@ -20,7 +20,7 @@ use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::efivarfs::{self, EfiVarFs};
use crate::disk::mount::filesystem::efivarfs::EfiVarFs;
use crate::disk::mount::filesystem::overlayfs::OverlayGuard;
use crate::disk::mount::filesystem::MountType;
use crate::disk::mount::guard::{GenericMountGuard, MountGuard, TmpMountGuard};
@@ -106,7 +106,7 @@ pub async fn update_system(
.with_kind(ErrorKind::Database)?,
)
.await;
while {
loop {
let progress = ctx
.db
.peek()
@@ -122,14 +122,22 @@ pub async fn update_system(
))
.await
.with_kind(ErrorKind::Network)?;
progress.is_some()
} {
sub.recv().await;
if progress.is_none() {
return ws.normal_close("complete").await;
}
tokio::select! {
_ = sub.recv() => (),
res = async {
loop {
if ws.recv().await.transpose().with_kind(ErrorKind::Network)?.is_none() {
return Ok(())
}
}
} => {
return res
}
}
}
ws.normal_close("complete").await?;
Ok::<_, Error>(())
}
.await
{

View File

@@ -15,8 +15,13 @@ impl BackgroundJobQueue {
},
)
}
pub fn add_job(&self, fut: impl Future<Output = ()> + Send + 'static) {
let _ = self.0.send(fut.boxed());
pub fn add_job(&self, fut: impl Future + Send + 'static) {
let _ = self.0.send(
async {
fut.await;
}
.boxed(),
);
}
}

View File

@@ -1,11 +1,13 @@
use std::pin::Pin;
use std::task::{Context, Poll};
use futures::future::abortable;
use futures::stream::{AbortHandle, Abortable};
use futures::Future;
use futures::future::{abortable, pending, BoxFuture, FusedFuture};
use futures::stream::{AbortHandle, Abortable, BoxStream};
use futures::{Future, FutureExt, Stream, StreamExt};
use tokio::sync::watch;
use crate::prelude::*;
#[pin_project::pin_project(PinnedDrop)]
pub struct DropSignaling<F> {
#[pin]
@@ -102,6 +104,60 @@ impl CancellationHandle {
}
}
/// Builder that races a future against a set of termination conditions.
///
/// Conditions may be supplied as streams or as repeatedly-invocable async
/// functions; see `run` for the exact race semantics.
#[derive(Default)]
pub struct Until<'a> {
    // Each stream's next item ends the race; `Err` from a stream aborts it.
    streams: Vec<BoxStream<'a, Result<(), Error>>>,
    // Factories re-invoked on every `run` call to produce a condition future.
    async_fns: Vec<Box<dyn FnMut() -> BoxFuture<'a, Result<(), Error>> + Send + 'a>>,
}
impl<'a> Until<'a> {
    /// Create an `Until` with no termination conditions registered.
    pub fn new() -> Self {
        Self::default()
    }

    /// Register a stream as a termination condition: `run` ends when the
    /// stream yields its next item. An exhausted stream terminates `run`
    /// with a `Cancelled` error (see `run`).
    pub fn with_stream(
        mut self,
        stream: impl Stream<Item = Result<(), Error>> + Send + 'a,
    ) -> Self {
        self.streams.push(stream.boxed());
        self
    }

    /// Register an async function as a termination condition; it is invoked
    /// anew on each call to `run` to produce the future to race.
    pub fn with_async_fn<F, Fut>(mut self, mut f: F) -> Self
    where
        F: FnMut() -> Fut + Send + 'a,
        Fut: Future<Output = Result<(), Error>> + FusedFuture + Send + 'a,
    {
        self.async_fns.push(Box::new(move || f().boxed()));
        self
    }

    /// Race `fut` against every registered condition via `select_all` and
    /// return the first branch to resolve.
    ///
    /// Branches:
    ///   - each stream's next item (an exhausted stream resolves to a
    ///     `Cancelled` error),
    ///   - each async-fn condition,
    ///   - `fut` itself: if `fut` completes successfully its branch parks on
    ///     `pending()`, so `run` keeps waiting for a condition to fire —
    ///     only an `Err` from `fut` ends `run` directly.
    pub async fn run<Fut: Future<Output = Result<(), Error>> + Send>(
        &mut self,
        fut: Fut,
    ) -> Result<(), Error> {
        let (res, _, _) = futures::future::select_all(
            self.streams
                .iter_mut()
                .map(|s| {
                    async {
                        // next() -> Option<Result<()>>; transpose + `?` turns a
                        // stream error into an early Err, then a bare None
                        // (exhausted stream) becomes Cancelled.
                        s.next().await.transpose()?.ok_or_else(|| {
                            Error::new(eyre!("stream is empty"), ErrorKind::Cancelled)
                        })
                    }
                    .boxed()
                })
                .chain(self.async_fns.iter_mut().map(|f| f()))
                .chain([async {
                    fut.await?;
                    pending().await
                }
                .boxed()]),
        )
        .await;
        res
    }
}
#[tokio::test]
async fn test_cancellable() {
use std::sync::Arc;

View File

@@ -15,7 +15,7 @@ use futures::future::{BoxFuture, Fuse};
use futures::{AsyncSeek, FutureExt, Stream, TryStreamExt};
use helpers::NonDetachingJoinHandle;
use nix::unistd::{Gid, Uid};
use tokio::fs::File;
use tokio::fs::{File, OpenOptions};
use tokio::io::{
duplex, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, DuplexStream, ReadBuf, WriteHalf,
};
@@ -460,18 +460,30 @@ impl<T> BackTrackingIO<T> {
}
}
}
pub fn rewind(&mut self) -> Vec<u8> {
pub fn rewind<'a>(&'a mut self) -> (Vec<u8>, &'a [u8]) {
match std::mem::take(&mut self.buffer) {
BTBuffer::Buffering { read, write } => {
self.buffer = BTBuffer::Rewound {
read: Cursor::new(read),
};
write
(
write,
match &self.buffer {
BTBuffer::Rewound { read } => read.get_ref(),
_ => unreachable!(),
},
)
}
BTBuffer::NotBuffering => Vec::new(),
BTBuffer::NotBuffering => (Vec::new(), &[]),
BTBuffer::Rewound { read } => {
self.buffer = BTBuffer::Rewound { read };
Vec::new()
(
Vec::new(),
match &self.buffer {
BTBuffer::Rewound { read } => read.get_ref(),
_ => unreachable!(),
},
)
}
}
}
@@ -529,7 +541,6 @@ impl<T: std::io::Read> std::io::Read for BackTrackingIO<T> {
}
BTBuffer::NotBuffering => self.io.read(buf),
BTBuffer::Rewound { read } => {
let mut ready = false;
if (read.position() as usize) < read.get_ref().len() {
let n = std::io::Read::read(read, buf)?;
if n != 0 {
@@ -923,6 +934,21 @@ pub async fn create_file(path: impl AsRef<Path>) -> Result<File, Error> {
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("create {path:?}")))
}
/// Open `path` for appending, creating the file and any missing parent
/// directories if necessary.
pub async fn append_file(path: impl AsRef<Path>) -> Result<File, Error> {
    let path = path.as_ref();
    // Ensure the parent directory chain exists before opening the file.
    if let Some(parent) = path.parent() {
        tokio::fs::create_dir_all(parent)
            .await
            .with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("mkdir -p {parent:?}")))?;
    }
    let mut options = OpenOptions::new();
    options.create(true).append(true);
    options
        .open(path)
        .await
        .with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("create {path:?}")))
}
pub async fn delete_file(path: impl AsRef<Path>) -> Result<(), Error> {
let path = path.as_ref();
tokio::fs::remove_file(path)

View File

@@ -1,13 +1,62 @@
use std::io;
use std::fs::File;
use std::io::{self, Write};
use std::sync::{Arc, Mutex, MutexGuard};
use lazy_static::lazy_static;
use tracing::Subscriber;
use tracing_subscriber::fmt::MakeWriter;
use tracing_subscriber::util::SubscriberInitExt;
#[derive(Clone)]
pub struct EmbassyLogger {}
lazy_static! {
pub static ref LOGGER: StartOSLogger = StartOSLogger::init();
}
impl EmbassyLogger {
fn base_subscriber() -> impl Subscriber {
#[derive(Clone)]
pub struct StartOSLogger {
logfile: LogFile,
}
// Handle to an optional log file shared with the tracing subscriber.
// When the inner `Option<File>` is `Some`, log output is tee'd to both the
// file and stderr; when `None`, output goes to stderr only.
#[derive(Clone, Default)]
struct LogFile(Arc<Mutex<Option<File>>>);

impl<'a> MakeWriter<'a> for LogFile {
    type Writer = Box<dyn Write + 'a>;
    fn make_writer(&'a self) -> Self::Writer {
        // Lock once per writer; the guard stays held for the writer's
        // lifetime, so a whole log line is written without another thread
        // interleaving into the file.
        let f = self.0.lock().unwrap();
        if f.is_some() {
            // Writes to the log file first, then mirrors the accepted bytes
            // to stderr.
            struct TeeWriter<'a>(MutexGuard<'a, Option<File>>);
            impl<'a> Write for TeeWriter<'a> {
                fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
                    // Write to the file first; only the prefix the file
                    // actually accepted is echoed to stderr, keeping the two
                    // outputs consistent. (The `None` arm is unreachable in
                    // practice since this writer is built under `is_some()`.)
                    let n = if let Some(f) = &mut *self.0 {
                        f.write(buf)?
                    } else {
                        buf.len()
                    };
                    io::stderr().write_all(&buf[..n])?;
                    Ok(n)
                }
                fn flush(&mut self) -> io::Result<()> {
                    if let Some(f) = &mut *self.0 {
                        f.flush()?;
                    }
                    Ok(())
                }
            }
            Box::new(TeeWriter(f))
        } else {
            // No log file configured: release the lock and write straight to
            // stderr.
            drop(f);
            Box::new(io::stderr())
        }
    }
}
impl StartOSLogger {
pub fn enable(&self) {}
pub fn set_logfile(&self, logfile: Option<File>) {
*self.logfile.0.lock().unwrap() = logfile;
}
fn base_subscriber(logfile: LogFile) -> impl Subscriber {
use tracing_error::ErrorLayer;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{fmt, EnvFilter};
@@ -24,7 +73,7 @@ impl EmbassyLogger {
.add_directive("tokio=trace".parse().unwrap())
.add_directive("runtime=trace".parse().unwrap());
let fmt_layer = fmt::layer()
.with_writer(io::stderr)
.with_writer(logfile)
.with_line_number(true)
.with_file(true)
.with_target(true);
@@ -39,11 +88,12 @@ impl EmbassyLogger {
sub
}
pub fn init() -> Self {
Self::base_subscriber().init();
fn init() -> Self {
let logfile = LogFile::default();
Self::base_subscriber(logfile.clone()).init();
color_eyre::install().unwrap_or_else(|_| tracing::warn!("tracing too many times"));
EmbassyLogger {}
StartOSLogger { logfile }
}
}

View File

@@ -19,13 +19,8 @@ pub trait WebSocketExt {
}
impl WebSocketExt for ws::WebSocket {
async fn normal_close(mut self, msg: impl Into<Cow<'static, str>> + Send) -> Result<(), Error> {
self.send(ws::Message::Close(Some(CloseFrame {
code: 1000,
reason: msg.into(),
})))
.await
.with_kind(ErrorKind::Network)
async fn normal_close(self, msg: impl Into<Cow<'static, str>> + Send) -> Result<(), Error> {
self.close_result(Ok::<_, Error>(msg)).await
}
async fn close_result(
mut self,
@@ -38,15 +33,23 @@ impl WebSocketExt for ws::WebSocket {
reason: msg.into(),
})))
.await
.with_kind(ErrorKind::Network),
.with_kind(ErrorKind::Network)?,
Err(e) => self
.send(ws::Message::Close(Some(CloseFrame {
code: 1011,
reason: e.to_string().into(),
})))
.await
.with_kind(ErrorKind::Network),
.with_kind(ErrorKind::Network)?,
}
while !matches!(
self.recv()
.await
.transpose()
.with_kind(ErrorKind::Network)?,
Some(ws::Message::Close(_)) | None
) {}
Ok(())
}
}

View File

@@ -3,7 +3,6 @@ use std::path::Path;
use clap::Parser;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use url::Url;
use crate::context::CliContext;
use crate::prelude::*;

View File

@@ -47,7 +47,7 @@ impl RpcClient {
let mut lines = BufReader::new(reader).lines();
while let Some(line) = lines.next_line().await.transpose() {
match line.map_err(Error::from).and_then(|l| {
serde_json::from_str::<RpcResponse>(dbg!(&l))
serde_json::from_str::<RpcResponse>(crate::dbg!(&l))
.with_kind(ErrorKind::Deserialization)
}) {
Ok(l) => {
@@ -114,7 +114,7 @@ impl RpcClient {
let (send, recv) = oneshot::channel();
w.lock().await.insert(id.clone(), send);
self.writer
.write_all((dbg!(serde_json::to_string(&request))? + "\n").as_bytes())
.write_all((crate::dbg!(serde_json::to_string(&request))? + "\n").as_bytes())
.await
.map_err(|e| {
let mut err = rpc_toolkit::yajrc::INTERNAL_ERROR.clone();
@@ -154,7 +154,7 @@ impl RpcClient {
params,
};
self.writer
.write_all((dbg!(serde_json::to_string(&request))? + "\n").as_bytes())
.write_all((crate::dbg!(serde_json::to_string(&request))? + "\n").as_bytes())
.await
.map_err(|e| {
let mut err = rpc_toolkit::yajrc::INTERNAL_ERROR.clone();

View File

@@ -1,3 +1,8 @@
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Poll, Waker};
#[derive(Debug, Default)]
pub struct SyncMutex<T>(std::sync::Mutex<T>);
impl<T> SyncMutex<T> {
pub fn new(t: T) -> Self {
@@ -10,3 +15,148 @@ impl<T> SyncMutex<T> {
f(&*self.0.lock().unwrap())
}
}
/// State shared by every handle of a `Watch` channel.
struct WatchShared<T> {
    /// Change counter, incremented on every modification.
    version: u64,
    /// The current value.
    data: T,
    /// Tasks waiting to be notified of the next modification.
    wakers: Vec<Waker>,
}

impl<T> WatchShared<T> {
    /// Record a modification: advance the version and wake all waiting tasks.
    fn modified(&mut self) {
        self.version += 1;
        self.wakers.drain(..).for_each(Waker::wake);
    }
}
/// A minimal single-value watch channel; every handle can both send and
/// receive.
///
/// Each handle remembers the last `version` of the shared data it observed;
/// the shared state's version is bumped on every modification (see
/// `WatchShared::modified`).
#[pin_project::pin_project]
pub struct Watch<T> {
    shared: Arc<SyncMutex<WatchShared<T>>>,
    // Last version seen by this handle; 0 means "nothing seen yet".
    version: u64,
}
impl<T> Clone for Watch<T> {
fn clone(&self) -> Self {
Self {
shared: self.shared.clone(),
version: self.version,
}
}
}
impl<T> Watch<T> {
    /// Construct a new watch wrapping `init`.
    ///
    /// The shared state begins at version 1 while this handle's seen version
    /// is 0, so the initial value is immediately reported as a change.
    pub fn new(init: T) -> Self {
        Self {
            shared: Arc::new(SyncMutex::new(WatchShared {
                version: 1,
                data: init,
                wakers: Vec::new(),
            })),
            version: 0,
        }
    }

    /// Clone this handle with its seen version reset, so the current value
    /// counts as unseen.
    pub fn clone_unseen(&self) -> Self {
        Self {
            shared: Arc::clone(&self.shared),
            version: 0,
        }
    }

    /// Poll for a modification made since this handle last observed the
    /// shared state; registers `cx`'s waker (deduplicated via `will_wake`)
    /// when pending.
    pub fn poll_changed(&mut self, cx: &mut std::task::Context<'_>) -> Poll<()> {
        self.shared.mutate(|shared| {
            if shared.version == self.version {
                let waker = cx.waker();
                if !shared.wakers.iter().any(|w| w.will_wake(waker)) {
                    shared.wakers.push(waker.clone());
                }
                Poll::Pending
            } else {
                self.version = shared.version;
                Poll::Ready(())
            }
        })
    }

    /// Wait until the value has been modified since last seen.
    pub async fn changed(&mut self) {
        futures::future::poll_fn(|cx| self.poll_changed(cx)).await
    }

    /// Wait until `f` returns true for the current value.
    pub async fn wait_for<F: FnMut(&T) -> bool>(&mut self, mut f: F) {
        while !self.peek(&mut f) {
            self.changed().await;
        }
    }

    /// Apply `modify`, notifying watchers only if it reports a change;
    /// returns whether a change was reported.
    pub fn send_if_modified<F: FnOnce(&mut T) -> bool>(&self, modify: F) -> bool {
        self.shared.mutate(|shared| {
            if modify(&mut shared.data) {
                shared.modified();
                true
            } else {
                false
            }
        })
    }

    /// Apply `modify`, always notifying watchers, and return its result.
    pub fn send_modify<U, F: FnOnce(&mut T) -> U>(&self, modify: F) -> U {
        self.shared.mutate(|shared| {
            let out = modify(&mut shared.data);
            shared.modified();
            out
        })
    }

    /// Replace the value, returning the previous one.
    pub fn send_replace(&self, new: T) -> T {
        self.send_modify(|slot| std::mem::replace(slot, new))
    }

    /// Replace the value, discarding the previous one.
    pub fn send(&self, new: T) {
        let _ = self.send_replace(new);
    }

    /// Notify watchers without touching the value.
    pub fn mark_changed(&self) {
        self.shared.mutate(|shared| shared.modified())
    }

    /// Reset this handle so the current value is reported as unseen.
    pub fn mark_unseen(&mut self) {
        self.version = 0;
    }

    /// Mark the current value as already seen by this handle.
    pub fn mark_seen(&mut self) {
        self.version = self.shared.peek(|shared| shared.version);
    }

    /// Inspect the current value without affecting seen state.
    pub fn peek<U, F: FnOnce(&T) -> U>(&self, f: F) -> U {
        self.shared.peek(|shared| f(&shared.data))
    }

    /// Inspect the current value and mark it seen in one locked operation.
    pub fn peek_and_mark_seen<U, F: FnOnce(&T) -> U>(&mut self, f: F) -> U {
        self.shared.peek(|shared| {
            self.version = shared.version;
            f(&shared.data)
        })
    }

    /// Mutate the value WITHOUT notifying watchers.
    pub fn peek_mut<U, F: FnOnce(&mut T) -> U>(&self, f: F) -> U {
        self.shared.mutate(|shared| f(&mut shared.data))
    }
}
impl<T: Clone> Watch<T> {
    /// Return a clone of the current value without affecting seen state.
    pub fn read(&self) -> T {
        self.peek(T::clone)
    }
}
/// Stream adapter: yields a clone of the value each time it changes relative
/// to this handle's seen version. Never terminates (`size_hint` lower bound
/// of 1, no upper bound).
impl<T: Clone> futures::Stream for Watch<T> {
    type Item = T;
    fn poll_next(
        self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> Poll<Option<Self::Item>> {
        let this = self.project();
        this.shared.mutate(|shared| {
            if shared.version == *this.version {
                // Unchanged: register interest, deduping by will_wake.
                let waker = cx.waker();
                if !shared.wakers.iter().any(|w| w.will_wake(waker)) {
                    shared.wakers.push(waker.clone());
                }
                Poll::Pending
            } else {
                *this.version = shared.version;
                Poll::Ready(Some(shared.data.clone()))
            }
        })
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        (1, None)
    }
}

View File

@@ -7,7 +7,7 @@ use futures::future::BoxFuture;
use futures::{Future, FutureExt};
use imbl::Vector;
use imbl_value::{to_value, InternedString};
use patch_db::json_ptr::{JsonPointer, ROOT};
use patch_db::json_ptr::ROOT;
use crate::context::RpcContext;
use crate::db::model::Database;
@@ -29,7 +29,12 @@ mod v0_3_6_alpha_7;
mod v0_3_6_alpha_8;
mod v0_3_6_alpha_9;
pub type Current = v0_3_6_alpha_9::Version; // VERSION_BUMP
mod v0_3_6_alpha_10;
mod v0_3_6_alpha_11;
mod v0_3_6_alpha_12;
mod v0_3_6_alpha_13;
pub type Current = v0_3_6_alpha_13::Version; // VERSION_BUMP
impl Current {
#[instrument(skip(self, db))]
@@ -52,7 +57,7 @@ impl Current {
let pre_ups = PreUps::load(&from, &self).await?;
db.apply_function(|mut db| {
migrate_from_unchecked(&from, &self, pre_ups, &mut db)?;
Ok::<_, Error>((db, ()))
Ok::<_, Error>((to_value(&from_value::<Database>(db.clone())?)?, ()))
})
.await?;
}
@@ -62,31 +67,44 @@ impl Current {
}
}
pub async fn post_init(ctx: &RpcContext) -> Result<(), Error> {
let mut peek;
while let Some(version) = {
peek = ctx.db.peek().await;
peek.as_public()
.as_server_info()
.as_post_init_migration_todos()
.de()?
.first()
.cloned()
.map(Version::from_exver_version)
.as_ref()
.map(Version::as_version_t)
.transpose()?
} {
version.0.post_up(ctx).await?;
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_post_init_migration_todos_mut()
.mutate(|m| Ok(m.remove(&version.0.semver())))
})
.await?;
pub async fn post_init(
ctx: &RpcContext,
mut progress: PhaseProgressTrackerHandle,
) -> Result<(), Error> {
let mut peek = ctx.db.peek().await;
let todos = peek
.as_public()
.as_server_info()
.as_post_init_migration_todos()
.de()?;
if !todos.is_empty() {
progress.set_total(todos.len() as u64);
while let Some(version) = {
peek = ctx.db.peek().await;
peek.as_public()
.as_server_info()
.as_post_init_migration_todos()
.de()?
.first()
.cloned()
.map(Version::from_exver_version)
.as_ref()
.map(Version::as_version_t)
.transpose()?
} {
version.0.post_up(ctx).await?;
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_post_init_migration_todos_mut()
.mutate(|m| Ok(m.remove(&version.0.semver())))
})
.await?;
progress += 1;
}
}
progress.complete();
Ok(())
}
@@ -108,6 +126,10 @@ enum Version {
V0_3_6_alpha_7(Wrapper<v0_3_6_alpha_7::Version>),
V0_3_6_alpha_8(Wrapper<v0_3_6_alpha_8::Version>),
V0_3_6_alpha_9(Wrapper<v0_3_6_alpha_9::Version>),
V0_3_6_alpha_10(Wrapper<v0_3_6_alpha_10::Version>),
V0_3_6_alpha_11(Wrapper<v0_3_6_alpha_11::Version>),
V0_3_6_alpha_12(Wrapper<v0_3_6_alpha_12::Version>),
V0_3_6_alpha_13(Wrapper<v0_3_6_alpha_13::Version>), // VERSION_BUMP
Other(exver::Version),
}
@@ -141,6 +163,10 @@ impl Version {
Self::V0_3_6_alpha_7(v) => DynVersion(Box::new(v.0)),
Self::V0_3_6_alpha_8(v) => DynVersion(Box::new(v.0)),
Self::V0_3_6_alpha_9(v) => DynVersion(Box::new(v.0)),
Self::V0_3_6_alpha_10(v) => DynVersion(Box::new(v.0)),
Self::V0_3_6_alpha_11(v) => DynVersion(Box::new(v.0)),
Self::V0_3_6_alpha_12(v) => DynVersion(Box::new(v.0)),
Self::V0_3_6_alpha_13(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
Self::Other(v) => {
return Err(Error::new(
eyre!("unknown version {v}"),
@@ -166,6 +192,10 @@ impl Version {
Version::V0_3_6_alpha_7(Wrapper(x)) => x.semver(),
Version::V0_3_6_alpha_8(Wrapper(x)) => x.semver(),
Version::V0_3_6_alpha_9(Wrapper(x)) => x.semver(),
Version::V0_3_6_alpha_10(Wrapper(x)) => x.semver(),
Version::V0_3_6_alpha_11(Wrapper(x)) => x.semver(),
Version::V0_3_6_alpha_12(Wrapper(x)) => x.semver(),
Version::V0_3_6_alpha_13(Wrapper(x)) => x.semver(), // VERSION_BUMP
Version::Other(x) => x.clone(),
}
}

View File

@@ -17,17 +17,16 @@ StartOS v0.3.6 is a complete rewrite of the OS internals (everything you don't s
## Changelog
- [Switch to lxc-based container runtime](#lxc)
- [Update s9pk archive format](#new-s9pk-archive-format)
- [Improve config](#better-config)
- [Unify Actions](#unify-actions)
- [Update s9pk archive format](#s9pk-archive-format)
- [Improve Actions](#actions)
- [Use squashfs images for OS updates](#squashfs-updates)
- [Introduce Typescript package API and SDK](#typescript-package-api-and-sdk)
- [Introduce Typescript package API and SDK](#typescript-sdk)
- [Remove PostgreSQL](#remove-postgressql)
- [Implement detailed progress reporting](#progress-reporting)
- [Improve registry protocol](#registry-protocol)
- [Replace unique .local URLs with unique ports](#lan-port-forwarding)
- [Use start-fs Fuse module for improved backups](#improved-backups)
- [Switch to Exver for versioning](#Exver)
- [Switch to Exver for versioning](#exver)
- [Support clearnet hosting via start-cli](#clearnet)
### LXC
@@ -38,21 +37,17 @@ StartOS now uses a nested container paradigm based on LXC for the outer containe
The S9PK archive format has been overhauled to allow for signature verification of partial downloads, and allow direct mounting of container images without unpacking the s9pk.
### Better config
Expanded support for input types and a new UI makes configuring services easier and more powerful.
### Actions
Actions take arbitrary form input _and_ return arbitrary responses, thus satisfying the needs of both Config and Properties, which will be removed in a future release. This gives packages developers the ability to break up Config and Properties into smaller, more specific formats, or to exclude them entirely without polluting the UI.
Actions take arbitrary form input and return arbitrary responses, thus satisfying the needs of both Config and Properties, which have been removed. The new actions API gives package developers the ability to break up Config and Properties into smaller, more specific formats, or to exclude them entirely without polluting the UI. Improved form design and new input types round out the actions experience.
### Squashfs updates
StartOS now uses squashfs images to represent OS updates. This allows for better update verification, and improved reliability over rsync updates.
### Typescript package API and SDK
### Typescript SDK
StartOS now exposes a Typescript API. Package developers can take advantage in a simple, typesafe way using the new start-sdk. A barebones StartOS package (s9pk) can be produced in minutes with minimal knowledge or skill. More advanced developers can use the SDK to create highly customized user experiences with their service.
Package developers can now take advantage of StartOS APIs using the new start-sdk, available in Typescript. A barebones StartOS package (s9pk) can be produced in minutes with minimal knowledge or skill. More advanced developers can use the SDK to create highly customized user experiences with their service.
### Remove PostgresSQL
@@ -76,8 +71,14 @@ The new start-fs fuse module unifies file system expectations for various platfo
### Exver
StartOS now uses Extended Versioning (Exver), which consists of three parts, separated by semicolons: (1) a Semver-compliant upstream version, (2) a Semver-compliant wrapper version, and (3) an optional "flavor" prefix. Flavors can be thought of as alternative implementations of services, where a user would only want one or the other installed, and data can feasibly be migrating beetween the two. Another common characteristic of flavors is that they satisfy the same API requirement of dependents, though this is not strictly necessary. A valid Exver looks something like this: `#knots:28.0.:1.0-beta.1`. This would translate to "the first beta release of StartOS wrapper version 1.0 of Bitcoin Knots version 27.0".
StartOS now uses Extended Versioning (Exver), which consists of three parts: (1) a Semver-compliant upstream version, (2) a Semver-compliant wrapper version, and (3) an optional "flavor" prefix. Flavors can be thought of as alternative implementations of services, where a user would only want one or the other installed, and data can feasibly be migrated between the two. Another common characteristic of flavors is that they satisfy the same API requirement of dependents, though this is not strictly necessary. A valid Exver looks something like this: `#knots:28.0:1.0-beta.1`. This would translate to "the first beta release of StartOS wrapper version 1.0 of Bitcoin Knots version 28.0".
### Clearnet
It is now possible, and quite easy, to expose specific services interfaces to the public Internet on a standard domain using start-cli. This functionality will be expanded upon and moved into the StartOS UI in a future release.
It is now possible, and quite easy, to expose service interfaces to the public Internet on a standard domain using start-cli. In addition to choosing which service interfaces to expose on which domains/subdomains, users have two options:
1. Open ports on their router. This option is free and easy to accomplish with most routers. The drawback is that the user's home IP address is revealed to anyone accessing the exposed resources. For example, hosting a blog in this way would reveal your home IP address, and therefore your approximate location on Earth, to your readers.
2. Use a Wireguard VPN to proxy web traffic. This option requires the user to provision a $5-$10/month remote VPS and perform a few simple commands. The result is the successful obfuscation of the user's home IP address.
The CLI-driven clearnet functionality will be expanded upon and moved into the main StartOS UI in a future release.

View File

@@ -1,19 +1,16 @@
use std::collections::BTreeMap;
use std::future::Future;
use std::path::Path;
use chrono::{DateTime, Utc};
use const_format::formatcp;
use ed25519_dalek::SigningKey;
use exver::{PreReleaseSegment, VersionRange};
use imbl_value::{json, InternedString};
use itertools::Itertools;
use models::PackageId;
use openssl::pkey::{PKey, Private};
use openssl::pkey::PKey;
use openssl::x509::X509;
use patch_db::ModelExt;
use sqlx::postgres::PgConnectOptions;
use sqlx::{PgPool, Row};
use ssh_key::Fingerprint;
use tokio::process::Command;
use torut::onion::TorSecretKeyV3;
@@ -23,15 +20,11 @@ use crate::account::AccountInfo;
use crate::auth::Sessions;
use crate::backup::target::cifs::CifsTargets;
use crate::context::RpcContext;
use crate::db::model::Database;
use crate::disk::mount::filesystem::cifs::Cifs;
use crate::disk::mount::util::unmount;
use crate::hostname::Hostname;
use crate::net::forward::AvailablePorts;
use crate::net::keys::KeyStore;
use crate::net::ssl::CertStore;
use crate::net::tor;
use crate::net::tor::OnionStore;
use crate::notifications::{Notification, Notifications};
use crate::prelude::*;
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
@@ -39,6 +32,7 @@ use crate::ssh::{SshKeys, SshPubKey};
use crate::util::crypto::ed25519_expand_key;
use crate::util::serde::{Pem, PemEncoding};
use crate::util::Invoke;
use crate::{DATA_DIR, PACKAGE_DATA};
lazy_static::lazy_static! {
static ref V0_3_6_alpha_0: exver::Version = exver::Version::new(
@@ -191,7 +185,6 @@ async fn init_postgres(datadir: impl AsRef<Path>) -> Result<PgPool, Error> {
.run(&secret_store)
.await
.with_kind(crate::ErrorKind::Database)?;
dbg!("Init Postgres Done");
Ok(secret_store)
}
@@ -200,7 +193,7 @@ pub struct Version;
impl VersionT for Version {
type Previous = v0_3_5_2::Version;
type PreUpRes = (AccountInfo, SshKeys, CifsTargets, Notifications);
type PreUpRes = (AccountInfo, SshKeys, CifsTargets);
fn semver(self) -> exver::Version {
V0_3_6_alpha_0.clone()
}
@@ -208,22 +201,16 @@ impl VersionT for Version {
&V0_3_0_COMPAT
}
async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
let pg = init_postgres("/embassy-data").await?;
let pg = init_postgres(DATA_DIR).await?;
let account = previous_account_info(&pg).await?;
let ssh_keys = previous_ssh_keys(&pg).await?;
let cifs = previous_cifs(&pg).await?;
let notifications = previous_notifications(pg).await?;
Ok((account, ssh_keys, cifs, notifications))
Ok((account, ssh_keys, cifs))
}
fn up(
self,
db: &mut Value,
(account, ssh_keys, cifs, notifications): Self::PreUpRes,
) -> Result<(), Error> {
fn up(self, db: &mut Value, (account, ssh_keys, cifs): Self::PreUpRes) -> Result<(), Error> {
let wifi = json!({
"infterface": db["server-info"]["wifi"]["interface"],
"ssids": db["server-info"]["wifi"]["ssids"],
@@ -305,7 +292,7 @@ impl VersionT for Version {
value["sshPubkeys"] = to_value(&ssh_keys)?;
value["availablePorts"] = to_value(&AvailablePorts::new())?;
value["sessions"] = to_value(&Sessions::new())?;
value["notifications"] = to_value(&notifications)?;
value["notifications"] = to_value(&Notifications::new())?;
value["cifs"] = to_value(&cifs)?;
value["packageStores"] = json!({});
value
@@ -315,7 +302,6 @@ impl VersionT for Version {
"private": private,
});
dbg!("Should be done with the up");
*db = next;
Ok(())
}
@@ -329,7 +315,7 @@ impl VersionT for Version {
#[instrument(skip(self, ctx))]
/// MUST be idempotent, and is run after *all* db migrations
async fn post_up(self, ctx: &RpcContext) -> Result<(), Error> {
let path = Path::new("/embassy-data/package-data/archive/");
let path = Path::new(formatcp!("{PACKAGE_DATA}/archive/"));
if !path.is_dir() {
return Err(Error::new(
eyre!(
@@ -383,64 +369,6 @@ impl VersionT for Version {
}
}
/// Migrates notification history out of the legacy PostgreSQL secret store.
///
/// Reads every row of the old `notifications` table and rebuilds it as the
/// in-memory `Notifications` map keyed by the row's `id`.
///
/// NOTE(review): `package_id` and `data` parse failures are silently dropped
/// (`.ok()` / `.unwrap_or_default()`) — presumably a deliberate best-effort so
/// a single malformed row cannot abort the whole migration; confirm.
async fn previous_notifications(pg: sqlx::Pool<sqlx::Postgres>) -> Result<Notifications, Error> {
    // Fetch the whole table up front; each column access below is annotated
    // with its column name so a schema mismatch produces a pointed error.
    let notification_cursor = sqlx::query(r#"SELECT * FROM notifications"#)
        .fetch_all(&pg)
        .await?;
    let notifications = {
        let mut notifications = Notifications::default();
        for row in notification_cursor {
            // An absent or unparseable package id becomes `None`
            // (i.e. a server-scoped rather than package-scoped notification).
            let package_id = serde_json::from_str::<PackageId>(
                row.try_get("package_id")
                    .with_ctx(|_| (ErrorKind::Database, "package_id"))?,
            )
            .ok();
            let created_at = row
                .try_get("created_at")
                .with_ctx(|_| (ErrorKind::Database, "created_at"))?;
            // Stored as i64 in Postgres, modeled as u32 here; the `as` cast
            // truncates — assumes legacy values fit in u32 (TODO confirm).
            let code = row
                .try_get::<i64, _>("code")
                .with_ctx(|_| (ErrorKind::Database, "code"))? as u32;
            let id = row
                .try_get::<i64, _>("id")
                .with_ctx(|_| (ErrorKind::Database, "id"))? as u32;
            // `level` is stored as a JSON string and must parse; unlike
            // `package_id`/`data`, a failure here aborts the migration.
            let level = serde_json::from_str(
                row.try_get("level")
                    .with_ctx(|_| (ErrorKind::Database, "level"))?,
            )
            .with_kind(ErrorKind::Database)
            .with_ctx(|_| (ErrorKind::Database, "level: serde_json "))?;
            let title = row
                .try_get("title")
                .with_ctx(|_| (ErrorKind::Database, "title"))?;
            let message = row
                .try_get("message")
                .with_ctx(|_| (ErrorKind::Database, "message"))?;
            // Malformed auxiliary data degrades to the default value.
            let data = serde_json::from_str(
                row.try_get("data")
                    .with_ctx(|_| (ErrorKind::Database, "data"))?,
            )
            .unwrap_or_default();
            notifications.0.insert(
                id,
                Notification {
                    package_id,
                    created_at,
                    code,
                    level,
                    title,
                    message,
                    data,
                },
            );
        }
        notifications
    };
    Ok(notifications)
}
#[tracing::instrument(skip_all)]
async fn previous_cifs(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<CifsTargets, Error> {
let cifs = sqlx::query(r#"SELECT * FROM cifs_shares"#)
@@ -448,16 +376,17 @@ async fn previous_cifs(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<CifsTargets, E
.await?
.into_iter()
.map(|row| {
let id: i64 = row.try_get("id")?;
let id: i32 = row.try_get("id")?;
Ok::<_, Error>((
id,
Cifs {
hostname: row
.try_get("hostname")
.with_ctx(|_| (ErrorKind::Database, "hostname"))?,
path: serde_json::from_str(row.try_get("path")?)
.with_kind(ErrorKind::Database)
.with_ctx(|_| (ErrorKind::Database, "path"))?,
path: row
.try_get::<String, _>("path")
.with_ctx(|_| (ErrorKind::Database, "path"))?
.into(),
username: row
.try_get("username")
.with_ctx(|_| (ErrorKind::Database, "username"))?,
@@ -486,7 +415,7 @@ async fn previous_account_info(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<Accoun
password: account_query
.try_get("password")
.with_ctx(|_| (ErrorKind::Database, "password"))?,
tor_key: TorSecretKeyV3::try_from(
tor_keys: vec![TorSecretKeyV3::try_from(
if let Some(bytes) = account_query
.try_get::<Option<Vec<u8>>, _>("tor_key")
.with_ctx(|_| (ErrorKind::Database, "tor_key"))?
@@ -511,7 +440,7 @@ async fn previous_account_info(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<Accoun
.with_ctx(|_| (ErrorKind::Database, "password.u8 32"))?,
)
},
)?,
)?],
server_id: account_query
.try_get("server_id")
.with_ctx(|_| (ErrorKind::Database, "server_id"))?,

View File

@@ -0,0 +1,94 @@
use std::collections::{BTreeMap, BTreeSet};
use exver::{PreReleaseSegment, VersionRange};
use imbl_value::InternedString;
use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{v0_3_6_alpha_9, VersionT};
use crate::net::host::address::DomainConfig;
use crate::prelude::*;
lazy_static::lazy_static! {
static ref V0_3_6_alpha_10: exver::Version = exver::Version::new(
[0, 3, 6],
[PreReleaseSegment::String("alpha".into()), 10.into()]
);
}
/// Pre-0.3.6-alpha.10 on-disk shape of a host address (the elements of the
/// old `hosts[..].addresses` set). Used only to deserialize the legacy
/// format during this migration; serialized with an internal `"kind"` tag.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "kind")]
enum HostAddress {
    // Tor hidden-service (.onion) address.
    Onion { address: OnionAddressV3 },
    // Clearnet domain name.
    Domain { address: InternedString },
}
/// v0.3.6-alpha.10 database migration.
#[derive(Clone, Copy, Debug, Default)]
pub struct Version;
impl VersionT for Version {
    type Previous = v0_3_6_alpha_9::Version;
    type PreUpRes = ();
    async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
        Ok(())
    }
    fn semver(self) -> exver::Version {
        V0_3_6_alpha_10.clone()
    }
    fn compat(self) -> &'static VersionRange {
        &V0_3_0_COMPAT
    }
    /// Splits each host's legacy `addresses` set into separate `onions` and
    /// `domains` collections. Migrated domains are given a default
    /// `DomainConfig` (public, no ACME).
    fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
        let packages = db["public"]["packageData"].as_object_mut().ok_or_else(|| {
            Error::new(
                eyre!("expected public.packageData to be an object"),
                ErrorKind::Database,
            )
        })?;
        for (_, package) in packages.iter_mut() {
            let hosts = package["hosts"].as_object_mut().ok_or_else(|| {
                Error::new(
                    eyre!("expected public.packageData[id].hosts to be an object"),
                    ErrorKind::Database,
                )
            })?;
            for (_, host) in hosts.iter_mut() {
                let mut onion_addrs = BTreeSet::new();
                let mut domain_cfgs = BTreeMap::new();
                // Deserialize the old combined address set, then bucket each
                // entry into the new per-kind collections.
                for addr in from_value::<BTreeSet<HostAddress>>(host["addresses"].clone())? {
                    match addr {
                        HostAddress::Onion { address } => {
                            onion_addrs.insert(address);
                        }
                        HostAddress::Domain { address } => {
                            // Legacy domains carried no per-domain settings;
                            // default to public with no ACME provider.
                            domain_cfgs.insert(
                                address,
                                DomainConfig {
                                    public: true,
                                    acme: None,
                                },
                            );
                        }
                    }
                }
                host["onions"] = to_value(&onion_addrs)?;
                host["domains"] = to_value(&domain_cfgs)?;
            }
        }
        Ok(())
    }
    fn down(self, _db: &mut Value) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -0,0 +1,83 @@
use exver::{PreReleaseSegment, VersionRange};
use imbl_value::json;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{v0_3_6_alpha_10, VersionT};
use crate::prelude::*;
lazy_static::lazy_static! {
static ref V0_3_6_alpha_11: exver::Version = exver::Version::new(
[0, 3, 6],
[PreReleaseSegment::String("alpha".into()), 11.into()]
);
}
/// v0.3.6-alpha.11 database migration: re-keys the server's ACME config by
/// provider, and seeds each host binding's new `net` object from its old
/// `lan` settings.
#[derive(Clone, Copy, Debug, Default)]
pub struct Version;
impl VersionT for Version {
    type Previous = v0_3_6_alpha_10::Version;
    type PreUpRes = ();
    async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
        Ok(())
    }
    fn semver(self) -> exver::Version {
        V0_3_6_alpha_11.clone()
    }
    fn compat(self) -> &'static VersionRange {
        &V0_3_0_COMPAT
    }
    fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
        // Swap serverInfo.acme for an empty object, keeping the old value so
        // it can be re-inserted under its provider key below.
        let acme = std::mem::replace(
            &mut db["public"]["serverInfo"]["acme"],
            Value::Object(Default::default()),
        );
        // Old shape: { "provider": ..., "contact": ... }.
        // New shape: { <provider>: { "contact": ... } }.
        // A null or provider-less old value is simply dropped.
        if !acme.is_null() && acme["provider"].as_str().is_some() {
            db["public"]["serverInfo"]["acme"]
                [&acme["provider"].as_str().or_not_found("provider")?] =
                json!({ "contact": &acme["contact"] });
        }
        for (_, package) in db["public"]["packageData"]
            .as_object_mut()
            .ok_or_else(|| {
                Error::new(
                    eyre!("expected public.packageData to be an object"),
                    ErrorKind::Database,
                )
            })?
            .iter_mut()
        {
            for (_, host) in package["hosts"]
                .as_object_mut()
                .ok_or_else(|| {
                    Error::new(
                        eyre!("expected public.packageData[id].hosts to be an object"),
                        ErrorKind::Database,
                    )
                })?
                .iter_mut()
            {
                for (_, bind) in host["bindings"]
                    .as_object_mut()
                    .ok_or_else(|| {
                        Error::new(
                            eyre!("expected public.packageData[id].hosts[hostId].bindings to be an object"),
                            ErrorKind::Database,
                        )
                    })?
                    .iter_mut()
                {
                    // `net` starts as a copy of the old `lan` config, with the
                    // new `public` flag defaulting to false.
                    bind["net"] = bind["lan"].clone();
                    bind["net"]["public"] = Value::Bool(false);
                }
            }
        }
        Ok(())
    }
    fn down(self, _db: &mut Value) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -0,0 +1,68 @@
use std::collections::BTreeMap;
use exver::{PreReleaseSegment, VersionRange};
use imbl_value::json;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{v0_3_6_alpha_11, VersionT};
use crate::prelude::*;
lazy_static::lazy_static! {
static ref V0_3_6_alpha_12: exver::Version = exver::Version::new(
[0, 3, 6],
[PreReleaseSegment::String("alpha".into()), 12.into()]
);
}
/// v0.3.6-alpha.12 database migration: introduces `serverInfo.host`, seeded
/// with a default (disabled) HTTP binding and the server's onion address.
#[derive(Clone, Copy, Debug, Default)]
pub struct Version;
impl VersionT for Version {
    type Previous = v0_3_6_alpha_11::Version;
    type PreUpRes = ();
    async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
        Ok(())
    }
    fn semver(self) -> exver::Version {
        V0_3_6_alpha_12.clone()
    }
    fn compat(self) -> &'static VersionRange {
        &V0_3_0_COMPAT
    }
    fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
        // Default binding for port 80: disabled, with SSL added on 443.
        let http_binding = json!({
            "enabled": false,
            "options": {
                "preferredExternalPort": 80,
                "addSsl": {
                    "preferredExternalPort": 443,
                    "alpn": { "specified": [ "http/1.1", "h2" ] },
                },
                "secure": null,
            },
            "net": {
                "assignedPort": null,
                "assignedSslPort": 443,
                "public": false,
            }
        });
        let mut port_bindings: BTreeMap<u16, Value> = BTreeMap::new();
        port_bindings.insert(80, http_binding);
        // Carry the server's existing onion address into the new host record.
        let server_onion = db["public"]["serverInfo"]["onionAddress"].clone();
        db["public"]["serverInfo"]["host"] = json!({
            "bindings": port_bindings,
            "onions": [server_onion],
            "domains": {},
            "hostnameInfo": {},
        });
        Ok(())
    }
    fn down(self, _db: &mut Value) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -0,0 +1,39 @@
use std::collections::BTreeMap;
use exver::{PreReleaseSegment, VersionRange};
use imbl_value::json;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{v0_3_6_alpha_12, VersionT};
use crate::prelude::*;
lazy_static::lazy_static! {
static ref V0_3_6_alpha_13: exver::Version = exver::Version::new(
[0, 3, 6],
[PreReleaseSegment::String("alpha".into()), 13.into()]
);
}
#[derive(Clone, Copy, Debug, Default)]
pub struct Version;
impl VersionT for Version {
type Previous = v0_3_6_alpha_12::Version;
type PreUpRes = ();
async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
Ok(())
}
fn semver(self) -> exver::Version {
V0_3_6_alpha_13.clone()
}
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())
}
}

View File

@@ -27,7 +27,7 @@ impl VersionT for Version {
async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
Ok(())
}
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
}
async fn post_up<'a>(self, ctx: &'a crate::context::RpcContext) -> Result<(), Error> {

View File

@@ -1,5 +1,5 @@
use exver::{PreReleaseSegment, VersionRange};
use imbl_value::{json, InOMap};
use imbl_value::json;
use tokio::process::Command;
use super::v0_3_5::V0_3_0_COMPAT;

View File

@@ -1,3 +1,5 @@
use std::path::Path;
use exver::{PreReleaseSegment, VersionRange};
use tokio::fs::File;
@@ -12,6 +14,7 @@ use crate::s9pk::v2::SIG_CONTEXT;
use crate::s9pk::S9pk;
use crate::service::LoadDisposition;
use crate::util::io::create_file;
use crate::DATA_DIR;
lazy_static::lazy_static! {
static ref V0_3_6_alpha_8: exver::Version = exver::Version::new(
@@ -40,7 +43,7 @@ impl VersionT for Version {
Ok(())
}
async fn post_up(self, ctx: &crate::context::RpcContext) -> Result<(), Error> {
let s9pk_dir = ctx.datadir.join(PKG_ARCHIVE_DIR).join("installed");
let s9pk_dir = Path::new(DATA_DIR).join(PKG_ARCHIVE_DIR).join("installed");
if tokio::fs::metadata(&s9pk_dir).await.is_ok() {
let mut read_dir = tokio::fs::read_dir(&s9pk_dir).await?;

View File

@@ -1,10 +1,9 @@
use std::path::{Path, PathBuf};
pub use helpers::script_dir;
use models::PackageId;
pub use models::VolumeId;
use models::{HostId, PackageId};
use crate::net::PACKAGE_CERT_PATH;
use crate::prelude::*;
use crate::util::VersionString;
@@ -36,7 +35,3 @@ pub fn asset_dir<P: AsRef<Path>>(
/// Path under `BACKUP_DIR` where package `pkg_id`'s backed-up data lives.
pub fn backup_dir(pkg_id: &PackageId) -> PathBuf {
    Path::new(BACKUP_DIR).join(pkg_id).join("data")
}
/// Path under `PACKAGE_CERT_PATH` for certificates belonging to host
/// `host_id` of package `pkg_id`.
pub fn cert_dir(pkg_id: &PackageId, host_id: &HostId) -> PathBuf {
    Path::new(PACKAGE_CERT_PATH).join(pkg_id).join(host_id)
}