Merge branch 'next/major' of github.com:Start9Labs/start-os into mcp

This commit is contained in:
Matt Hill
2026-03-17 15:51:59 -06:00
96 changed files with 1628 additions and 600 deletions

22
core/Cargo.lock generated
View File

@@ -3376,6 +3376,15 @@ dependencies = [
"serde_json",
]
[[package]]
name = "keccak"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653"
dependencies = [
"cpufeatures",
]
[[package]]
name = "kv"
version = "0.24.0"
@@ -5985,6 +5994,16 @@ dependencies = [
"digest 0.10.7",
]
[[package]]
name = "sha3"
version = "0.10.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60"
dependencies = [
"digest 0.10.7",
"keccak",
]
[[package]]
name = "sharded-slab"
version = "0.1.7"
@@ -6415,7 +6434,7 @@ dependencies = [
[[package]]
name = "start-os"
-version = "0.4.0-alpha.20"
+version = "0.4.0-alpha.21"
dependencies = [
"aes",
"async-acme",
@@ -6519,6 +6538,7 @@ dependencies = [
"serde_yml",
"sha-crypt",
"sha2 0.10.9",
"sha3",
"signal-hook",
"socket2 0.6.2",
"socks5-impl",

View File

@@ -15,7 +15,7 @@ license = "MIT"
name = "start-os"
readme = "README.md"
repository = "https://github.com/Start9Labs/start-os"
-version = "0.4.0-alpha.20" # VERSION_BUMP
+version = "0.4.0-alpha.21" # VERSION_BUMP
[lib]
name = "startos"
@@ -200,6 +200,7 @@ serde_toml = { package = "toml", version = "0.9.9+spec-1.0.0" }
serde_yaml = { package = "serde_yml", version = "0.0.12" }
sha-crypt = "0.5.0"
sha2 = "0.10.2"
sha3 = "0.10"
signal-hook = "0.3.17"
socket2 = { version = "0.6.0", features = ["all"] }
socks5-impl = { version = "0.7.2", features = ["client", "server"] }

View File

@@ -67,6 +67,10 @@ if [[ "${ENVIRONMENT:-}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT:-}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin start-cli --target=$TARGET

View File

@@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin registrybox --target=$RUST_ARCH-unknown-linux-musl

View File

@@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin start-container --target=$RUST_ARCH-unknown-linux-musl

View File

@@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin startbox --target=$RUST_ARCH-unknown-linux-musl

View File

@@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin tunnelbox --target=$RUST_ARCH-unknown-linux-musl

View File

@@ -1255,6 +1255,13 @@ backup.bulk.leaked-reference:
fr_FR: "référence fuitée vers BackupMountGuard"
pl_PL: "wyciekła referencja do BackupMountGuard"
backup.bulk.service-not-ready:
en_US: "Cannot create a backup of a service that is still initializing or in an error state"
de_DE: "Es kann keine Sicherung eines Dienstes erstellt werden, der noch initialisiert wird oder sich im Fehlerzustand befindet"
es_ES: "No se puede crear una copia de seguridad de un servicio que aún se está inicializando o está en estado de error"
fr_FR: "Impossible de créer une sauvegarde d'un service encore en cours d'initialisation ou en état d'erreur"
pl_PL: "Nie można utworzyć kopii zapasowej usługi, która jest jeszcze inicjalizowana lub znajduje się w stanie błędu"
# backup/restore.rs
backup.restore.package-error:
en_US: "Error restoring package %{id}: %{error}"

View File

@@ -300,6 +300,15 @@ async fn perform_backup(
error: backup_result,
},
);
} else {
backup_report.insert(
id.clone(),
PackageBackupReport {
error: Some(
t!("backup.bulk.service-not-ready").to_string(),
),
},
);
}
}

View File

@@ -10,6 +10,7 @@ use tracing::instrument;
use ts_rs::TS;
use super::target::BackupTargetId;
use crate::PackageId;
use crate::backup::os::OsBackup;
use crate::context::setup::SetupResult;
use crate::context::{RpcContext, SetupContext};
@@ -26,7 +27,6 @@ use crate::service::service_map::DownloadInstallFuture;
use crate::setup::SetupExecuteProgress;
use crate::system::{save_language, sync_kiosk};
use crate::util::serde::{IoFormat, Pem};
use crate::{PLATFORM, PackageId};
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
@@ -90,7 +90,7 @@ pub async fn recover_full_server(
recovery_source: TmpMountGuard,
server_id: &str,
recovery_password: &str,
-    kiosk: Option<bool>,
+    kiosk: bool,
hostname: Option<ServerHostnameInfo>,
SetupExecuteProgress {
init_phases,
@@ -123,7 +123,6 @@ pub async fn recover_full_server(
os_backup.account.hostname = h;
}
let kiosk = Some(kiosk.unwrap_or(true)).filter(|_| &*PLATFORM != "raspberrypi");
sync_kiosk(kiosk).await?;
let language = ctx.language.peek(|a| a.clone());

View File

@@ -149,6 +149,11 @@ impl MultiExecutable {
}
pub fn execute(&self) {
#[cfg(feature = "backtrace-on-stack-overflow")]
unsafe {
backtrace_on_stack_overflow::enable()
};
set_locale_from_env();
let mut popped = Vec::with_capacity(2);

View File

@@ -190,7 +190,7 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
}
}
});
-        rt.shutdown_timeout(Duration::from_secs(60));
+        rt.shutdown_timeout(Duration::from_millis(100));
res
};

View File

@@ -39,7 +39,7 @@ impl DiagnosticContext {
shutdown,
disk_guid,
error: Arc::new(error.into()),
-            rpc_continuations: RpcContinuations::new(),
+            rpc_continuations: RpcContinuations::new(None),
})))
}
}

View File

@@ -32,7 +32,7 @@ impl InitContext {
error: watch::channel(None).0,
progress,
shutdown,
-            rpc_continuations: RpcContinuations::new(),
+            rpc_continuations: RpcContinuations::new(None),
})))
}
}

View File

@@ -62,8 +62,8 @@ pub struct RpcContextSeed {
pub db: TypedPatchDb<Database>,
pub sync_db: watch::Sender<u64>,
pub account: SyncRwLock<AccountInfo>,
pub net_controller: Arc<NetController>,
pub os_net_service: NetService,
pub net_controller: Arc<NetController>,
pub s9pk_arch: Option<&'static str>,
pub services: ServiceMap,
pub cancellable_installs: SyncMutex<BTreeMap<PackageId, oneshot::Sender<()>>>,
@@ -346,10 +346,10 @@ impl RpcContext {
services,
cancellable_installs: SyncMutex::new(BTreeMap::new()),
metrics_cache,
rpc_continuations: RpcContinuations::new(Some(shutdown.clone())),
shutdown,
lxc_manager: Arc::new(LxcManager::new()),
open_authed_continuations: OpenAuthedContinuations::new(),
rpc_continuations: RpcContinuations::new(),
wifi_manager: Arc::new(RwLock::new(wifi_interface.clone().map(|i| WpaCli::init(i)))),
current_secret: Arc::new(
Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).map_err(|e| {

View File

@@ -85,7 +85,7 @@ impl SetupContext {
result: OnceCell::new(),
disk_guid: OnceCell::new(),
shutdown,
-            rpc_continuations: RpcContinuations::new(),
+            rpc_continuations: RpcContinuations::new(None),
install_rootfs: SyncMutex::new(None),
language: SyncMutex::new(None),
keyboard: SyncMutex::new(None),

View File

@@ -31,7 +31,7 @@ pub struct Database {
impl Database {
pub fn init(
account: &AccountInfo,
-        kiosk: Option<bool>,
+        kiosk: bool,
language: Option<InternedString>,
keyboard: Option<KeyboardOptions>,
) -> Result<Self, Error> {

View File

@@ -49,7 +49,7 @@ pub struct Public {
impl Public {
pub fn init(
account: &AccountInfo,
-        kiosk: Option<bool>,
+        kiosk: bool,
language: Option<InternedString>,
keyboard: Option<KeyboardOptions>,
) -> Result<Self, Error> {
@@ -149,7 +149,7 @@ impl Public {
echoip_urls: default_echoip_urls(),
ram: 0,
devices: Vec::new(),
-                kiosk,
+                kiosk: Some(kiosk).filter(|_| &*PLATFORM != "raspberrypi"),
language,
keyboard,
},

View File

@@ -25,20 +25,28 @@ pub enum RepairStrategy {
Preen,
Aggressive,
}
/// Detects the filesystem type of a block device using `grub-probe`.
/// Returns e.g. `"ext2"` (for ext4), `"btrfs"`, etc.
pub async fn detect_filesystem(
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<String, Error> {
Ok(String::from_utf8(
Command::new("grub-probe")
.arg("-d")
.arg(logicalname.as_ref())
.invoke(crate::ErrorKind::DiskManagement)
.await?,
)?
.trim()
.to_owned())
}
impl RepairStrategy {
pub async fn fsck(
&self,
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
match &*String::from_utf8(
Command::new("grub-probe")
.arg("-d")
.arg(logicalname.as_ref())
.invoke(crate::ErrorKind::DiskManagement)
.await?,
)?
.trim()
{
match &*detect_filesystem(&logicalname).await? {
"ext2" => self.e2fsck(logicalname).await,
"btrfs" => self.btrfs_check(logicalname).await,
fs => {

View File

@@ -7,7 +7,7 @@ use rust_i18n::t;
use tokio::process::Command;
use tracing::instrument;
use super::fsck::{RepairStrategy, RequiresReboot};
use super::fsck::{RepairStrategy, RequiresReboot, detect_filesystem};
use super::util::pvscan;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::{FileSystem, ReadWrite};
@@ -301,6 +301,37 @@ pub async fn mount_fs<P: AsRef<Path>>(
.with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
blockdev_path = Path::new("/dev/mapper").join(&full_name);
}
// Convert ext4 → btrfs on the package-data partition if needed
let fs_type = detect_filesystem(&blockdev_path).await?;
if fs_type == "ext2" {
tracing::info!("Running e2fsck before converting {name} from ext4 to btrfs");
Command::new("e2fsck")
.arg("-fy")
.arg(&blockdev_path)
.invoke(ErrorKind::DiskManagement)
.await?;
tracing::info!("Converting {name} from ext4 to btrfs");
Command::new("btrfs-convert")
.arg("--no-progress")
.arg(&blockdev_path)
.invoke(ErrorKind::DiskManagement)
.await?;
// Defragment after conversion for optimal performance
let tmp_mount = datadir.as_ref().join(format!("{name}.convert-tmp"));
tokio::fs::create_dir_all(&tmp_mount).await?;
BlockDev::new(&blockdev_path)
.mount(&tmp_mount, ReadWrite)
.await?;
Command::new("btrfs")
.args(["filesystem", "defragment", "-r"])
.arg(&tmp_mount)
.invoke(ErrorKind::DiskManagement)
.await?;
unmount(&tmp_mount, false).await?;
tokio::fs::remove_dir(&tmp_mount).await?;
}
let reboot = repair.fsck(&blockdev_path).await?;
if !guid.ends_with("_UNENC") {
@@ -342,3 +373,99 @@ pub async fn mount_all_fs<P: AsRef<Path>>(
reboot |= mount_fs(guid, &datadir, "package-data", repair, password).await?;
Ok(reboot)
}
/// Temporarily activates a VG and opens LUKS to probe the `package-data`
/// filesystem type. Returns `None` if probing fails (e.g. LV doesn't exist).
#[instrument(skip_all)]
pub async fn probe_package_data_fs(guid: &str) -> Result<Option<String>, Error> {
// Import and activate the VG
match Command::new("vgimport")
.arg(guid)
.invoke(ErrorKind::DiskManagement)
.await
{
Ok(_) => {}
Err(e)
if format!("{}", e.source)
.lines()
.any(|l| l.trim() == format!("Volume group \"{}\" is not exported", guid)) =>
{
// Already imported, that's fine
}
Err(e) => {
tracing::warn!("Could not import VG {guid} for filesystem probe: {e}");
return Ok(None);
}
}
if let Err(e) = Command::new("vgchange")
.arg("-ay")
.arg(guid)
.invoke(ErrorKind::DiskManagement)
.await
{
tracing::warn!("Could not activate VG {guid} for filesystem probe: {e}");
return Ok(None);
}
let mut opened_luks = false;
let result = async {
let lv_path = Path::new("/dev").join(guid).join("package-data");
if tokio::fs::metadata(&lv_path).await.is_err() {
return Ok(None);
}
let blockdev_path = if !guid.ends_with("_UNENC") {
let full_name = format!("{guid}_package-data");
let password = DEFAULT_PASSWORD;
if let Some(parent) = Path::new(PASSWORD_PATH).parent() {
tokio::fs::create_dir_all(parent).await?;
}
tokio::fs::write(PASSWORD_PATH, password)
.await
.with_ctx(|_| (ErrorKind::Filesystem, PASSWORD_PATH))?;
Command::new("cryptsetup")
.arg("-q")
.arg("luksOpen")
.arg("--allow-discards")
.arg(format!("--key-file={PASSWORD_PATH}"))
.arg(format!("--keyfile-size={}", password.len()))
.arg(&lv_path)
.arg(&full_name)
.invoke(ErrorKind::DiskManagement)
.await?;
let _ = tokio::fs::remove_file(PASSWORD_PATH).await;
opened_luks = true;
PathBuf::from(format!("/dev/mapper/{full_name}"))
} else {
lv_path.clone()
};
detect_filesystem(&blockdev_path).await.map(Some)
}
.await;
// Always clean up: close LUKS, deactivate VG, export VG
if opened_luks {
let full_name = format!("{guid}_package-data");
Command::new("cryptsetup")
.arg("-q")
.arg("luksClose")
.arg(&full_name)
.invoke(ErrorKind::DiskManagement)
.await
.log_err();
}
Command::new("vgchange")
.arg("-an")
.arg(guid)
.invoke(ErrorKind::DiskManagement)
.await
.log_err();
Command::new("vgexport")
.arg(guid)
.invoke(ErrorKind::DiskManagement)
.await
.log_err();
result
}

View File

@@ -41,6 +41,7 @@ pub struct DiskInfo {
pub partitions: Vec<PartitionInfo>,
pub capacity: u64,
pub guid: Option<InternedString>,
pub filesystem: Option<String>,
}
#[derive(Clone, Debug, Deserialize, Serialize, ts_rs::TS)]
@@ -55,6 +56,7 @@ pub struct PartitionInfo {
pub used: Option<u64>,
pub start_os: BTreeMap<String, StartOsRecoveryInfo>,
pub guid: Option<InternedString>,
pub filesystem: Option<String>,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, ts_rs::TS)]
@@ -374,6 +376,15 @@ pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
disk_info.capacity = part_info.capacity;
if let Some(g) = disk_guids.get(&disk_info.logicalname) {
disk_info.guid = g.clone();
if let Some(guid) = g {
disk_info.filesystem =
crate::disk::main::probe_package_data_fs(guid)
.await
.unwrap_or_else(|e| {
tracing::warn!("Failed to probe filesystem for {guid}: {e}");
None
});
}
} else {
disk_info.partitions = vec![part_info];
}
@@ -384,11 +395,31 @@ pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
disk_info.partitions = Vec::with_capacity(index.parts.len());
if let Some(g) = disk_guids.get(&disk_info.logicalname) {
disk_info.guid = g.clone();
if let Some(guid) = g {
disk_info.filesystem =
crate::disk::main::probe_package_data_fs(guid)
.await
.unwrap_or_else(|e| {
tracing::warn!("Failed to probe filesystem for {guid}: {e}");
None
});
}
} else {
for part in index.parts {
let mut part_info = part_info(part).await;
if let Some(g) = disk_guids.get(&part_info.logicalname) {
part_info.guid = g.clone();
if let Some(guid) = g {
part_info.filesystem =
crate::disk::main::probe_package_data_fs(guid)
.await
.unwrap_or_else(|e| {
tracing::warn!(
"Failed to probe filesystem for {guid}: {e}"
);
None
});
}
}
disk_info.partitions.push(part_info);
}
@@ -461,6 +492,7 @@ async fn disk_info(disk: PathBuf) -> DiskInfo {
partitions: Vec::new(),
capacity,
guid: None,
filesystem: None,
}
}
@@ -544,6 +576,7 @@ async fn part_info(part: PathBuf) -> PartitionInfo {
used,
start_os,
guid: None,
filesystem: None,
}
}

View File

@@ -174,7 +174,9 @@ pub async fn init(
local_auth.complete();
// Re-enroll MOK on every boot if Secure Boot key exists but isn't enrolled yet
if let Err(e) = crate::util::mok::enroll_mok(std::path::Path::new(crate::util::mok::DKMS_MOK_PUB)).await {
if let Err(e) =
crate::util::mok::enroll_mok(std::path::Path::new(crate::util::mok::DKMS_MOK_PUB)).await
{
tracing::warn!("MOK enrollment failed: {e}");
}
@@ -369,7 +371,7 @@ pub async fn init(
enable_zram.complete();
update_server_info.start();
-    sync_kiosk(server_info.as_kiosk().de()?).await?;
+    sync_kiosk(server_info.as_kiosk().de()?.unwrap_or(false)).await?;
let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
let devices = lshw().await?;
let status_info = ServerStatus {

View File

@@ -820,7 +820,6 @@ impl NetService {
break;
}
}
self.shutdown = true;
Ok(())
}
@@ -832,6 +831,7 @@ impl NetService {
impl Drop for NetService {
fn drop(&mut self) {
if !self.shutdown {
self.shutdown = true;
let svc = std::mem::replace(self, Self::dummy());
tokio::spawn(async move { svc.remove_all().await.log_err() });
}

View File

@@ -145,9 +145,10 @@ pub struct GatewayInfo {
pub public: bool,
}
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[ts(export)]
#[derive(Clone, Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct ServiceInterface {
pub id: ServiceInterfaceId,
pub name: String,

View File

@@ -188,7 +188,7 @@ lazy_static::lazy_static! {
}
fn asn1_time_to_system_time(time: &Asn1TimeRef) -> Result<SystemTime, Error> {
-    let diff = time.diff(&**ASN1_UNIX_EPOCH)?;
+    let diff = ASN1_UNIX_EPOCH.diff(time)?;
let mut res = UNIX_EPOCH;
if diff.days >= 0 {
res += Duration::from_secs(diff.days as u64 * 86400);

View File

@@ -509,7 +509,7 @@ where
drop(queue_cell.replace(None));
if !runner.is_empty() {
-        tokio::time::timeout(Duration::from_secs(60), runner)
+        tokio::time::timeout(Duration::from_millis(100), runner)
.await
.log_err();
}

View File

@@ -141,7 +141,7 @@ impl RegistryContext {
listen: config.registry_listen.unwrap_or(DEFAULT_REGISTRY_LISTEN),
db,
datadir,
-            rpc_continuations: RpcContinuations::new(),
+            rpc_continuations: RpcContinuations::new(None),
client: Client::builder()
.proxy(Proxy::custom(move |url| {
if url.host_str().map_or(false, |h| h.ends_with(".onion")) {

View File

@@ -17,6 +17,7 @@ use ts_rs::TS;
#[allow(unused_imports)]
use crate::prelude::*;
use crate::shutdown::Shutdown;
use crate::util::future::TimedResource;
use crate::util::net::WebSocket;
use crate::util::{FromStrParser, new_guid};
@@ -98,12 +99,15 @@ pub type RestHandler = Box<dyn FnOnce(Request) -> RestFuture + Send>;
pub struct WebSocketFuture {
kill: Option<broadcast::Receiver<()>>,
shutdown: Option<broadcast::Receiver<Option<Shutdown>>>,
fut: BoxFuture<'static, ()>,
}
impl Future for WebSocketFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.kill.as_ref().map_or(false, |k| !k.is_empty()) {
if self.kill.as_ref().map_or(false, |k| !k.is_empty())
|| self.shutdown.as_ref().map_or(false, |s| !s.is_empty())
{
Poll::Ready(())
} else {
self.fut.poll_unpin(cx)
@@ -138,6 +142,7 @@ impl RpcContinuation {
RpcContinuation::WebSocket(TimedResource::new(
Box::new(|ws| WebSocketFuture {
kill: None,
shutdown: None,
fut: handler(ws.into()).boxed(),
}),
timeout,
@@ -170,6 +175,7 @@ impl RpcContinuation {
RpcContinuation::WebSocket(TimedResource::new(
Box::new(|ws| WebSocketFuture {
kill,
shutdown: None,
fut: handler(ws.into()).boxed(),
}),
timeout,
@@ -183,15 +189,21 @@ impl RpcContinuation {
}
}
pub struct RpcContinuations(AsyncMutex<BTreeMap<Guid, RpcContinuation>>);
pub struct RpcContinuations {
continuations: AsyncMutex<BTreeMap<Guid, RpcContinuation>>,
shutdown: Option<broadcast::Sender<Option<Shutdown>>>,
}
impl RpcContinuations {
pub fn new() -> Self {
RpcContinuations(AsyncMutex::new(BTreeMap::new()))
pub fn new(shutdown: Option<broadcast::Sender<Option<Shutdown>>>) -> Self {
RpcContinuations {
continuations: AsyncMutex::new(BTreeMap::new()),
shutdown,
}
}
#[instrument(skip_all)]
pub async fn clean(&self) {
let mut continuations = self.0.lock().await;
let mut continuations = self.continuations.lock().await;
let mut to_remove = Vec::new();
for (guid, cont) in &*continuations {
if cont.is_timed_out() {
@@ -206,23 +218,28 @@ impl RpcContinuations {
#[instrument(skip_all)]
pub async fn add(&self, guid: Guid, handler: RpcContinuation) {
self.clean().await;
self.0.lock().await.insert(guid, handler);
self.continuations.lock().await.insert(guid, handler);
}
pub async fn get_ws_handler(&self, guid: &Guid) -> Option<WebSocketHandler> {
let mut continuations = self.0.lock().await;
let mut continuations = self.continuations.lock().await;
if !matches!(continuations.get(guid), Some(RpcContinuation::WebSocket(_))) {
return None;
}
let Some(RpcContinuation::WebSocket(x)) = continuations.remove(guid) else {
return None;
};
x.get().await
let handler = x.get().await?;
let shutdown = self.shutdown.as_ref().map(|s| s.subscribe());
Some(Box::new(move |ws| {
let mut fut = handler(ws);
fut.shutdown = shutdown;
fut
}))
}
pub async fn get_rest_handler(&self, guid: &Guid) -> Option<RestHandler> {
let mut continuations: tokio::sync::MutexGuard<'_, BTreeMap<Guid, RpcContinuation>> =
self.0.lock().await;
let mut continuations = self.continuations.lock().await;
if !matches!(continuations.get(guid), Some(RpcContinuation::Rest(_))) {
return None;
}

View File

@@ -12,18 +12,20 @@ use serde::{Deserialize, Serialize};
use tracing::warn;
use ts_rs::TS;
use crate::db::model::package::PackageState;
use crate::db::model::public::NetworkInterfaceInfo;
use crate::net::host::Host;
use crate::net::service_interface::ServiceInterface;
use crate::net::ssl::FullchainCertData;
use crate::prelude::*;
use crate::service::effects::context::EffectContext;
use crate::service::effects::net::ssl::Algorithm;
use crate::service::rpc::{CallbackHandle, CallbackId};
use crate::service::{Service, ServiceActorSeed};
use crate::status::StatusInfo;
use crate::util::collections::EqMap;
use crate::util::future::NonDetachingJoinHandle;
use crate::util::sync::SyncMutex;
use crate::status::StatusInfo;
use crate::{GatewayId, HostId, PackageId, ServiceInterfaceId};
/// Abstraction for callbacks that are triggered by patchdb subscriptions.
@@ -66,9 +68,7 @@ impl<K: Ord + Clone + Send + Sync + 'static> DbWatchedCallbacks<K> {
.map(|(_, handlers)| CallbackHandlers(handlers))
.filter(|cb| !cb.0.is_empty())
}) {
let value = watch
.peek_and_mark_seen()
.unwrap_or_default();
let value = watch.peek_and_mark_seen().unwrap_or_default();
if let Err(e) = cbs.call(vector![value]).await {
tracing::error!("Error in {label} callback: {e}");
tracing::debug!("{e:?}");
@@ -99,6 +99,10 @@ pub struct ServiceCallbacks {
inner: SyncMutex<ServiceCallbackMap>,
get_host_info: Arc<DbWatchedCallbacks<(PackageId, HostId)>>,
get_status: Arc<DbWatchedCallbacks<PackageId>>,
get_service_interface: Arc<DbWatchedCallbacks<(PackageId, ServiceInterfaceId)>>,
list_service_interfaces: Arc<DbWatchedCallbacks<PackageId>>,
get_system_smtp: Arc<DbWatchedCallbacks<()>>,
get_service_manifest: Arc<DbWatchedCallbacks<PackageId>>,
}
impl Default for ServiceCallbacks {
@@ -107,21 +111,21 @@ impl Default for ServiceCallbacks {
inner: SyncMutex::new(ServiceCallbackMap::default()),
get_host_info: Arc::new(DbWatchedCallbacks::new("host info")),
get_status: Arc::new(DbWatchedCallbacks::new("get_status")),
get_service_interface: Arc::new(DbWatchedCallbacks::new("get_service_interface")),
list_service_interfaces: Arc::new(DbWatchedCallbacks::new("list_service_interfaces")),
get_system_smtp: Arc::new(DbWatchedCallbacks::new("get_system_smtp")),
get_service_manifest: Arc::new(DbWatchedCallbacks::new("get_service_manifest")),
}
}
}
#[derive(Default)]
struct ServiceCallbackMap {
get_service_interface: BTreeMap<(PackageId, ServiceInterfaceId), Vec<CallbackHandler>>,
list_service_interfaces: BTreeMap<PackageId, Vec<CallbackHandler>>,
get_system_smtp: Vec<CallbackHandler>,
get_ssl_certificate: EqMap<
(BTreeSet<InternedString>, FullchainCertData, Algorithm),
(NonDetachingJoinHandle<()>, Vec<CallbackHandler>),
>,
get_container_ip: BTreeMap<PackageId, Vec<CallbackHandler>>,
get_service_manifest: BTreeMap<PackageId, Vec<CallbackHandler>>,
get_outbound_gateway: BTreeMap<PackageId, (NonDetachingJoinHandle<()>, Vec<CallbackHandler>)>,
}
@@ -132,24 +136,10 @@ impl ServiceCallbacks {
pub fn gc(&self) {
self.mutate(|this| {
this.get_service_interface.retain(|_, v| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
this.list_service_interfaces.retain(|_, v| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
this.get_system_smtp
.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
this.get_ssl_certificate.retain(|_, (_, v)| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
this.get_service_manifest.retain(|_, v| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
this.get_outbound_gateway.retain(|_, (_, v)| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
@@ -157,70 +147,38 @@ impl ServiceCallbacks {
});
self.get_host_info.gc();
self.get_status.gc();
self.get_service_interface.gc();
self.list_service_interfaces.gc();
self.get_system_smtp.gc();
self.get_service_manifest.gc();
}
pub(super) fn add_get_service_interface(
&self,
package_id: PackageId,
service_interface_id: ServiceInterfaceId,
watch: TypedDbWatch<ServiceInterface>,
handler: CallbackHandler,
) {
self.mutate(|this| {
this.get_service_interface
.entry((package_id, service_interface_id))
.or_default()
.push(handler);
})
self.get_service_interface
.add((package_id, service_interface_id), watch, handler);
}
#[must_use]
pub fn get_service_interface(
&self,
id: &(PackageId, ServiceInterfaceId),
) -> Option<CallbackHandlers> {
self.mutate(|this| {
Some(CallbackHandlers(
this.get_service_interface.remove(id).unwrap_or_default(),
))
.filter(|cb| !cb.0.is_empty())
})
}
pub(super) fn add_list_service_interfaces(
pub(super) fn add_list_service_interfaces<T: Send + 'static>(
&self,
package_id: PackageId,
watch: TypedDbWatch<T>,
handler: CallbackHandler,
) {
self.mutate(|this| {
this.list_service_interfaces
.entry(package_id)
.or_default()
.push(handler);
})
self.list_service_interfaces.add(package_id, watch, handler);
}
#[must_use]
pub fn list_service_interfaces(&self, id: &PackageId) -> Option<CallbackHandlers> {
self.mutate(|this| {
Some(CallbackHandlers(
this.list_service_interfaces.remove(id).unwrap_or_default(),
))
.filter(|cb| !cb.0.is_empty())
})
}
pub(super) fn add_get_system_smtp(&self, handler: CallbackHandler) {
self.mutate(|this| {
this.get_system_smtp.push(handler);
})
}
#[must_use]
pub fn get_system_smtp(&self) -> Option<CallbackHandlers> {
self.mutate(|this| {
Some(CallbackHandlers(std::mem::take(&mut this.get_system_smtp)))
.filter(|cb| !cb.0.is_empty())
})
pub(super) fn add_get_system_smtp<T: Send + 'static>(
&self,
watch: TypedDbWatch<T>,
handler: CallbackHandler,
) {
self.get_system_smtp.add((), watch, handler);
}
pub(super) fn add_get_host_info(
@@ -376,23 +334,13 @@ impl ServiceCallbacks {
})
}
pub(super) fn add_get_service_manifest(&self, package_id: PackageId, handler: CallbackHandler) {
self.mutate(|this| {
this.get_service_manifest
.entry(package_id)
.or_default()
.push(handler)
})
}
#[must_use]
pub fn get_service_manifest(&self, package_id: &PackageId) -> Option<CallbackHandlers> {
self.mutate(|this| {
this.get_service_manifest
.remove(package_id)
.map(CallbackHandlers)
.filter(|cb| !cb.0.is_empty())
})
pub(super) fn add_get_service_manifest(
&self,
package_id: PackageId,
watch: TypedDbWatch<PackageState>,
handler: CallbackHandler,
) {
self.get_service_manifest.add(package_id, watch, handler);
}
}

View File

@@ -399,27 +399,38 @@ pub async fn get_service_manifest(
callback,
}: GetServiceManifestParams,
) -> Result<Manifest, Error> {
use crate::db::model::package::PackageState;
let context = context.deref()?;
let ptr = format!("/public/packageData/{}/stateInfo", package_id)
.parse()
.expect("valid json pointer");
let mut watch = context
.seed
.ctx
.db
.watch(ptr)
.await
.typed::<PackageState>();
let manifest = watch
.peek_and_mark_seen()?
.as_manifest(ManifestPreference::Old)
.de()?;
if let Some(callback) = callback {
let callback = callback.register(&context.seed.persistent_container);
context
.seed
.ctx
.callbacks
.add_get_service_manifest(package_id.clone(), CallbackHandler::new(&context, callback));
.add_get_service_manifest(
package_id.clone(),
watch,
CallbackHandler::new(&context, callback),
);
}
let db = context.seed.ctx.db.peek().await;
let manifest = db
.as_public()
.as_package_data()
.as_idx(&package_id)
.or_not_found(&package_id)?
.as_state_info()
.as_manifest(ManifestPreference::New)
.de()?;
Ok(manifest)
}

View File

@@ -1,7 +1,5 @@
use std::collections::BTreeMap;
use imbl::vector;
use crate::net::service_interface::{AddressInfo, ServiceInterface, ServiceInterfaceType};
use crate::service::effects::callbacks::CallbackHandler;
use crate::service::effects::prelude::*;
@@ -42,7 +40,7 @@ pub async fn export_service_interface(
interface_type: r#type,
};
let res = context
context
.seed
.ctx
.db
@@ -56,27 +54,8 @@ pub async fn export_service_interface(
ifaces.insert(&id, &service_interface)?;
Ok(())
})
.await;
res.result?;
if res.revision.is_some() {
if let Some(callbacks) = context
.seed
.ctx
.callbacks
.get_service_interface(&(package_id.clone(), id))
{
callbacks.call(vector![]).await?;
}
if let Some(callbacks) = context
.seed
.ctx
.callbacks
.list_service_interfaces(&package_id)
{
callbacks.call(vector![]).await?;
}
}
.await
.result?;
Ok(())
}
@@ -101,26 +80,34 @@ pub async fn get_service_interface(
) -> Result<Option<ServiceInterface>, Error> {
let context = context.deref()?;
let package_id = package_id.unwrap_or_else(|| context.seed.id.clone());
let db = context.seed.ctx.db.peek().await;
let ptr = format!(
"/public/packageData/{}/serviceInterfaces/{}",
package_id, service_interface_id
)
.parse()
.expect("valid json pointer");
let mut watch = context
.seed
.ctx
.db
.watch(ptr)
.await
.typed::<ServiceInterface>();
let res = watch.peek_and_mark_seen()?.de().ok();
if let Some(callback) = callback {
let callback = callback.register(&context.seed.persistent_container);
context.seed.ctx.callbacks.add_get_service_interface(
package_id.clone(),
service_interface_id.clone(),
watch,
CallbackHandler::new(&context, callback),
);
}
let interface = db
.as_public()
.as_package_data()
.as_idx(&package_id)
.and_then(|m| m.as_service_interfaces().as_idx(&service_interface_id))
.map(|m| m.de())
.transpose()?;
Ok(interface)
Ok(res)
}
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
@@ -142,27 +129,23 @@ pub async fn list_service_interfaces(
let context = context.deref()?;
let package_id = package_id.unwrap_or_else(|| context.seed.id.clone());
let ptr = format!("/public/packageData/{}/serviceInterfaces", package_id)
.parse()
.expect("valid json pointer");
let mut watch = context.seed.ctx.db.watch(ptr).await;
let res = imbl_value::from_value(watch.peek_and_mark_seen()?)
.unwrap_or_default();
if let Some(callback) = callback {
let callback = callback.register(&context.seed.persistent_container);
context.seed.ctx.callbacks.add_list_service_interfaces(
package_id.clone(),
watch.typed::<BTreeMap<ServiceInterfaceId, ServiceInterface>>(),
CallbackHandler::new(&context, callback),
);
}
let res = context
.seed
.ctx
.db
.peek()
.await
.into_public()
.into_package_data()
.into_idx(&package_id)
.map(|m| m.into_service_interfaces().de())
.transpose()?
.unwrap_or_default();
Ok(res)
}
@@ -180,52 +163,22 @@ pub async fn clear_service_interfaces(
let context = context.deref()?;
let package_id = context.seed.id.clone();
let res = context
context
.seed
.ctx
.db
.mutate(|db| {
let mut removed = Vec::new();
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package_id)
.or_not_found(&package_id)?
.as_service_interfaces_mut()
.mutate(|s| {
Ok(s.retain(|id, _| {
if except.contains(id) {
true
} else {
removed.push(id.clone());
false
}
}))
})?;
Ok(removed)
Ok(s.retain(|id, _| except.contains(id)))
})
})
.await;
let removed = res.result?;
if res.revision.is_some() {
for id in removed {
if let Some(callbacks) = context
.seed
.ctx
.callbacks
.get_service_interface(&(package_id.clone(), id))
{
callbacks.call(vector![]).await?;
}
}
if let Some(callbacks) = context
.seed
.ctx
.callbacks
.list_service_interfaces(&package_id)
{
callbacks.call(vector![]).await?;
}
}
.await
.result?;
Ok(())
}

View File

@@ -16,25 +16,25 @@ pub async fn get_system_smtp(
) -> Result<Option<SmtpValue>, Error> {
let context = context.deref()?;
let ptr = "/public/serverInfo/smtp"
.parse()
.expect("valid json pointer");
let mut watch = context.seed.ctx.db.watch(ptr).await;
let res = imbl_value::from_value(watch.peek_and_mark_seen()?)
.with_kind(ErrorKind::Deserialization)?;
if let Some(callback) = callback {
let callback = callback.register(&context.seed.persistent_container);
context
.seed
.ctx
.callbacks
.add_get_system_smtp(CallbackHandler::new(&context, callback));
.add_get_system_smtp(
watch.typed::<Option<SmtpValue>>(),
CallbackHandler::new(&context, callback),
);
}
let res = context
.seed
.ctx
.db
.peek()
.await
.into_public()
.into_server_info()
.into_smtp()
.de()?;
Ok(res)
}

View File

@@ -432,11 +432,15 @@ impl Service {
tracing::error!("Error installing service: {e}");
tracing::debug!("{e:?}")
}) {
crate::volume::remove_install_backup(id).await.log_err();
return Ok(Some(service));
}
}
}
cleanup(ctx, id, false).await.log_err();
crate::volume::restore_volumes_from_install_backup(id)
.await
.log_err();
ctx.db
.mutate(|v| v.as_public_mut().as_package_data_mut().remove(id))
.await
@@ -471,37 +475,60 @@ impl Service {
tracing::error!("Error installing service: {e}");
tracing::debug!("{e:?}")
}) {
crate::volume::remove_install_backup(id).await.log_err();
return Ok(Some(service));
}
}
}
let s9pk = S9pk::open(s9pk_path, Some(id)).await?;
ctx.db
.mutate({
|db| {
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(id)
.or_not_found(id)?
.as_state_info_mut()
.map_mutate(|s| {
if let PackageState::Updating(UpdatingState {
manifest, ..
}) = s
{
Ok(PackageState::Installed(InstalledState { manifest }))
} else {
Err(Error::new(
eyre!("{}", t!("service.mod.race-condition-detected")),
ErrorKind::Database,
))
}
})
}
})
.await
.result?;
handle_installed(s9pk).await
match async {
let s9pk = S9pk::open(s9pk_path, Some(id)).await?;
ctx.db
.mutate({
|db| {
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(id)
.or_not_found(id)?
.as_state_info_mut()
.map_mutate(|s| {
if let PackageState::Updating(UpdatingState {
manifest,
..
}) = s
{
Ok(PackageState::Installed(InstalledState { manifest }))
} else {
Err(Error::new(
eyre!(
"{}",
t!("service.mod.race-condition-detected")
),
ErrorKind::Database,
))
}
})
}
})
.await
.result?;
handle_installed(s9pk).await
}
.await
{
Ok(service) => {
crate::volume::remove_install_backup(id).await.log_err();
Ok(service)
}
Err(e) => {
tracing::error!(
"Update rollback failed for {id}, restoring volume snapshot: {e}"
);
crate::volume::restore_volumes_from_install_backup(id)
.await
.log_err();
Err(e)
}
}
}
PackageStateMatchModelRef::Removing(_) | PackageStateMatchModelRef::Restoring(_) => {
if let Ok(s9pk) = S9pk::open(s9pk_path, Some(id)).await.map_err(|e| {
@@ -650,17 +677,6 @@ impl Service {
tokio::task::yield_now().await;
}
// Trigger manifest callbacks after successful installation
let manifest = service.seed.persistent_container.s9pk.as_manifest();
if let Some(callbacks) = ctx.callbacks.get_service_manifest(&manifest.id) {
let manifest_value =
serde_json::to_value(manifest).with_kind(ErrorKind::Serialization)?;
callbacks
.call(imbl::vector![manifest_value.into()])
.await
.log_err();
}
Ok(service)
}

View File

@@ -307,6 +307,8 @@ impl ServiceMap {
finalization_progress.start();
let s9pk = S9pk::open(&installed_path, Some(&id)).await?;
let data_version = get_data_version(&id).await?;
// Snapshot existing volumes before install/update modifies them
crate::volume::snapshot_volumes_for_install(&id).await?;
let prev = if let Some(service) = service.take() {
ensure_code!(
recovery_source.is_none(),
@@ -382,6 +384,8 @@ impl ServiceMap {
cleanup.await?;
}
crate::volume::remove_install_backup(&id).await.log_err();
drop(service);
sync_progress_task.await.map_err(|_| {

View File

@@ -1,8 +1,6 @@
use std::collections::BTreeSet;
use std::path::Path;
use imbl::vector;
use crate::context::RpcContext;
use crate::db::model::package::{InstalledState, InstallingInfo, InstallingState, PackageState};
use crate::net::host::all_hosts;
@@ -94,11 +92,6 @@ pub async fn cleanup(ctx: &RpcContext, id: &PackageId, soft: bool) -> Result<(),
));
}
};
// Trigger manifest callbacks with null to indicate uninstall
if let Some(callbacks) = ctx.callbacks.get_service_manifest(&manifest.id) {
callbacks.call(vector![Value::Null]).await.log_err();
}
if !soft {
let path = Path::new(DATA_DIR).join(PKG_VOLUME_DIR).join(&manifest.id);
crate::util::io::delete_dir(&path).await?;

View File

@@ -115,7 +115,7 @@ pub async fn list_disks(ctx: SetupContext) -> Result<Vec<DiskInfo>, Error> {
async fn setup_init(
ctx: &SetupContext,
password: Option<String>,
kiosk: Option<bool>,
kiosk: bool,
hostname: Option<ServerHostnameInfo>,
init_phases: InitPhases,
) -> Result<(AccountInfo, InitResult), Error> {
@@ -137,9 +137,8 @@ async fn setup_init(
account.save(m)?;
let info = m.as_public_mut().as_server_info_mut();
info.as_password_hash_mut().ser(&account.password)?;
if let Some(kiosk) = kiosk {
info.as_kiosk_mut().ser(&Some(kiosk))?;
}
info.as_kiosk_mut()
.ser(&Some(kiosk).filter(|_| &*PLATFORM != "raspberrypi"))?;
if let Some(language) = language.clone() {
info.as_language_mut().ser(&Some(language))?;
}
@@ -174,8 +173,7 @@ async fn setup_init(
pub struct AttachParams {
pub password: Option<EncryptedWire>,
pub guid: InternedString,
#[ts(optional)]
pub kiosk: Option<bool>,
pub kiosk: bool,
}
#[instrument(skip_all)]
@@ -411,8 +409,7 @@ pub struct SetupExecuteParams {
guid: InternedString,
password: Option<EncryptedWire>,
recovery_source: Option<RecoverySource<EncryptedWire>>,
#[ts(optional)]
kiosk: Option<bool>,
kiosk: bool,
name: Option<InternedString>,
hostname: Option<InternedString>,
}
@@ -549,7 +546,7 @@ pub async fn execute_inner(
guid: InternedString,
password: Option<String>,
recovery_source: Option<RecoverySource<String>>,
kiosk: Option<bool>,
kiosk: bool,
hostname: Option<ServerHostnameInfo>,
) -> Result<(SetupResult, RpcContext), Error> {
let progress = &ctx.progress;
@@ -622,7 +619,7 @@ async fn fresh_setup(
ctx: &SetupContext,
guid: InternedString,
password: &str,
kiosk: Option<bool>,
kiosk: bool,
hostname: Option<ServerHostnameInfo>,
SetupExecuteProgress {
init_phases,
@@ -633,7 +630,6 @@ async fn fresh_setup(
let account = AccountInfo::new(password, root_ca_start_time().await, hostname)?;
let db = ctx.db().await?;
let kiosk = Some(kiosk.unwrap_or(true)).filter(|_| &*PLATFORM != "raspberrypi");
sync_kiosk(kiosk).await?;
let language = ctx.language.peek(|a| a.clone());
@@ -684,7 +680,7 @@ async fn recover(
recovery_source: BackupTargetFS,
server_id: String,
recovery_password: String,
kiosk: Option<bool>,
kiosk: bool,
hostname: Option<ServerHostnameInfo>,
progress: SetupExecuteProgress,
) -> Result<(SetupResult, RpcContext), Error> {
@@ -709,7 +705,7 @@ async fn migrate(
guid: InternedString,
old_guid: &str,
password: Option<String>,
kiosk: Option<bool>,
kiosk: bool,
hostname: Option<ServerHostnameInfo>,
SetupExecuteProgress {
init_phases,

View File

@@ -6,7 +6,6 @@ use chrono::Utc;
use clap::Parser;
use color_eyre::eyre::eyre;
use futures::FutureExt;
use imbl::vector;
use imbl_value::InternedString;
use rpc_toolkit::{Context, Empty, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -319,13 +318,11 @@ pub fn kernel_logs<C: Context + AsRef<RpcContinuations>>() -> ParentHandler<C, L
const DISABLE_KIOSK_PATH: &str =
"/media/startos/config/overlay/etc/systemd/system/getty@tty1.service.d/autologin.conf";
pub async fn sync_kiosk(kiosk: Option<bool>) -> Result<(), Error> {
if let Some(kiosk) = kiosk {
if kiosk {
enable_kiosk().await?;
} else {
disable_kiosk().await?;
}
/// Bring the on-disk kiosk configuration in line with the requested state,
/// enabling or disabling autologin/kiosk mode accordingly.
pub async fn sync_kiosk(kiosk: bool) -> Result<(), Error> {
    match kiosk {
        true => enable_kiosk().await?,
        false => disable_kiosk().await?,
    }
    Ok(())
}
@@ -1150,9 +1147,6 @@ pub async fn set_system_smtp(ctx: RpcContext, smtp: SmtpValue) -> Result<(), Err
})
.await
.result?;
if let Some(callbacks) = ctx.callbacks.get_system_smtp() {
callbacks.call(vector![to_value(&smtp)?]).await?;
}
Ok(())
}
pub async fn clear_system_smtp(ctx: RpcContext) -> Result<(), Error> {
@@ -1165,9 +1159,6 @@ pub async fn clear_system_smtp(ctx: RpcContext) -> Result<(), Error> {
})
.await
.result?;
if let Some(callbacks) = ctx.callbacks.get_system_smtp() {
callbacks.call(vector![Value::Null]).await?;
}
Ok(())
}

View File

@@ -201,7 +201,7 @@ impl TunnelContext {
listen,
db,
datadir,
rpc_continuations: RpcContinuations::new(),
rpc_continuations: RpcContinuations::new(None),
open_authed_continuations: OpenAuthedContinuations::new(),
ephemeral_sessions: SyncMutex::new(Sessions::new()),
net_iface,

View File

@@ -13,7 +13,6 @@ use futures::{FutureExt, Stream, StreamExt, ready};
use http::header::CONTENT_LENGTH;
use http::{HeaderMap, StatusCode};
use imbl_value::InternedString;
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt, AsyncWrite, AsyncWriteExt};
use tokio::sync::watch;
@@ -23,6 +22,7 @@ use crate::progress::{PhaseProgressTrackerHandle, ProgressUnits};
use crate::rpc_continuations::{Guid, RpcContinuation};
use crate::s9pk::merkle_archive::source::ArchiveSource;
use crate::s9pk::merkle_archive::source::multi_cursor_file::{FileCursor, MultiCursorFile};
use crate::util::direct_io::DirectIoFile;
use crate::util::io::{TmpDir, create_file};
pub async fn upload(
@@ -69,16 +69,6 @@ impl Progress {
false
}
}
fn handle_write(&mut self, res: &std::io::Result<usize>) -> bool {
match res {
Ok(a) => {
self.written += *a as u64;
self.tracker += *a as u64;
true
}
Err(e) => self.handle_error(e),
}
}
async fn expected_size(watch: &mut watch::Receiver<Self>) -> Option<u64> {
watch
.wait_for(|progress| progress.error.is_some() || progress.expected_size.is_some())
@@ -192,16 +182,19 @@ impl UploadingFile {
complete: false,
});
let file = create_file(path).await?;
let multi_cursor = MultiCursorFile::open(&file).await?;
let direct_file = DirectIoFile::from_tokio_file(file).await?;
let uploading = Self {
tmp_dir: None,
file: MultiCursorFile::open(&file).await?,
file: multi_cursor,
progress: progress.1,
};
Ok((
UploadHandle {
tmp_dir: None,
file,
file: direct_file,
progress: progress.0,
last_synced: 0,
},
uploading,
))
@@ -346,8 +339,9 @@ impl AsyncSeek for UploadingFileReader {
pub struct UploadHandle {
tmp_dir: Option<Arc<TmpDir>>,
#[pin]
file: File,
file: DirectIoFile,
progress: watch::Sender<Progress>,
last_synced: u64,
}
impl UploadHandle {
pub async fn upload(&mut self, request: Request) {
@@ -394,6 +388,19 @@ impl UploadHandle {
if let Err(e) = self.file.sync_all().await {
self.progress.send_if_modified(|p| p.handle_error(&e));
}
// Update progress with final synced bytes
self.update_sync_progress();
}
    /// Fold bytes newly confirmed on disk (per the DirectIoFile) into the
    /// shared progress watch channel, advancing both counters by the delta.
    fn update_sync_progress(&mut self) {
        let synced = self.file.bytes_synced();
        // bytes_synced() only ever grows, so this subtraction cannot underflow.
        let delta = synced - self.last_synced;
        if delta > 0 {
            self.last_synced = synced;
            self.progress.send_modify(|p| {
                p.written += delta;
                p.tracker += delta;
            });
        }
    }
}
#[pin_project::pinned_drop]
@@ -410,13 +417,23 @@ impl AsyncWrite for UploadHandle {
buf: &[u8],
) -> Poll<Result<usize, std::io::Error>> {
let this = self.project();
// Update progress based on bytes actually flushed to disk
let synced = this.file.bytes_synced();
let delta = synced - *this.last_synced;
if delta > 0 {
*this.last_synced = synced;
this.progress.send_modify(|p| {
p.written += delta;
p.tracker += delta;
});
}
match this.file.poll_write(cx, buf) {
Poll::Ready(res) => {
Poll::Ready(Err(e)) => {
this.progress
.send_if_modified(|progress| progress.handle_write(&res));
Poll::Ready(res)
.send_if_modified(|progress| progress.handle_error(&e));
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
a => a,
}
}
fn poll_flush(

292
core/src/util/direct_io.rs Normal file
View File

@@ -0,0 +1,292 @@
use std::alloc::Layout;
use std::io::Write;
use std::os::fd::AsRawFd;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::AsyncWrite;
use tokio::task::JoinHandle;
const BLOCK_SIZE: usize = 4096;
const BUF_CAP: usize = 256 * 1024; // 256KB
/// Aligned buffer for O_DIRECT I/O.
struct AlignedBuf {
    // Raw pointer to a BUF_CAP-byte allocation aligned to BLOCK_SIZE.
    ptr: *mut u8,
    // Number of valid bytes currently stored (always <= BUF_CAP).
    len: usize,
}
// SAFETY: We have exclusive ownership of the allocation.
unsafe impl Send for AlignedBuf {}
impl AlignedBuf {
    /// Allocate an empty BUF_CAP-byte buffer aligned to BLOCK_SIZE, as
    /// required for O_DIRECT writes. Aborts via handle_alloc_error on OOM.
    fn new() -> Self {
        let layout = Layout::from_size_align(BUF_CAP, BLOCK_SIZE).unwrap();
        // SAFETY: layout has non-zero size
        let ptr = unsafe { std::alloc::alloc(layout) };
        if ptr.is_null() {
            std::alloc::handle_alloc_error(layout);
        }
        Self { ptr, len: 0 }
    }
    /// View the valid (filled) portion of the buffer.
    fn as_slice(&self) -> &[u8] {
        // SAFETY: ptr is valid for len bytes, properly aligned, exclusively owned
        unsafe { std::slice::from_raw_parts(self.ptr, self.len) }
    }
    /// Append as much of `data` as fits; returns the number of bytes copied
    /// (0 when the buffer is already full).
    fn push(&mut self, data: &[u8]) -> usize {
        let n = data.len().min(BUF_CAP - self.len);
        // SAFETY: src and dst don't overlap, both valid for n bytes
        unsafe {
            std::ptr::copy_nonoverlapping(data.as_ptr(), self.ptr.add(self.len), n);
        }
        self.len += n;
        n
    }
    /// Length rounded down to a multiple of BLOCK_SIZE — the portion eligible
    /// for an O_DIRECT write (BLOCK_SIZE is a power of two, so the mask works).
    fn aligned_len(&self) -> usize {
        self.len & !(BLOCK_SIZE - 1)
    }
    /// Remove the first `n` bytes, shifting any remainder to the front.
    fn drain_front(&mut self, n: usize) {
        debug_assert!(n <= self.len);
        let remaining = self.len - n;
        if remaining > 0 {
            // SAFETY: regions may overlap, so we use copy (memmove)
            unsafe {
                std::ptr::copy(self.ptr.add(n), self.ptr, remaining);
            }
        }
        self.len = remaining;
    }
    /// Extract aligned data into a new buffer for flushing, leaving remainder.
    /// Returns None when less than one full block is buffered.
    fn take_aligned(&mut self) -> Option<(AlignedBuf, u64)> {
        let aligned = self.aligned_len();
        if aligned == 0 {
            return None;
        }
        let mut flush_buf = AlignedBuf::new();
        flush_buf.push(&self.as_slice()[..aligned]);
        self.drain_front(aligned);
        Some((flush_buf, aligned as u64))
    }
}
impl Drop for AlignedBuf {
    fn drop(&mut self) {
        let layout = Layout::from_size_align(BUF_CAP, BLOCK_SIZE).unwrap();
        // SAFETY: ptr was allocated with this layout in new()
        unsafe { std::alloc::dealloc(self.ptr, layout) };
    }
}
/// Tracks who currently owns the underlying file handle.
enum FileState {
    /// File is available for a new background flush.
    Idle(std::fs::File),
    /// A spawn_blocking task owns the file; resolves to (file, bytes written).
    Flushing(JoinHandle<std::io::Result<(std::fs::File, u64)>>),
    /// Terminal state: reached after an error, shutdown, or final sync_all.
    Done,
}
/// A file writer that uses O_DIRECT to bypass the kernel page cache.
///
/// Buffers writes in an aligned buffer and flushes to disk in the background.
/// New writes can proceed while a flush is in progress (double-buffering).
/// Progress is tracked via [`bytes_synced`](Self::bytes_synced), which reflects
/// bytes actually written to disk.
pub struct DirectIoFile {
    // Ownership of the file handle / any in-flight background flush task.
    file_state: FileState,
    // Pending bytes not yet handed off to a flush task.
    buf: AlignedBuf,
    // Total bytes confirmed written to disk (exposed via bytes_synced()).
    synced: u64,
}
impl DirectIoFile {
fn new(file: std::fs::File) -> Self {
Self {
file_state: FileState::Idle(file),
buf: AlignedBuf::new(),
synced: 0,
}
}
/// Convert an existing tokio File into a DirectIoFile by adding O_DIRECT.
pub async fn from_tokio_file(file: tokio::fs::File) -> std::io::Result<Self> {
let std_file = file.into_std().await;
let fd = std_file.as_raw_fd();
// SAFETY: fd is valid, F_GETFL/F_SETFL are standard fcntl ops
unsafe {
let flags = libc::fcntl(fd, libc::F_GETFL);
if flags == -1 {
return Err(std::io::Error::last_os_error());
}
#[cfg(target_os = "linux")]
if libc::fcntl(fd, libc::F_SETFL, flags | libc::O_DIRECT) == -1 {
return Err(std::io::Error::last_os_error());
}
}
Ok(Self::new(std_file))
}
/// Number of bytes confirmed written to disk.
pub fn bytes_synced(&self) -> u64 {
self.synced
}
/// Flush any remaining buffered data and sync to disk.
///
/// Removes the O_DIRECT flag for the final partial-block write, then
/// calls fsync. Updates `bytes_synced` to the final total.
pub async fn sync_all(&mut self) -> std::io::Result<()> {
// Wait for any in-flight flush
self.await_flush().await?;
let FileState::Idle(file) = std::mem::replace(&mut self.file_state, FileState::Done) else {
return Ok(());
};
let mut buf = std::mem::replace(&mut self.buf, AlignedBuf::new());
let remaining = buf.len as u64;
tokio::task::spawn_blocking(move || {
let mut file = file;
// Write any aligned portion
let aligned = buf.aligned_len();
if aligned > 0 {
let slice = unsafe { std::slice::from_raw_parts(buf.ptr, aligned) };
file.write_all(slice)?;
buf.drain_front(aligned);
}
// Write remainder with O_DIRECT disabled
if buf.len > 0 {
let fd = file.as_raw_fd();
// SAFETY: fd is valid, F_GETFL/F_SETFL are standard fcntl ops
#[cfg(target_os = "linux")]
unsafe {
let flags = libc::fcntl(fd, libc::F_GETFL);
libc::fcntl(fd, libc::F_SETFL, flags & !libc::O_DIRECT);
}
file.write_all(buf.as_slice())?;
}
file.sync_all()
})
.await
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))??;
self.synced += remaining;
Ok(())
}
async fn await_flush(&mut self) -> std::io::Result<()> {
if let FileState::Flushing(handle) = &mut self.file_state {
let (file, flushed) = handle
.await
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))??;
self.synced += flushed;
self.file_state = FileState::Idle(file);
}
Ok(())
}
/// Non-blocking poll: try to complete a pending flush.
/// Returns Ready(Ok(())) if idle (or just became idle), Pending if still flushing.
fn poll_complete_flush(&mut self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
if let FileState::Flushing(handle) = &mut self.file_state {
match Pin::new(handle).poll(cx) {
Poll::Ready(Ok(Ok((file, flushed)))) => {
self.synced += flushed;
self.file_state = FileState::Idle(file);
}
Poll::Ready(Ok(Err(e))) => {
self.file_state = FileState::Done;
return Poll::Ready(Err(e));
}
Poll::Ready(Err(e)) => {
self.file_state = FileState::Done;
return Poll::Ready(Err(std::io::Error::new(std::io::ErrorKind::Other, e)));
}
Poll::Pending => return Poll::Pending,
}
}
Poll::Ready(Ok(()))
}
/// Start a background flush of aligned data if the file is idle.
fn maybe_start_flush(&mut self) {
if !matches!(self.file_state, FileState::Idle(_)) {
return;
}
let Some((flush_buf, count)) = self.buf.take_aligned() else {
return;
};
let FileState::Idle(file) = std::mem::replace(&mut self.file_state, FileState::Done) else {
unreachable!()
};
let handle = tokio::task::spawn_blocking(move || {
let mut file = file;
file.write_all(flush_buf.as_slice())?;
Ok((file, count))
});
self.file_state = FileState::Flushing(handle);
}
}
impl AsyncWrite for DirectIoFile {
    /// Buffered write: data lands in the aligned buffer and reaches disk via
    /// background flush tasks, so `Ready(Ok(n))` does NOT mean n bytes are
    /// durable yet — see [`DirectIoFile::bytes_synced`].
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        // Try to complete any pending flush (non-blocking, registers waker)
        match self.poll_complete_flush(cx) {
            Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
            _ => {} // Pending is fine — we can still accept writes into the buffer
        }
        // If file just became idle and buffer has aligned data, start a flush
        // to free buffer space before accepting new data
        self.maybe_start_flush();
        // Accept data into the buffer
        let n = self.buf.push(buf);
        if n == 0 {
            // Buffer full, must wait for flush to complete and free space.
            // Waker was already registered by poll_complete_flush above.
            // NOTE(review): if file_state is Done here (after a prior error),
            // no waker is pending and this write would stall — confirm callers
            // stop writing after the first reported error.
            return Poll::Pending;
        }
        // If file is idle and we now have aligned data, start flushing
        self.maybe_start_flush();
        Poll::Ready(Ok(n))
    }
    /// Completes any pending flush and flushes remaining block-aligned data.
    /// A sub-block remainder stays buffered — O_DIRECT cannot write partial
    /// blocks — and is only written out by `sync_all`.
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        match self.poll_complete_flush(cx) {
            Poll::Pending => return Poll::Pending,
            Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
            Poll::Ready(Ok(())) => {}
        }
        if self.buf.aligned_len() > 0 {
            self.maybe_start_flush();
            // Poll the just-started flush
            return self.poll_complete_flush(cx).map(|r| r.map(|_| ()));
        }
        Poll::Ready(Ok(()))
    }
    /// Waits for any in-flight flush, then enters the terminal Done state.
    /// NOTE(review): an unaligned remainder left in `buf` is dropped here —
    /// the upload path calls `sync_all` first, so confirm no other caller
    /// relies on shutdown alone to persist the tail.
    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        match self.poll_complete_flush(cx) {
            Poll::Pending => return Poll::Pending,
            Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
            Poll::Ready(Ok(())) => {}
        }
        self.file_state = FileState::Done;
        Poll::Ready(Ok(()))
    }
}

View File

@@ -38,6 +38,7 @@ pub mod collections;
pub mod cpupower;
pub mod crypto;
pub mod data_url;
pub mod direct_io;
pub mod future;
pub mod http_reader;
pub mod io;

View File

@@ -60,8 +60,9 @@ mod v0_4_0_alpha_17;
mod v0_4_0_alpha_18;
mod v0_4_0_alpha_19;
mod v0_4_0_alpha_20;
mod v0_4_0_alpha_21;
pub type Current = v0_4_0_alpha_20::Version; // VERSION_BUMP
pub type Current = v0_4_0_alpha_21::Version; // VERSION_BUMP
impl Current {
#[instrument(skip(self, db))]
@@ -189,7 +190,8 @@ enum Version {
V0_4_0_alpha_17(Wrapper<v0_4_0_alpha_17::Version>),
V0_4_0_alpha_18(Wrapper<v0_4_0_alpha_18::Version>),
V0_4_0_alpha_19(Wrapper<v0_4_0_alpha_19::Version>),
V0_4_0_alpha_20(Wrapper<v0_4_0_alpha_20::Version>), // VERSION_BUMP
V0_4_0_alpha_20(Wrapper<v0_4_0_alpha_20::Version>),
V0_4_0_alpha_21(Wrapper<v0_4_0_alpha_21::Version>), // VERSION_BUMP
Other(exver::Version),
}
@@ -252,7 +254,8 @@ impl Version {
Self::V0_4_0_alpha_17(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_18(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_19(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_20(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
Self::V0_4_0_alpha_20(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_21(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
Self::Other(v) => {
return Err(Error::new(
eyre!("unknown version {v}"),
@@ -307,7 +310,8 @@ impl Version {
Version::V0_4_0_alpha_17(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_18(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_19(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_20(Wrapper(x)) => x.semver(), // VERSION_BUMP
Version::V0_4_0_alpha_20(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_21(Wrapper(x)) => x.semver(), // VERSION_BUMP
Version::Other(x) => x.clone(),
}
}

View File

@@ -143,7 +143,8 @@ pub struct Version;
impl VersionT for Version {
type Previous = v0_3_5_2::Version;
type PreUpRes = (AccountInfo, SshKeys, CifsTargets);
/// (package_id, host_id, expanded_key)
type PreUpRes = (AccountInfo, SshKeys, CifsTargets, Vec<(String, String, [u8; 64])>);
fn semver(self) -> exver::Version {
V0_3_6_alpha_0.clone()
}
@@ -158,15 +159,17 @@ impl VersionT for Version {
let cifs = previous_cifs(&pg).await?;
let tor_keys = previous_tor_keys(&pg).await?;
Command::new("systemctl")
.arg("stop")
.arg("postgresql@*.service")
.invoke(crate::ErrorKind::Database)
.await?;
Ok((account, ssh_keys, cifs))
Ok((account, ssh_keys, cifs, tor_keys))
}
fn up(self, db: &mut Value, (account, ssh_keys, cifs): Self::PreUpRes) -> Result<Value, Error> {
fn up(self, db: &mut Value, (account, ssh_keys, cifs, tor_keys): Self::PreUpRes) -> Result<Value, Error> {
let prev_package_data = db["package-data"].clone();
let wifi = json!({
@@ -183,6 +186,11 @@ impl VersionT for Version {
"shuttingDown": db["server-info"]["status-info"]["shutting-down"],
"restarting": db["server-info"]["status-info"]["restarting"],
});
let tor_address: String = from_value(db["server-info"]["tor-address"].clone())?;
let onion_address = tor_address
.replace("https://", "")
.replace("http://", "")
.replace(".onion/", "");
let server_info = {
let mut server_info = json!({
"arch": db["server-info"]["arch"],
@@ -196,15 +204,9 @@ impl VersionT for Version {
});
server_info["postInitMigrationTodos"] = json!({});
let tor_address: String = from_value(db["server-info"]["tor-address"].clone())?;
// Maybe we do this like the Public::init does
server_info["torAddress"] = json!(tor_address);
server_info["onionAddress"] = json!(
tor_address
.replace("https://", "")
.replace("http://", "")
.replace(".onion/", "")
);
server_info["torAddress"] = json!(&tor_address);
server_info["onionAddress"] = json!(&onion_address);
server_info["networkInterfaces"] = json!({});
server_info["statusInfo"] = status_info;
server_info["wifi"] = wifi;
@@ -233,6 +235,30 @@ impl VersionT for Version {
let private = {
let mut value = json!({});
value["keyStore"] = crate::dbg!(to_value(&keystore)?);
// Preserve tor onion keys so later migrations (v0_4_0_alpha_20) can
// include them in onion-migration.json for the tor service.
if !tor_keys.is_empty() {
let mut onion_map: Value = json!({});
let onion_obj = onion_map.as_object_mut().unwrap();
let mut tor_migration = imbl::Vector::<Value>::new();
for (package_id, host_id, key_bytes) in &tor_keys {
let onion_addr = onion_address_from_key(key_bytes);
let encoded_key =
base64::Engine::encode(&crate::util::serde::BASE64, key_bytes);
onion_obj.insert(
onion_addr.as_str().into(),
Value::String(encoded_key.clone().into()),
);
tor_migration.push_back(json!({
"hostname": &onion_addr,
"packageId": package_id,
"hostId": host_id,
"key": &encoded_key,
}));
}
value["keyStore"]["onion"] = onion_map;
value["torMigration"] = Value::Array(tor_migration);
}
value["password"] = to_value(&account.password)?;
value["compatS9pkKey"] =
to_value(&crate::db::model::private::generate_developer_key())?;
@@ -498,3 +524,109 @@ async fn previous_ssh_keys(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<SshKeys, E
};
Ok(ssh_keys)
}
/// Returns `Vec<(package_id, host_id, expanded_key)>`.
/// Server key uses `("STARTOS", "STARTOS")`.
#[tracing::instrument(skip_all)]
async fn previous_tor_keys(
    pg: &sqlx::Pool<sqlx::Postgres>,
) -> Result<Vec<(String, String, [u8; 64])>, Error> {
    let mut keys = Vec::new();
    // Server tor key from the account table.
    // Older installs have tor_key (64 bytes). Newer installs (post-NetworkKeys migration)
    // made tor_key nullable and use network_key (32 bytes, needs expansion) instead.
    let row = sqlx::query(r#"SELECT tor_key, network_key FROM account"#)
        .fetch_one(pg)
        .await
        .with_kind(ErrorKind::Database)?;
    if let Ok(tor_key) = row.try_get::<Vec<u8>, _>("tor_key") {
        // Keys of unexpected length are silently skipped rather than failing
        // the whole migration.
        if let Ok(key) = <[u8; 64]>::try_from(tor_key) {
            keys.push(("STARTOS".to_owned(), "STARTOS".to_owned(), key));
        }
    } else if let Ok(net_key) = row.try_get::<Vec<u8>, _>("network_key") {
        if let Ok(seed) = <[u8; 32]>::try_from(net_key) {
            keys.push((
                "STARTOS".to_owned(),
                "STARTOS".to_owned(),
                crate::util::crypto::ed25519_expand_key(&seed),
            ));
        }
    }
    // Package tor keys from the network_keys table (32-byte keys that need expansion).
    // The outer `if let Ok` is deliberate best-effort: this table may not exist
    // on every schema version the migration can run against.
    if let Ok(rows) = sqlx::query(r#"SELECT package, interface, key FROM network_keys"#)
        .fetch_all(pg)
        .await
    {
        for row in rows {
            // Skip malformed rows instead of aborting the migration.
            let Ok(package) = row.try_get::<String, _>("package") else {
                continue;
            };
            let Ok(interface) = row.try_get::<String, _>("interface") else {
                continue;
            };
            let Ok(key_bytes) = row.try_get::<Vec<u8>, _>("key") else {
                continue;
            };
            if let Ok(seed) = <[u8; 32]>::try_from(key_bytes) {
                keys.push((
                    package,
                    interface,
                    crate::util::crypto::ed25519_expand_key(&seed),
                ));
            }
        }
    }
    // Package tor keys from the tor table (already 64-byte expanded keys).
    // Same best-effort treatment as above.
    if let Ok(rows) = sqlx::query(r#"SELECT package, interface, key FROM tor"#)
        .fetch_all(pg)
        .await
    {
        for row in rows {
            let Ok(package) = row.try_get::<String, _>("package") else {
                continue;
            };
            let Ok(interface) = row.try_get::<String, _>("interface") else {
                continue;
            };
            let Ok(key_bytes) = row.try_get::<Vec<u8>, _>("key") else {
                continue;
            };
            if let Ok(key) = <[u8; 64]>::try_from(key_bytes) {
                keys.push((package, interface, key));
            }
        }
    }
    Ok(keys)
}
/// Derive the tor v3 onion address (without .onion suffix) from a 64-byte
/// expanded ed25519 secret key.
fn onion_address_from_key(expanded_key: &[u8; 64]) -> String {
    use sha3::Digest;
    // Recover the ed25519 public key from the expanded secret key
    // (ed25519-dalek v1 API, matching the key format tor stores).
    let secret =
        ed25519_dalek_v1::ExpandedSecretKey::from_bytes(expanded_key).expect("invalid tor key");
    let public = ed25519_dalek_v1::PublicKey::from(&secret).to_bytes();
    // Per the tor v3 rend spec the address payload is
    //   pubkey (32) || checksum (2) || version (1)
    // where checksum = SHA3-256(".onion checksum" || pubkey || version)[0..2].
    let checksum = {
        let mut hasher = sha3::Sha3_256::new();
        hasher.update(b".onion checksum");
        hasher.update(&public);
        hasher.update(b"\x03");
        hasher.finalize()
    };
    let mut payload = [0u8; 35];
    payload[..32].copy_from_slice(&public);
    payload[32..34].copy_from_slice(&checksum[..2]);
    payload[34] = 0x03;
    base32::encode(base32::Alphabet::Rfc4648 { padding: false }, &payload).to_ascii_lowercase()
}

View File

@@ -2,11 +2,13 @@ use std::path::Path;
use exver::{PreReleaseSegment, VersionRange};
use imbl_value::json;
use reqwest::Url;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{VersionT, v0_4_0_alpha_19};
use crate::context::RpcContext;
use crate::prelude::*;
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
lazy_static::lazy_static! {
static ref V0_4_0_alpha_20: exver::Version = exver::Version::new(
@@ -33,74 +35,106 @@ impl VersionT for Version {
}
#[instrument(skip_all)]
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
// Extract onion migration data before removing it
let onion_store = db
// Use the pre-built torMigration data from v0_3_6_alpha_0 if available.
// This contains all (hostname, packageId, hostId, key) entries with keys
// already resolved, avoiding the issue where packageData is empty during
// migration (packages aren't reinstalled until post_up).
let migration_data = if let Some(tor_migration) = db
.get("private")
.and_then(|p| p.get("keyStore"))
.and_then(|k| k.get("onion"))
.cloned()
.unwrap_or(Value::Object(Default::default()));
let mut addresses = imbl::Vector::<Value>::new();
// Extract OS host onion addresses
if let Some(onions) = db
.get("public")
.and_then(|p| p.get("serverInfo"))
.and_then(|s| s.get("network"))
.and_then(|n| n.get("host"))
.and_then(|h| h.get("onions"))
.and_then(|o| o.as_array())
.and_then(|p| p.get("torMigration"))
.and_then(|t| t.as_array())
{
for onion in onions {
if let Some(hostname) = onion.as_str() {
let key = onion_store
.get(hostname)
.and_then(|v| v.as_str())
.unwrap_or_default();
addresses.push_back(json!({
"hostname": hostname,
"packageId": "STARTOS",
"hostId": "STARTOS",
"key": key,
}));
json!({
"addresses": tor_migration.clone(),
})
} else {
// Fallback for fresh installs or installs that didn't go through
// v0_3_6_alpha_0 with the torMigration field.
let onion_store = db
.get("private")
.and_then(|p| p.get("keyStore"))
.and_then(|k| k.get("onion"))
.cloned()
.unwrap_or(Value::Object(Default::default()));
let mut addresses = imbl::Vector::<Value>::new();
// Extract OS host onion addresses
if let Some(onions) = db
.get("public")
.and_then(|p| p.get("serverInfo"))
.and_then(|s| s.get("network"))
.and_then(|n| n.get("host"))
.and_then(|h| h.get("onions"))
.and_then(|o| o.as_array())
{
for onion in onions {
if let Some(hostname) = onion.as_str() {
let key = onion_store
.get(hostname)
.and_then(|v| v.as_str())
.ok_or_else(|| {
Error::new(
eyre!("missing tor key for onion address {hostname}"),
ErrorKind::Database,
)
})?;
addresses.push_back(json!({
"hostname": hostname,
"packageId": "STARTOS",
"hostId": "startos-ui",
"key": key,
}));
}
}
}
}
// Extract package host onion addresses
if let Some(packages) = db
.get("public")
.and_then(|p| p.get("packageData"))
.and_then(|p| p.as_object())
{
for (package_id, package) in packages.iter() {
if let Some(hosts) = package.get("hosts").and_then(|h| h.as_object()) {
for (host_id, host) in hosts.iter() {
if let Some(onions) = host.get("onions").and_then(|o| o.as_array()) {
for onion in onions {
if let Some(hostname) = onion.as_str() {
let key = onion_store
.get(hostname)
.and_then(|v| v.as_str())
.unwrap_or_default();
addresses.push_back(json!({
"hostname": hostname,
"packageId": &**package_id,
"hostId": &**host_id,
"key": key,
}));
// Extract package host onion addresses
if let Some(packages) = db
.get("public")
.and_then(|p| p.get("packageData"))
.and_then(|p| p.as_object())
{
for (package_id, package) in packages.iter() {
if let Some(hosts) = package.get("hosts").and_then(|h| h.as_object()) {
for (host_id, host) in hosts.iter() {
if let Some(onions) = host.get("onions").and_then(|o| o.as_array()) {
for onion in onions {
if let Some(hostname) = onion.as_str() {
let key = onion_store
.get(hostname)
.and_then(|v| v.as_str())
.ok_or_else(|| {
Error::new(
eyre!(
"missing tor key for onion address {hostname}"
),
ErrorKind::Database,
)
})?;
addresses.push_back(json!({
"hostname": hostname,
"packageId": &**package_id,
"hostId": &**host_id,
"key": key,
}));
}
}
}
}
}
}
}
}
let migration_data = json!({
"addresses": addresses,
});
json!({
"addresses": addresses,
})
};
// Clean up torMigration from private
if let Some(private) = db.get_mut("private").and_then(|p| p.as_object_mut()) {
private.remove("torMigration");
}
// Remove onions and tor-related fields from server host
if let Some(host) = db
@@ -200,7 +234,7 @@ impl VersionT for Version {
}
#[instrument(skip_all)]
async fn post_up(self, _ctx: &RpcContext, input: Value) -> Result<(), Error> {
async fn post_up(self, ctx: &RpcContext, input: Value) -> Result<(), Error> {
let path = Path::new(
"/media/startos/data/package-data/volumes/tor/data/startos/onion-migration.json",
);
@@ -209,6 +243,53 @@ impl VersionT for Version {
crate::util::io::write_file_atomic(path, json).await?;
// Sideload the bundled tor s9pk
let s9pk_path_str = format!("/usr/lib/startos/tor_{}.s9pk", crate::ARCH);
let s9pk_path = Path::new(&s9pk_path_str);
// The bundle is only present on images that ship a tor package for this
// architecture; if it's missing, skip silently.
if tokio::fs::metadata(s9pk_path).await.is_ok() {
    // Best-effort: any failure inside this async block is logged below
    // rather than propagated, so a broken sideload cannot abort the
    // version migration itself.
    if let Err(e) = async {
        let package_s9pk = tokio::fs::File::open(s9pk_path).await?;
        let file = MultiCursorFile::open(&package_s9pk).await?;
        // Developer key from the private section of the db — used by the
        // s9pk loader closure below.
        let key = ctx.db.peek().await.into_private().into_developer_key();
        let registry_url =
            Url::parse("https://registry.start9.com/").with_kind(ErrorKind::ParseUrl)?;
        ctx.services
            .install(
                ctx.clone(),
                || crate::s9pk::load(file.clone(), || Ok(key.de()?.0), None),
                None,
                None::<crate::util::Never>,
                None,
            )
            // NOTE(review): `install` appears to yield staged futures
            // (accepted -> progress -> completion), each awaited in turn —
            // confirm against ServiceMap::install.
            .await?
            .await?
            .await?;
        // Set the marketplace URL on the installed tor package
        let tor_id = "tor".parse::<crate::PackageId>()?;
        ctx.db
            .mutate(|db| {
                if let Some(pkg) =
                    db.as_public_mut().as_package_data_mut().as_idx_mut(&tor_id)
                {
                    pkg.as_registry_mut().ser(&Some(registry_url))?;
                }
                Ok(())
            })
            .await
            .result?;
        Ok::<_, Error>(())
    }
    .await
    {
        tracing::error!("Error installing tor package: {e}");
        tracing::debug!("{e:?}");
    }
}
Ok(())
}
fn down(self, _db: &mut Value) -> Result<(), Error> {

View File

@@ -0,0 +1,37 @@
use exver::{PreReleaseSegment, VersionRange};
use super::v0_3_5::V0_3_0_COMPAT;
use super::{VersionT, v0_4_0_alpha_20};
use crate::prelude::*;
lazy_static::lazy_static! {
    // exver form of this release: 0.4.0-alpha.21. The mixed-case name
    // follows the project's per-version convention used by the sibling
    // version modules rather than SCREAMING_SNAKE_CASE.
    static ref V0_4_0_alpha_21: exver::Version = exver::Version::new(
        [0, 4, 0],
        [PreReleaseSegment::String("alpha".into()), 21.into()]
    );
}
/// Migration marker type for StartOS 0.4.0-alpha.21.
///
/// This release performs no database changes of its own; the type exists
/// so the version chain (`Previous = alpha.20`) stays contiguous.
#[derive(Clone, Copy, Debug, Default)]
pub struct Version;
impl VersionT for Version {
    // Chain back to alpha.20 so migrations are applied in order.
    type Previous = v0_4_0_alpha_20::Version;
    // Nothing to prepare before `up` runs.
    type PreUpRes = ();
    async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
        Ok(())
    }
    fn semver(self) -> exver::Version {
        V0_4_0_alpha_21.clone()
    }
    fn compat(self) -> &'static VersionRange {
        // Still compatible back to the 0.3.0 line.
        &V0_3_0_COMPAT
    }
    #[instrument(skip_all)]
    fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
        // No db mutation; Null is presumably handed to `post_up` as its
        // `input` — TODO confirm against the VersionT driver.
        Ok(Value::Null)
    }
    fn down(self, _db: &mut Value) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,13 +1,19 @@
use std::path::{Path, PathBuf};
use tokio::process::Command;
use crate::PackageId;
pub use crate::VolumeId;
use crate::prelude::*;
use crate::util::Invoke;
use crate::util::VersionString;
use crate::DATA_DIR;
// Per-package live volume data lives under `<DATA_DIR>/package-data/volumes/<id>`.
pub const PKG_VOLUME_DIR: &str = "package-data/volumes";
// Mount point where user backup media is attached.
pub const BACKUP_DIR: &str = "/media/startos/backups";
// Appended to a package's volume dir name to form its pre-install snapshot dir.
const INSTALL_BACKUP_SUFFIX: &str = ".install-backup";
pub fn data_dir<P: AsRef<Path>>(datadir: P, pkg_id: &PackageId, volume_id: &VolumeId) -> PathBuf {
datadir
.as_ref()
@@ -33,3 +39,70 @@ pub fn asset_dir<P: AsRef<Path>>(
/// Location of `pkg_id`'s data inside the mounted backup media:
/// `/media/startos/backups/<pkg_id>/data`.
pub fn backup_dir(pkg_id: &PackageId) -> PathBuf {
    let mut dir = PathBuf::from(BACKUP_DIR);
    dir.push(pkg_id);
    dir.push("data");
    dir
}
/// Absolute path of the live volume directory for `pkg_id`:
/// `<DATA_DIR>/package-data/volumes/<pkg_id>`.
fn pkg_volume_dir(pkg_id: &PackageId) -> PathBuf {
    let mut dir = PathBuf::from(DATA_DIR);
    dir.push(PKG_VOLUME_DIR);
    dir.push(pkg_id);
    dir
}
/// Path of the pre-install snapshot directory for `pkg_id`:
/// `<DATA_DIR>/package-data/volumes/<pkg_id>.install-backup`.
fn install_backup_path(pkg_id: &PackageId) -> PathBuf {
    let name = format!("{pkg_id}{INSTALL_BACKUP_SUFFIX}");
    PathBuf::from(DATA_DIR).join(PKG_VOLUME_DIR).join(name)
}
/// Creates a COW snapshot of the package volume directory before install.
/// Uses `cp --reflink=always` so it's instant on btrfs and fails gracefully
/// on ext4 (no backup, current behavior preserved).
/// Returns `true` if a backup was created, `false` if no data existed or
/// the filesystem doesn't support reflinks.
pub async fn snapshot_volumes_for_install(pkg_id: &PackageId) -> Result<bool, Error> {
    let src = pkg_volume_dir(pkg_id);
    // No volume directory on disk yet — nothing to protect.
    if tokio::fs::metadata(&src).await.is_err() {
        return Ok(false);
    }
    let dst = install_backup_path(pkg_id);
    // Remove any stale backup from a previous failed attempt
    crate::util::io::delete_dir(&dst).await?;
    // `-a` preserves ownership/permissions/timestamps; `--reflink=always`
    // makes the copy a COW clone or fails outright (no slow fallback copy).
    let copy_result = Command::new("cp")
        .arg("-a")
        .arg("--reflink=always")
        .arg(&src)
        .arg(&dst)
        .invoke(ErrorKind::Filesystem)
        .await;
    if let Err(e) = copy_result {
        tracing::warn!(
            "Could not create install backup for {pkg_id} \
            (filesystem may not support reflinks): {e}"
        );
        // Clean up partial copy if any
        crate::util::io::delete_dir(&dst).await?;
        return Ok(false);
    }
    tracing::info!("Created install backup for {pkg_id} at {dst:?}");
    Ok(true)
}
/// Restores the package volume directory from a COW snapshot after a failed
/// install. The current (possibly corrupted) volume dir is deleted first.
/// No-op if no backup exists.
pub async fn restore_volumes_from_install_backup(pkg_id: &PackageId) -> Result<(), Error> {
    let backup = install_backup_path(pkg_id);
    // Only act when a snapshot actually exists on disk.
    if tokio::fs::metadata(&backup).await.is_ok() {
        let dst = pkg_volume_dir(pkg_id);
        // Drop the possibly half-written live dir, then move the snapshot in.
        crate::util::io::delete_dir(&dst).await?;
        crate::util::io::rename(&backup, &dst).await?;
        tracing::info!("Restored volumes from install backup for {pkg_id}");
    }
    Ok(())
}
/// Removes the install backup after a successful install.
pub async fn remove_install_backup(pkg_id: &PackageId) -> Result<(), Error> {
    let backup = install_backup_path(pkg_id);
    crate::util::io::delete_dir(&backup).await
}