From 65e1c9e5d862eb239ff515756f9d33806285b5b9 Mon Sep 17 00:00:00 2001 From: Aiden McClelland Date: Mon, 16 Mar 2026 10:50:33 -0600 Subject: [PATCH 1/7] chore: bump sdk to beta.60 --- container-runtime/package-lock.json | 2 +- sdk/CHANGELOG.md | 18 ++++++++++++++++++ sdk/package/package-lock.json | 4 ++-- sdk/package/package.json | 2 +- 4 files changed, 22 insertions(+), 4 deletions(-) diff --git a/container-runtime/package-lock.json b/container-runtime/package-lock.json index 22b581db5..6757e3235 100644 --- a/container-runtime/package-lock.json +++ b/container-runtime/package-lock.json @@ -37,7 +37,7 @@ }, "../sdk/dist": { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.59", + "version": "0.4.0-beta.60", "license": "MIT", "dependencies": { "@iarna/toml": "^3.0.0", diff --git a/sdk/CHANGELOG.md b/sdk/CHANGELOG.md index 881673e60..38e6d02e2 100644 --- a/sdk/CHANGELOG.md +++ b/sdk/CHANGELOG.md @@ -1,5 +1,23 @@ # Changelog +## 0.4.0-beta.60 — StartOS v0.4.0-alpha.20 (2026-03-16) + +### Added + +- Tunnel TS type exports and port forward labels +- Secure Boot MOK key enrollment fields in `SetupInfo` + +### Changed + +- Consolidated `Watchable` base class with generic `map`/`eq` support; renamed `call` to `fetch` +- Moved `GetServiceManifest` and `GetSslCertificate` from `package/` to `base/` +- Simplified `getServiceInterface`, `getServiceInterfaces`, `GetOutboundGateway`, `GetSystemSmtp`, and `fileHelper` using `Watchable` base class +- Simplified SDK Makefile with rsync + +### Fixed + +- Added `restart_again` flag to `DesiredStatus::Restarting` + ## 0.4.0-beta.59 — StartOS v0.4.0-alpha.20 (2026-03-06) ### Added diff --git a/sdk/package/package-lock.json b/sdk/package/package-lock.json index ea4287eb8..315a1d8ee 100644 --- a/sdk/package/package-lock.json +++ b/sdk/package/package-lock.json @@ -1,12 +1,12 @@ { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.59", + "version": "0.4.0-beta.60", "lockfileVersion": 3, "requires": true, "packages": { "": { 
"name": "@start9labs/start-sdk", - "version": "0.4.0-beta.59", + "version": "0.4.0-beta.60", "license": "MIT", "dependencies": { "@iarna/toml": "^3.0.0", diff --git a/sdk/package/package.json b/sdk/package/package.json index 2d31ac87e..722867719 100644 --- a/sdk/package/package.json +++ b/sdk/package/package.json @@ -1,6 +1,6 @@ { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.59", + "version": "0.4.0-beta.60", "description": "Software development kit to facilitate packaging services for StartOS", "main": "./package/lib/index.js", "types": "./package/lib/index.d.ts", From 9f7bc74a1e46997f745b1b3e064b5e3bbf03f3d2 Mon Sep 17 00:00:00 2001 From: Aiden McClelland Date: Mon, 16 Mar 2026 13:38:27 -0600 Subject: [PATCH 2/7] feat: add bundled tor s9pk download and build infrastructure --- .gitignore | 1 + Makefile | 9 ++++++++- build/download-tor-s9pk.sh | 14 ++++++++++++++ core/Cargo.lock | 20 ++++++++++++++++++++ core/Cargo.toml | 1 + 5 files changed, 44 insertions(+), 1 deletion(-) create mode 100755 build/download-tor-s9pk.sh diff --git a/.gitignore b/.gitignore index 10d8b5424..32c700eef 100644 --- a/.gitignore +++ b/.gitignore @@ -22,3 +22,4 @@ secrets.db tmp web/.i18n-checked docs/USER.md +*.s9pk diff --git a/Makefile b/Makefile index d04824d67..151f19ee6 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,8 @@ IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html FIRMWARE_ROMS := build/lib/firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./build/lib/firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json) -BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS) +TOR_S9PK := build/lib/tor_$(ARCH).s9pk +BUILD_SRC := $(call ls-files, build/lib) build/lib/depends 
build/lib/conflicts $(FIRMWARE_ROMS) $(TOR_S9PK) IMAGE_RECIPE_SRC := $(call ls-files, build/image-recipe/) STARTD_SRC := core/startd.service $(BUILD_SRC) CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE) @@ -188,6 +189,9 @@ install: $(STARTOS_TARGETS) $(call mkdir,$(DESTDIR)/lib/systemd/system) $(call cp,core/startd.service,$(DESTDIR)/lib/systemd/system/startd.service) + if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then \ + sed -i '/^Environment=/a Environment=RUST_BACKTRACE=full' $(DESTDIR)/lib/systemd/system/startd.service; \ + fi $(call mkdir,$(DESTDIR)/usr/lib) $(call rm,$(DESTDIR)/usr/lib/startos) @@ -312,6 +316,9 @@ build/lib/depends build/lib/conflicts: $(ENVIRONMENT_FILE) $(PLATFORM_FILE) $(sh $(FIRMWARE_ROMS): build/lib/firmware.json ./build/download-firmware.sh $(PLATFORM_FILE) ./build/download-firmware.sh $(PLATFORM) +$(TOR_S9PK): ./build/download-tor-s9pk.sh + ./build/download-tor-s9pk.sh $(ARCH) + core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE) ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-startbox.sh touch core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox diff --git a/build/download-tor-s9pk.sh b/build/download-tor-s9pk.sh new file mode 100755 index 000000000..8feb9f597 --- /dev/null +++ b/build/download-tor-s9pk.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +set -e + +ARCH=$1 + +if [ -z "$ARCH" ]; then + >&2 echo "usage: $0 <arch>" + exit 1 +fi + +curl --fail -L -o "./lib/tor_${ARCH}.s9pk" "https://s9pks.nyc3.cdn.digitaloceanspaces.com/tor_${ARCH}.s9pk" diff --git a/core/Cargo.lock b/core/Cargo.lock index 10caba090..ef8f9bb3e 100644 --- a/core/Cargo.lock +++ b/core/Cargo.lock @@ -3376,6 +3376,15 @@ dependencies = [ "serde_json", ] +[[package]] +name = "keccak" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" +dependencies = [ + "cpufeatures", +] + [[package]] name = "kv" version = "0.24.0" @@ -5985,6 +5994,16 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -6519,6 +6538,7 @@ dependencies = [ "serde_yml", "sha-crypt", "sha2 0.10.9", + "sha3", "signal-hook", "socket2 0.6.2", "socks5-impl", diff --git a/core/Cargo.toml b/core/Cargo.toml index cb1eb3c51..47341d188 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -200,6 +200,7 @@ serde_toml = { package = "toml", version = "0.9.9+spec-1.0.0" } serde_yaml = { package = "serde_yml", version = "0.0.12" } sha-crypt = "0.5.0" sha2 = "0.10.2" +sha3 = "0.10" signal-hook = "0.3.17" socket2 = { version = "0.6.0", features = ["all"] } socks5-impl = { version = "0.7.2", features = ["client", "server"] } From 90d8d39adfdaf18e883cba954c28f8075032d32c Mon Sep 17 00:00:00 2001 From: Aiden McClelland Date: Mon, 16 Mar 2026 13:38:40 -0600 Subject: [PATCH 3/7] feat: migrate tor onion keys during v0.3.6a0 to v0.4.0a20 upgrade Preserves tor service onion addresses by extracting keys from old database tables and preparing them for inclusion in the new tor service. 
--- core/src/version/v0_3_6_alpha_0.rs | 154 ++++++++++++++++++++-- core/src/version/v0_4_0_alpha_20.rs | 195 ++++++++++++++++++++-------- 2 files changed, 281 insertions(+), 68 deletions(-) diff --git a/core/src/version/v0_3_6_alpha_0.rs b/core/src/version/v0_3_6_alpha_0.rs index fbae2fc2f..e9c623bad 100644 --- a/core/src/version/v0_3_6_alpha_0.rs +++ b/core/src/version/v0_3_6_alpha_0.rs @@ -143,7 +143,8 @@ pub struct Version; impl VersionT for Version { type Previous = v0_3_5_2::Version; - type PreUpRes = (AccountInfo, SshKeys, CifsTargets); + /// (package_id, host_id, expanded_key) + type PreUpRes = (AccountInfo, SshKeys, CifsTargets, Vec<(String, String, [u8; 64])>); fn semver(self) -> exver::Version { V0_3_6_alpha_0.clone() } @@ -158,15 +159,17 @@ impl VersionT for Version { let cifs = previous_cifs(&pg).await?; + let tor_keys = previous_tor_keys(&pg).await?; + Command::new("systemctl") .arg("stop") .arg("postgresql@*.service") .invoke(crate::ErrorKind::Database) .await?; - Ok((account, ssh_keys, cifs)) + Ok((account, ssh_keys, cifs, tor_keys)) } - fn up(self, db: &mut Value, (account, ssh_keys, cifs): Self::PreUpRes) -> Result { + fn up(self, db: &mut Value, (account, ssh_keys, cifs, tor_keys): Self::PreUpRes) -> Result { let prev_package_data = db["package-data"].clone(); let wifi = json!({ @@ -183,6 +186,11 @@ impl VersionT for Version { "shuttingDown": db["server-info"]["status-info"]["shutting-down"], "restarting": db["server-info"]["status-info"]["restarting"], }); + let tor_address: String = from_value(db["server-info"]["tor-address"].clone())?; + let onion_address = tor_address + .replace("https://", "") + .replace("http://", "") + .replace(".onion/", ""); let server_info = { let mut server_info = json!({ "arch": db["server-info"]["arch"], @@ -196,15 +204,9 @@ impl VersionT for Version { }); server_info["postInitMigrationTodos"] = json!({}); - let tor_address: String = from_value(db["server-info"]["tor-address"].clone())?; // Maybe we do this like the 
Public::init does - server_info["torAddress"] = json!(tor_address); - server_info["onionAddress"] = json!( - tor_address - .replace("https://", "") - .replace("http://", "") - .replace(".onion/", "") - ); + server_info["torAddress"] = json!(&tor_address); + server_info["onionAddress"] = json!(&onion_address); server_info["networkInterfaces"] = json!({}); server_info["statusInfo"] = status_info; server_info["wifi"] = wifi; @@ -233,6 +235,30 @@ impl VersionT for Version { let private = { let mut value = json!({}); value["keyStore"] = crate::dbg!(to_value(&keystore)?); + // Preserve tor onion keys so later migrations (v0_4_0_alpha_20) can + // include them in onion-migration.json for the tor service. + if !tor_keys.is_empty() { + let mut onion_map: Value = json!({}); + let onion_obj = onion_map.as_object_mut().unwrap(); + let mut tor_migration = imbl::Vector::::new(); + for (package_id, host_id, key_bytes) in &tor_keys { + let onion_addr = onion_address_from_key(key_bytes); + let encoded_key = + base64::Engine::encode(&crate::util::serde::BASE64, key_bytes); + onion_obj.insert( + onion_addr.as_str().into(), + Value::String(encoded_key.clone().into()), + ); + tor_migration.push_back(json!({ + "hostname": &onion_addr, + "packageId": package_id, + "hostId": host_id, + "key": &encoded_key, + })); + } + value["keyStore"]["onion"] = onion_map; + value["torMigration"] = Value::Array(tor_migration); + } value["password"] = to_value(&account.password)?; value["compatS9pkKey"] = to_value(&crate::db::model::private::generate_developer_key())?; @@ -498,3 +524,109 @@ async fn previous_ssh_keys(pg: &sqlx::Pool) -> Result`. +/// Server key uses `("STARTOS", "STARTOS")`. +#[tracing::instrument(skip_all)] +async fn previous_tor_keys( + pg: &sqlx::Pool, +) -> Result, Error> { + let mut keys = Vec::new(); + + // Server tor key from the account table. + // Older installs have tor_key (64 bytes). 
Newer installs (post-NetworkKeys migration) + made tor_key nullable and use network_key (32 bytes, needs expansion) instead. + let row = sqlx::query(r#"SELECT tor_key, network_key FROM account"#) + .fetch_one(pg) + .await + .with_kind(ErrorKind::Database)?; + if let Ok(tor_key) = row.try_get::<Vec<u8>, _>("tor_key") { + if let Ok(key) = <[u8; 64]>::try_from(tor_key) { + keys.push(("STARTOS".to_owned(), "STARTOS".to_owned(), key)); + } + } else if let Ok(net_key) = row.try_get::<Vec<u8>, _>("network_key") { + if let Ok(seed) = <[u8; 32]>::try_from(net_key) { + keys.push(( + "STARTOS".to_owned(), + "STARTOS".to_owned(), + crate::util::crypto::ed25519_expand_key(&seed), + )); + } + } + + // Package tor keys from the network_keys table (32-byte keys that need expansion) + if let Ok(rows) = sqlx::query(r#"SELECT package, interface, key FROM network_keys"#) + .fetch_all(pg) + .await + { + for row in rows { + let Ok(package) = row.try_get::<String, _>("package") else { + continue; + }; + let Ok(interface) = row.try_get::<String, _>("interface") else { + continue; + }; + let Ok(key_bytes) = row.try_get::<Vec<u8>, _>("key") else { + continue; + }; + if let Ok(seed) = <[u8; 32]>::try_from(key_bytes) { + keys.push(( + package, + interface, + crate::util::crypto::ed25519_expand_key(&seed), + )); + } + } + } + + // Package tor keys from the tor table (already 64-byte expanded keys) + if let Ok(rows) = sqlx::query(r#"SELECT package, interface, key FROM tor"#) + .fetch_all(pg) + .await + { + for row in rows { + let Ok(package) = row.try_get::<String, _>("package") else { + continue; + }; + let Ok(interface) = row.try_get::<String, _>("interface") else { + continue; + }; + let Ok(key_bytes) = row.try_get::<Vec<u8>, _>("key") else { + continue; + }; + if let Ok(key) = <[u8; 64]>::try_from(key_bytes) { + keys.push((package, interface, key)); + } + } + } + + Ok(keys) +} + +/// Derive the tor v3 onion address (without .onion suffix) from a 64-byte +/// expanded ed25519 secret key. 
+fn onion_address_from_key(expanded_key: &[u8; 64]) -> String { + use sha3::Digest; + + // Derive public key from expanded secret key using ed25519-dalek v1 + let esk = + ed25519_dalek_v1::ExpandedSecretKey::from_bytes(expanded_key).expect("invalid tor key"); + let pk = ed25519_dalek_v1::PublicKey::from(&esk); + let pk_bytes = pk.to_bytes(); + + // Compute onion v3 address: base32(pubkey || checksum || version) + // checksum = SHA3-256(".onion checksum" || pubkey || version)[0..2] + let mut hasher = sha3::Sha3_256::new(); + hasher.update(b".onion checksum"); + hasher.update(&pk_bytes); + hasher.update(b"\x03"); + let hash = hasher.finalize(); + + let mut raw = [0u8; 35]; + raw[..32].copy_from_slice(&pk_bytes); + raw[32] = hash[0]; // checksum byte 0 + raw[33] = hash[1]; // checksum byte 1 + raw[34] = 0x03; // version + + base32::encode(base32::Alphabet::Rfc4648 { padding: false }, &raw).to_ascii_lowercase() +} diff --git a/core/src/version/v0_4_0_alpha_20.rs b/core/src/version/v0_4_0_alpha_20.rs index 62b454bb1..01bff2251 100644 --- a/core/src/version/v0_4_0_alpha_20.rs +++ b/core/src/version/v0_4_0_alpha_20.rs @@ -2,11 +2,13 @@ use std::path::Path; use exver::{PreReleaseSegment, VersionRange}; use imbl_value::json; +use reqwest::Url; use super::v0_3_5::V0_3_0_COMPAT; use super::{VersionT, v0_4_0_alpha_19}; use crate::context::RpcContext; use crate::prelude::*; +use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile; lazy_static::lazy_static! { static ref V0_4_0_alpha_20: exver::Version = exver::Version::new( @@ -33,74 +35,106 @@ impl VersionT for Version { } #[instrument(skip_all)] fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result { - // Extract onion migration data before removing it - let onion_store = db + // Use the pre-built torMigration data from v0_3_6_alpha_0 if available. 
+ // This contains all (hostname, packageId, hostId, key) entries with keys + // already resolved, avoiding the issue where packageData is empty during + // migration (packages aren't reinstalled until post_up). + let migration_data = if let Some(tor_migration) = db .get("private") - .and_then(|p| p.get("keyStore")) - .and_then(|k| k.get("onion")) - .cloned() - .unwrap_or(Value::Object(Default::default())); - - let mut addresses = imbl::Vector::::new(); - - // Extract OS host onion addresses - if let Some(onions) = db - .get("public") - .and_then(|p| p.get("serverInfo")) - .and_then(|s| s.get("network")) - .and_then(|n| n.get("host")) - .and_then(|h| h.get("onions")) - .and_then(|o| o.as_array()) + .and_then(|p| p.get("torMigration")) + .and_then(|t| t.as_array()) { - for onion in onions { - if let Some(hostname) = onion.as_str() { - let key = onion_store - .get(hostname) - .and_then(|v| v.as_str()) - .unwrap_or_default(); - addresses.push_back(json!({ - "hostname": hostname, - "packageId": "STARTOS", - "hostId": "STARTOS", - "key": key, - })); + json!({ + "addresses": tor_migration.clone(), + }) + } else { + // Fallback for fresh installs or installs that didn't go through + // v0_3_6_alpha_0 with the torMigration field. 
+ let onion_store = db + .get("private") + .and_then(|p| p.get("keyStore")) + .and_then(|k| k.get("onion")) + .cloned() + .unwrap_or(Value::Object(Default::default())); + + let mut addresses = imbl::Vector::::new(); + + // Extract OS host onion addresses + if let Some(onions) = db + .get("public") + .and_then(|p| p.get("serverInfo")) + .and_then(|s| s.get("network")) + .and_then(|n| n.get("host")) + .and_then(|h| h.get("onions")) + .and_then(|o| o.as_array()) + { + for onion in onions { + if let Some(hostname) = onion.as_str() { + let key = onion_store + .get(hostname) + .and_then(|v| v.as_str()) + .ok_or_else(|| { + Error::new( + eyre!("missing tor key for onion address {hostname}"), + ErrorKind::Database, + ) + })?; + addresses.push_back(json!({ + "hostname": hostname, + "packageId": "STARTOS", + "hostId": "startos-ui", + "key": key, + })); + } } } - } - // Extract package host onion addresses - if let Some(packages) = db - .get("public") - .and_then(|p| p.get("packageData")) - .and_then(|p| p.as_object()) - { - for (package_id, package) in packages.iter() { - if let Some(hosts) = package.get("hosts").and_then(|h| h.as_object()) { - for (host_id, host) in hosts.iter() { - if let Some(onions) = host.get("onions").and_then(|o| o.as_array()) { - for onion in onions { - if let Some(hostname) = onion.as_str() { - let key = onion_store - .get(hostname) - .and_then(|v| v.as_str()) - .unwrap_or_default(); - addresses.push_back(json!({ - "hostname": hostname, - "packageId": &**package_id, - "hostId": &**host_id, - "key": key, - })); + // Extract package host onion addresses + if let Some(packages) = db + .get("public") + .and_then(|p| p.get("packageData")) + .and_then(|p| p.as_object()) + { + for (package_id, package) in packages.iter() { + if let Some(hosts) = package.get("hosts").and_then(|h| h.as_object()) { + for (host_id, host) in hosts.iter() { + if let Some(onions) = host.get("onions").and_then(|o| o.as_array()) { + for onion in onions { + if let Some(hostname) = 
onion.as_str() { + let key = onion_store + .get(hostname) + .and_then(|v| v.as_str()) + .ok_or_else(|| { + Error::new( + eyre!( + "missing tor key for onion address {hostname}" + ), + ErrorKind::Database, + ) + })?; + addresses.push_back(json!({ + "hostname": hostname, + "packageId": &**package_id, + "hostId": &**host_id, + "key": key, + })); + } } } } } } } - } - let migration_data = json!({ - "addresses": addresses, - }); + json!({ + "addresses": addresses, + }) + }; + + // Clean up torMigration from private + if let Some(private) = db.get_mut("private").and_then(|p| p.as_object_mut()) { + private.remove("torMigration"); + } // Remove onions and tor-related fields from server host if let Some(host) = db @@ -200,7 +234,7 @@ impl VersionT for Version { } #[instrument(skip_all)] - async fn post_up(self, _ctx: &RpcContext, input: Value) -> Result<(), Error> { + async fn post_up(self, ctx: &RpcContext, input: Value) -> Result<(), Error> { let path = Path::new( "/media/startos/data/package-data/volumes/tor/data/startos/onion-migration.json", ); @@ -209,6 +243,53 @@ impl VersionT for Version { crate::util::io::write_file_atomic(path, json).await?; + // Sideload the bundled tor s9pk + let s9pk_path_str = format!("/usr/lib/startos/tor_{}.s9pk", crate::ARCH); + let s9pk_path = Path::new(&s9pk_path_str); + if tokio::fs::metadata(s9pk_path).await.is_ok() { + if let Err(e) = async { + let package_s9pk = tokio::fs::File::open(s9pk_path).await?; + let file = MultiCursorFile::open(&package_s9pk).await?; + + let key = ctx.db.peek().await.into_private().into_developer_key(); + let registry_url = + Url::parse("https://registry.start9.com/").with_kind(ErrorKind::ParseUrl)?; + + ctx.services + .install( + ctx.clone(), + || crate::s9pk::load(file.clone(), || Ok(key.de()?.0), None), + None, + None::, + None, + ) + .await? + .await? 
+ .await?; + + // Set the marketplace URL on the installed tor package + let tor_id = "tor".parse::<PackageId>()?; + ctx.db + .mutate(|db| { + if let Some(pkg) = + db.as_public_mut().as_package_data_mut().as_idx_mut(&tor_id) + { + pkg.as_registry_mut().ser(&Some(registry_url))?; + } + Ok(()) + }) + .await + .result?; + + Ok::<_, Error>(()) + } + .await + { + tracing::error!("Error installing tor package: {e}"); + tracing::debug!("{e:?}"); + } + } + Ok(()) } fn down(self, _db: &mut Value) -> Result<(), Error> { From 8562e1e19d47807e1a6c23dde5d17a33f7bb933d Mon Sep 17 00:00:00 2001 From: Aiden McClelland Date: Mon, 16 Mar 2026 13:39:04 -0600 Subject: [PATCH 4/7] refactor: change kiosk parameter from Option<bool> to bool Simplifies the setup API by making kiosk mandatory at the protocol level, with platform-specific filtering applied at the database layer. --- core/src/backup/restore.rs | 5 ++--- core/src/db/model/mod.rs | 2 +- core/src/db/model/public.rs | 4 ++-- core/src/init.rs | 6 +++-- core/src/setup.rs | 22 ++++++++----------- core/src/system/mod.rs | 12 +++++----- sdk/base/lib/osBindings/AttachParams.ts | 2 +- sdk/base/lib/osBindings/SetupExecuteParams.ts | 2 +- .../src/app/services/state.service.ts | 2 ++ 9 files changed, 27 insertions(+), 30 deletions(-) diff --git a/core/src/backup/restore.rs b/core/src/backup/restore.rs index bc96d8823..77e7181f5 100644 --- a/core/src/backup/restore.rs +++ b/core/src/backup/restore.rs @@ -10,6 +10,7 @@ use tracing::instrument; use ts_rs::TS; use super::target::BackupTargetId; +use crate::PackageId; use crate::backup::os::OsBackup; use crate::context::setup::SetupResult; use crate::context::{RpcContext, SetupContext}; @@ -26,7 +27,6 @@ use crate::service::service_map::DownloadInstallFuture; use crate::setup::SetupExecuteProgress; use crate::system::{save_language, sync_kiosk}; use crate::util::serde::{IoFormat, Pem}; -use crate::{PLATFORM, PackageId}; @@ -90,7 
+90,7 @@ pub async fn recover_full_server( recovery_source: TmpMountGuard, server_id: &str, recovery_password: &str, - kiosk: Option, + kiosk: bool, hostname: Option, SetupExecuteProgress { init_phases, @@ -123,7 +123,6 @@ pub async fn recover_full_server( os_backup.account.hostname = h; } - let kiosk = Some(kiosk.unwrap_or(true)).filter(|_| &*PLATFORM != "raspberrypi"); sync_kiosk(kiosk).await?; let language = ctx.language.peek(|a| a.clone()); diff --git a/core/src/db/model/mod.rs b/core/src/db/model/mod.rs index 05fc8502d..1cdc7f9bf 100644 --- a/core/src/db/model/mod.rs +++ b/core/src/db/model/mod.rs @@ -31,7 +31,7 @@ pub struct Database { impl Database { pub fn init( account: &AccountInfo, - kiosk: Option, + kiosk: bool, language: Option, keyboard: Option, ) -> Result { diff --git a/core/src/db/model/public.rs b/core/src/db/model/public.rs index a07375adf..9477dac82 100644 --- a/core/src/db/model/public.rs +++ b/core/src/db/model/public.rs @@ -49,7 +49,7 @@ pub struct Public { impl Public { pub fn init( account: &AccountInfo, - kiosk: Option, + kiosk: bool, language: Option, keyboard: Option, ) -> Result { @@ -149,7 +149,7 @@ impl Public { echoip_urls: default_echoip_urls(), ram: 0, devices: Vec::new(), - kiosk, + kiosk: Some(kiosk).filter(|_| &*PLATFORM != "raspberrypi"), language, keyboard, }, diff --git a/core/src/init.rs b/core/src/init.rs index fe5f334f2..ce489c0e9 100644 --- a/core/src/init.rs +++ b/core/src/init.rs @@ -174,7 +174,9 @@ pub async fn init( local_auth.complete(); // Re-enroll MOK on every boot if Secure Boot key exists but isn't enrolled yet - if let Err(e) = crate::util::mok::enroll_mok(std::path::Path::new(crate::util::mok::DKMS_MOK_PUB)).await { + if let Err(e) = + crate::util::mok::enroll_mok(std::path::Path::new(crate::util::mok::DKMS_MOK_PUB)).await + { tracing::warn!("MOK enrollment failed: {e}"); } @@ -369,7 +371,7 @@ pub async fn init( enable_zram.complete(); update_server_info.start(); - 
sync_kiosk(server_info.as_kiosk().de()?).await?; + sync_kiosk(server_info.as_kiosk().de()?.unwrap_or(false)).await?; let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024; let devices = lshw().await?; let status_info = ServerStatus { diff --git a/core/src/setup.rs b/core/src/setup.rs index 1b33268b3..e6aa68212 100644 --- a/core/src/setup.rs +++ b/core/src/setup.rs @@ -115,7 +115,7 @@ pub async fn list_disks(ctx: SetupContext) -> Result, Error> { async fn setup_init( ctx: &SetupContext, password: Option, - kiosk: Option, + kiosk: bool, hostname: Option, init_phases: InitPhases, ) -> Result<(AccountInfo, InitResult), Error> { @@ -137,9 +137,8 @@ async fn setup_init( account.save(m)?; let info = m.as_public_mut().as_server_info_mut(); info.as_password_hash_mut().ser(&account.password)?; - if let Some(kiosk) = kiosk { - info.as_kiosk_mut().ser(&Some(kiosk))?; - } + info.as_kiosk_mut() + .ser(&Some(kiosk).filter(|_| &*PLATFORM != "raspberrypi"))?; if let Some(language) = language.clone() { info.as_language_mut().ser(&Some(language))?; } @@ -174,8 +173,7 @@ async fn setup_init( pub struct AttachParams { pub password: Option, pub guid: InternedString, - #[ts(optional)] - pub kiosk: Option, + pub kiosk: bool, } #[instrument(skip_all)] @@ -411,8 +409,7 @@ pub struct SetupExecuteParams { guid: InternedString, password: Option, recovery_source: Option>, - #[ts(optional)] - kiosk: Option, + kiosk: bool, name: Option, hostname: Option, } @@ -549,7 +546,7 @@ pub async fn execute_inner( guid: InternedString, password: Option, recovery_source: Option>, - kiosk: Option, + kiosk: bool, hostname: Option, ) -> Result<(SetupResult, RpcContext), Error> { let progress = &ctx.progress; @@ -622,7 +619,7 @@ async fn fresh_setup( ctx: &SetupContext, guid: InternedString, password: &str, - kiosk: Option, + kiosk: bool, hostname: Option, SetupExecuteProgress { init_phases, @@ -633,7 +630,6 @@ async fn fresh_setup( let account = AccountInfo::new(password, root_ca_start_time().await, 
hostname)?; let db = ctx.db().await?; - let kiosk = Some(kiosk.unwrap_or(true)).filter(|_| &*PLATFORM != "raspberrypi"); sync_kiosk(kiosk).await?; let language = ctx.language.peek(|a| a.clone()); @@ -684,7 +680,7 @@ async fn recover( recovery_source: BackupTargetFS, server_id: String, recovery_password: String, - kiosk: Option, + kiosk: bool, hostname: Option, progress: SetupExecuteProgress, ) -> Result<(SetupResult, RpcContext), Error> { @@ -709,7 +705,7 @@ async fn migrate( guid: InternedString, old_guid: &str, password: Option, - kiosk: Option, + kiosk: bool, hostname: Option, SetupExecuteProgress { init_phases, diff --git a/core/src/system/mod.rs b/core/src/system/mod.rs index 9caf9687b..893a45df9 100644 --- a/core/src/system/mod.rs +++ b/core/src/system/mod.rs @@ -319,13 +319,11 @@ pub fn kernel_logs>() -> ParentHandler) -> Result<(), Error> { - if let Some(kiosk) = kiosk { - if kiosk { - enable_kiosk().await?; - } else { - disable_kiosk().await?; - } +pub async fn sync_kiosk(kiosk: bool) -> Result<(), Error> { + if kiosk { + enable_kiosk().await?; + } else { + disable_kiosk().await?; } Ok(()) } diff --git a/sdk/base/lib/osBindings/AttachParams.ts b/sdk/base/lib/osBindings/AttachParams.ts index 31283fec6..e469833c2 100644 --- a/sdk/base/lib/osBindings/AttachParams.ts +++ b/sdk/base/lib/osBindings/AttachParams.ts @@ -4,5 +4,5 @@ import type { EncryptedWire } from './EncryptedWire' export type AttachParams = { password: EncryptedWire | null guid: string - kiosk?: boolean + kiosk: boolean } diff --git a/sdk/base/lib/osBindings/SetupExecuteParams.ts b/sdk/base/lib/osBindings/SetupExecuteParams.ts index 69f358c54..a1581c752 100644 --- a/sdk/base/lib/osBindings/SetupExecuteParams.ts +++ b/sdk/base/lib/osBindings/SetupExecuteParams.ts @@ -6,7 +6,7 @@ export type SetupExecuteParams = { guid: string password: EncryptedWire | null recoverySource: RecoverySource | null - kiosk?: boolean + kiosk: boolean name: string | null hostname: string | null } diff --git 
a/web/projects/setup-wizard/src/app/services/state.service.ts b/web/projects/setup-wizard/src/app/services/state.service.ts index 4a5b5090f..6f923c544 100644 --- a/web/projects/setup-wizard/src/app/services/state.service.ts +++ b/web/projects/setup-wizard/src/app/services/state.service.ts @@ -72,6 +72,7 @@ export class StateService { await this.api.attach({ guid: this.dataDriveGuid, password: password ? await this.api.encrypt(password) : null, + kiosk: this.kiosk, }) } @@ -106,6 +107,7 @@ export class StateService { name, hostname, recoverySource, + kiosk: this.kiosk, }) } From 3ae24e63e2bb5e2b4d541d9ef2bc4a6f5c08b4bb Mon Sep 17 00:00:00 2001 From: Aiden McClelland Date: Mon, 16 Mar 2026 13:39:35 -0600 Subject: [PATCH 5/7] perf: add O_DIRECT uploads and stabilize RPC continuation shutdown Implements DirectIoFile for faster package uploads by bypassing page cache. Refactors RpcContinuations to support graceful WebSocket shutdown via broadcast signal, improving stability during daemon restart. 
--- core/src/context/diagnostic.rs | 2 +- core/src/context/init.rs | 2 +- core/src/context/rpc.rs | 4 +- core/src/context/setup.rs | 2 +- core/src/registry/context.rs | 2 +- core/src/rpc_continuations.rs | 37 ++-- core/src/tunnel/context.rs | 2 +- core/src/upload.rs | 53 ++++-- core/src/util/direct_io.rs | 298 +++++++++++++++++++++++++++++++++ core/src/util/mod.rs | 1 + 10 files changed, 368 insertions(+), 35 deletions(-) create mode 100644 core/src/util/direct_io.rs diff --git a/core/src/context/diagnostic.rs b/core/src/context/diagnostic.rs index c069d017f..bf27da071 100644 --- a/core/src/context/diagnostic.rs +++ b/core/src/context/diagnostic.rs @@ -39,7 +39,7 @@ impl DiagnosticContext { shutdown, disk_guid, error: Arc::new(error.into()), - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), }))) } } diff --git a/core/src/context/init.rs b/core/src/context/init.rs index b7d5eac6a..5f6c35222 100644 --- a/core/src/context/init.rs +++ b/core/src/context/init.rs @@ -32,7 +32,7 @@ impl InitContext { error: watch::channel(None).0, progress, shutdown, - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), }))) } } diff --git a/core/src/context/rpc.rs b/core/src/context/rpc.rs index 61ce35020..204b000b5 100644 --- a/core/src/context/rpc.rs +++ b/core/src/context/rpc.rs @@ -62,8 +62,8 @@ pub struct RpcContextSeed { pub db: TypedPatchDb, pub sync_db: watch::Sender, pub account: SyncRwLock, - pub net_controller: Arc, pub os_net_service: NetService, + pub net_controller: Arc, pub s9pk_arch: Option<&'static str>, pub services: ServiceMap, pub cancellable_installs: SyncMutex>>, @@ -346,10 +346,10 @@ impl RpcContext { services, cancellable_installs: SyncMutex::new(BTreeMap::new()), metrics_cache, + rpc_continuations: RpcContinuations::new(Some(shutdown.clone())), shutdown, lxc_manager: Arc::new(LxcManager::new()), open_authed_continuations: OpenAuthedContinuations::new(), - rpc_continuations: 
RpcContinuations::new(), wifi_manager: Arc::new(RwLock::new(wifi_interface.clone().map(|i| WpaCli::init(i)))), current_secret: Arc::new( Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).map_err(|e| { diff --git a/core/src/context/setup.rs b/core/src/context/setup.rs index d4d0bb9de..3d16624ef 100644 --- a/core/src/context/setup.rs +++ b/core/src/context/setup.rs @@ -85,7 +85,7 @@ impl SetupContext { result: OnceCell::new(), disk_guid: OnceCell::new(), shutdown, - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), install_rootfs: SyncMutex::new(None), language: SyncMutex::new(None), keyboard: SyncMutex::new(None), diff --git a/core/src/registry/context.rs b/core/src/registry/context.rs index c9773d14b..2aa5739f2 100644 --- a/core/src/registry/context.rs +++ b/core/src/registry/context.rs @@ -141,7 +141,7 @@ impl RegistryContext { listen: config.registry_listen.unwrap_or(DEFAULT_REGISTRY_LISTEN), db, datadir, - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), client: Client::builder() .proxy(Proxy::custom(move |url| { if url.host_str().map_or(false, |h| h.ends_with(".onion")) { diff --git a/core/src/rpc_continuations.rs b/core/src/rpc_continuations.rs index 42c3ae858..e084264ab 100644 --- a/core/src/rpc_continuations.rs +++ b/core/src/rpc_continuations.rs @@ -17,6 +17,7 @@ use ts_rs::TS; #[allow(unused_imports)] use crate::prelude::*; +use crate::shutdown::Shutdown; use crate::util::future::TimedResource; use crate::util::net::WebSocket; use crate::util::{FromStrParser, new_guid}; @@ -98,12 +99,15 @@ pub type RestHandler = Box RestFuture + Send>; pub struct WebSocketFuture { kill: Option>, + shutdown: Option>>, fut: BoxFuture<'static, ()>, } impl Future for WebSocketFuture { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - if self.kill.as_ref().map_or(false, |k| !k.is_empty()) { + if self.kill.as_ref().map_or(false, |k| !k.is_empty()) 
+ || self.shutdown.as_ref().map_or(false, |s| !s.is_empty()) + { Poll::Ready(()) } else { self.fut.poll_unpin(cx) @@ -138,6 +142,7 @@ impl RpcContinuation { RpcContinuation::WebSocket(TimedResource::new( Box::new(|ws| WebSocketFuture { kill: None, + shutdown: None, fut: handler(ws.into()).boxed(), }), timeout, @@ -170,6 +175,7 @@ impl RpcContinuation { RpcContinuation::WebSocket(TimedResource::new( Box::new(|ws| WebSocketFuture { kill, + shutdown: None, fut: handler(ws.into()).boxed(), }), timeout, @@ -183,15 +189,21 @@ impl RpcContinuation { } } -pub struct RpcContinuations(AsyncMutex>); +pub struct RpcContinuations { + continuations: AsyncMutex>, + shutdown: Option>>, +} impl RpcContinuations { - pub fn new() -> Self { - RpcContinuations(AsyncMutex::new(BTreeMap::new())) + pub fn new(shutdown: Option>>) -> Self { + RpcContinuations { + continuations: AsyncMutex::new(BTreeMap::new()), + shutdown, + } } #[instrument(skip_all)] pub async fn clean(&self) { - let mut continuations = self.0.lock().await; + let mut continuations = self.continuations.lock().await; let mut to_remove = Vec::new(); for (guid, cont) in &*continuations { if cont.is_timed_out() { @@ -206,23 +218,28 @@ impl RpcContinuations { #[instrument(skip_all)] pub async fn add(&self, guid: Guid, handler: RpcContinuation) { self.clean().await; - self.0.lock().await.insert(guid, handler); + self.continuations.lock().await.insert(guid, handler); } pub async fn get_ws_handler(&self, guid: &Guid) -> Option { - let mut continuations = self.0.lock().await; + let mut continuations = self.continuations.lock().await; if !matches!(continuations.get(guid), Some(RpcContinuation::WebSocket(_))) { return None; } let Some(RpcContinuation::WebSocket(x)) = continuations.remove(guid) else { return None; }; - x.get().await + let handler = x.get().await?; + let shutdown = self.shutdown.as_ref().map(|s| s.subscribe()); + Some(Box::new(move |ws| { + let mut fut = handler(ws); + fut.shutdown = shutdown; + fut + })) } pub async 
fn get_rest_handler(&self, guid: &Guid) -> Option { - let mut continuations: tokio::sync::MutexGuard<'_, BTreeMap> = - self.0.lock().await; + let mut continuations = self.continuations.lock().await; if !matches!(continuations.get(guid), Some(RpcContinuation::Rest(_))) { return None; } diff --git a/core/src/tunnel/context.rs b/core/src/tunnel/context.rs index 1cb23c49e..0d6ab5df8 100644 --- a/core/src/tunnel/context.rs +++ b/core/src/tunnel/context.rs @@ -201,7 +201,7 @@ impl TunnelContext { listen, db, datadir, - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), open_authed_continuations: OpenAuthedContinuations::new(), ephemeral_sessions: SyncMutex::new(Sessions::new()), net_iface, diff --git a/core/src/upload.rs b/core/src/upload.rs index 5812834da..96f82b812 100644 --- a/core/src/upload.rs +++ b/core/src/upload.rs @@ -13,7 +13,6 @@ use futures::{FutureExt, Stream, StreamExt, ready}; use http::header::CONTENT_LENGTH; use http::{HeaderMap, StatusCode}; use imbl_value::InternedString; -use tokio::fs::File; use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt, AsyncWrite, AsyncWriteExt}; use tokio::sync::watch; @@ -23,6 +22,7 @@ use crate::progress::{PhaseProgressTrackerHandle, ProgressUnits}; use crate::rpc_continuations::{Guid, RpcContinuation}; use crate::s9pk::merkle_archive::source::ArchiveSource; use crate::s9pk::merkle_archive::source::multi_cursor_file::{FileCursor, MultiCursorFile}; +use crate::util::direct_io::DirectIoFile; use crate::util::io::{TmpDir, create_file}; pub async fn upload( @@ -69,16 +69,6 @@ impl Progress { false } } - fn handle_write(&mut self, res: &std::io::Result) -> bool { - match res { - Ok(a) => { - self.written += *a as u64; - self.tracker += *a as u64; - true - } - Err(e) => self.handle_error(e), - } - } async fn expected_size(watch: &mut watch::Receiver) -> Option { watch .wait_for(|progress| progress.error.is_some() || progress.expected_size.is_some()) @@ -192,16 +182,19 @@ impl 
UploadingFile { complete: false, }); let file = create_file(path).await?; + let multi_cursor = MultiCursorFile::open(&file).await?; + let direct_file = DirectIoFile::from_tokio_file(file).await?; let uploading = Self { tmp_dir: None, - file: MultiCursorFile::open(&file).await?, + file: multi_cursor, progress: progress.1, }; Ok(( UploadHandle { tmp_dir: None, - file, + file: direct_file, progress: progress.0, + last_synced: 0, }, uploading, )) @@ -346,8 +339,9 @@ impl AsyncSeek for UploadingFileReader { pub struct UploadHandle { tmp_dir: Option>, #[pin] - file: File, + file: DirectIoFile, progress: watch::Sender, + last_synced: u64, } impl UploadHandle { pub async fn upload(&mut self, request: Request) { @@ -394,6 +388,19 @@ impl UploadHandle { if let Err(e) = self.file.sync_all().await { self.progress.send_if_modified(|p| p.handle_error(&e)); } + // Update progress with final synced bytes + self.update_sync_progress(); + } + fn update_sync_progress(&mut self) { + let synced = self.file.bytes_synced(); + let delta = synced - self.last_synced; + if delta > 0 { + self.last_synced = synced; + self.progress.send_modify(|p| { + p.written += delta; + p.tracker += delta; + }); + } } } #[pin_project::pinned_drop] @@ -410,13 +417,23 @@ impl AsyncWrite for UploadHandle { buf: &[u8], ) -> Poll> { let this = self.project(); + // Update progress based on bytes actually flushed to disk + let synced = this.file.bytes_synced(); + let delta = synced - *this.last_synced; + if delta > 0 { + *this.last_synced = synced; + this.progress.send_modify(|p| { + p.written += delta; + p.tracker += delta; + }); + } match this.file.poll_write(cx, buf) { - Poll::Ready(res) => { + Poll::Ready(Err(e)) => { this.progress - .send_if_modified(|progress| progress.handle_write(&res)); - Poll::Ready(res) + .send_if_modified(|progress| progress.handle_error(&e)); + Poll::Ready(Err(e)) } - Poll::Pending => Poll::Pending, + a => a, } } fn poll_flush( diff --git a/core/src/util/direct_io.rs 
b/core/src/util/direct_io.rs new file mode 100644 index 000000000..9c3880f8c --- /dev/null +++ b/core/src/util/direct_io.rs @@ -0,0 +1,298 @@ +use std::alloc::Layout; +use std::io::Write; +use std::os::fd::AsRawFd; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use tokio::io::AsyncWrite; +use tokio::task::JoinHandle; + +const BLOCK_SIZE: usize = 4096; +const BUF_CAP: usize = 256 * 1024; // 256KB + +/// Aligned buffer for O_DIRECT I/O. +struct AlignedBuf { + ptr: *mut u8, + len: usize, +} + +// SAFETY: We have exclusive ownership of the allocation. +unsafe impl Send for AlignedBuf {} + +impl AlignedBuf { + fn new() -> Self { + let layout = Layout::from_size_align(BUF_CAP, BLOCK_SIZE).unwrap(); + // SAFETY: layout has non-zero size + let ptr = unsafe { std::alloc::alloc(layout) }; + if ptr.is_null() { + std::alloc::handle_alloc_error(layout); + } + Self { ptr, len: 0 } + } + + fn as_slice(&self) -> &[u8] { + // SAFETY: ptr is valid for len bytes, properly aligned, exclusively owned + unsafe { std::slice::from_raw_parts(self.ptr, self.len) } + } + + fn push(&mut self, data: &[u8]) -> usize { + let n = data.len().min(BUF_CAP - self.len); + // SAFETY: src and dst don't overlap, both valid for n bytes + unsafe { + std::ptr::copy_nonoverlapping(data.as_ptr(), self.ptr.add(self.len), n); + } + self.len += n; + n + } + + fn aligned_len(&self) -> usize { + self.len & !(BLOCK_SIZE - 1) + } + + fn drain_front(&mut self, n: usize) { + debug_assert!(n <= self.len); + let remaining = self.len - n; + if remaining > 0 { + // SAFETY: regions may overlap, so we use copy (memmove) + unsafe { + std::ptr::copy(self.ptr.add(n), self.ptr, remaining); + } + } + self.len = remaining; + } + + /// Extract aligned data into a new buffer for flushing, leaving remainder. 
+ fn take_aligned(&mut self) -> Option<(AlignedBuf, u64)> { + let aligned = self.aligned_len(); + if aligned == 0 { + return None; + } + let mut flush_buf = AlignedBuf::new(); + flush_buf.push(&self.as_slice()[..aligned]); + self.drain_front(aligned); + Some((flush_buf, aligned as u64)) + } +} + +impl Drop for AlignedBuf { + fn drop(&mut self) { + let layout = Layout::from_size_align(BUF_CAP, BLOCK_SIZE).unwrap(); + // SAFETY: ptr was allocated with this layout in new() + unsafe { std::alloc::dealloc(self.ptr, layout) }; + } +} + +enum FileState { + Idle(std::fs::File), + Flushing(JoinHandle>), + Done, +} + +/// A file writer that uses O_DIRECT to bypass the kernel page cache. +/// +/// Buffers writes in an aligned buffer and flushes to disk in the background. +/// New writes can proceed while a flush is in progress (double-buffering). +/// Progress is tracked via [`bytes_synced`](Self::bytes_synced), which reflects +/// bytes actually written to disk. +pub struct DirectIoFile { + file_state: FileState, + buf: AlignedBuf, + synced: u64, +} + +impl DirectIoFile { + fn new(file: std::fs::File) -> Self { + Self { + file_state: FileState::Idle(file), + buf: AlignedBuf::new(), + synced: 0, + } + } + + /// Convert an existing tokio File into a DirectIoFile by adding O_DIRECT. + pub async fn from_tokio_file(file: tokio::fs::File) -> std::io::Result { + let std_file = file.into_std().await; + let fd = std_file.as_raw_fd(); + // SAFETY: fd is valid, F_GETFL/F_SETFL are standard fcntl ops + unsafe { + let flags = libc::fcntl(fd, libc::F_GETFL); + if flags == -1 { + return Err(std::io::Error::last_os_error()); + } + if libc::fcntl(fd, libc::F_SETFL, flags | libc::O_DIRECT) == -1 { + return Err(std::io::Error::last_os_error()); + } + } + Ok(Self::new(std_file)) + } + + /// Number of bytes confirmed written to disk. + pub fn bytes_synced(&self) -> u64 { + self.synced + } + + /// Flush any remaining buffered data and sync to disk. 
+ /// + /// Removes the O_DIRECT flag for the final partial-block write, then + /// calls fsync. Updates `bytes_synced` to the final total. + pub async fn sync_all(&mut self) -> std::io::Result<()> { + // Wait for any in-flight flush + self.await_flush().await?; + + let FileState::Idle(file) = std::mem::replace(&mut self.file_state, FileState::Done) + else { + return Ok(()); + }; + + let mut buf = std::mem::replace(&mut self.buf, AlignedBuf::new()); + let remaining = buf.len as u64; + + tokio::task::spawn_blocking(move || { + let mut file = file; + + // Write any aligned portion + let aligned = buf.aligned_len(); + if aligned > 0 { + let slice = unsafe { std::slice::from_raw_parts(buf.ptr, aligned) }; + file.write_all(slice)?; + buf.drain_front(aligned); + } + + // Write remainder with O_DIRECT disabled + if buf.len > 0 { + let fd = file.as_raw_fd(); + // SAFETY: fd is valid, F_GETFL/F_SETFL are standard fcntl ops + unsafe { + let flags = libc::fcntl(fd, libc::F_GETFL); + libc::fcntl(fd, libc::F_SETFL, flags & !libc::O_DIRECT); + } + file.write_all(buf.as_slice())?; + } + + file.sync_all() + }) + .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))??; + + self.synced += remaining; + Ok(()) + } + + async fn await_flush(&mut self) -> std::io::Result<()> { + if let FileState::Flushing(handle) = &mut self.file_state { + let (file, flushed) = handle + .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))??; + self.synced += flushed; + self.file_state = FileState::Idle(file); + } + Ok(()) + } + + /// Non-blocking poll: try to complete a pending flush. + /// Returns Ready(Ok(())) if idle (or just became idle), Pending if still flushing. 
+ fn poll_complete_flush(&mut self, cx: &mut Context<'_>) -> Poll> { + if let FileState::Flushing(handle) = &mut self.file_state { + match Pin::new(handle).poll(cx) { + Poll::Ready(Ok(Ok((file, flushed)))) => { + self.synced += flushed; + self.file_state = FileState::Idle(file); + } + Poll::Ready(Ok(Err(e))) => { + self.file_state = FileState::Done; + return Poll::Ready(Err(e)); + } + Poll::Ready(Err(e)) => { + self.file_state = FileState::Done; + return Poll::Ready(Err(std::io::Error::new(std::io::ErrorKind::Other, e))); + } + Poll::Pending => return Poll::Pending, + } + } + Poll::Ready(Ok(())) + } + + /// Start a background flush of aligned data if the file is idle. + fn maybe_start_flush(&mut self) { + if !matches!(self.file_state, FileState::Idle(_)) { + return; + } + let Some((flush_buf, count)) = self.buf.take_aligned() else { + return; + }; + let FileState::Idle(file) = std::mem::replace(&mut self.file_state, FileState::Done) + else { + unreachable!() + }; + let handle = tokio::task::spawn_blocking(move || { + let mut file = file; + file.write_all(flush_buf.as_slice())?; + Ok((file, count)) + }); + self.file_state = FileState::Flushing(handle); + } +} + +impl AsyncWrite for DirectIoFile { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + // Try to complete any pending flush (non-blocking, registers waker) + match self.poll_complete_flush(cx) { + Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), + _ => {} // Pending is fine — we can still accept writes into the buffer + } + + // If file just became idle and buffer has aligned data, start a flush + // to free buffer space before accepting new data + self.maybe_start_flush(); + + // Accept data into the buffer + let n = self.buf.push(buf); + if n == 0 { + // Buffer full, must wait for flush to complete and free space. + // Waker was already registered by poll_complete_flush above. 
+ return Poll::Pending; + } + + // If file is idle and we now have aligned data, start flushing + self.maybe_start_flush(); + + Poll::Ready(Ok(n)) + } + + fn poll_flush( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + match self.poll_complete_flush(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), + Poll::Ready(Ok(())) => {} + } + + if self.buf.aligned_len() > 0 { + self.maybe_start_flush(); + // Poll the just-started flush + return self.poll_complete_flush(cx).map(|r| r.map(|_| ())); + } + + Poll::Ready(Ok(())) + } + + fn poll_shutdown( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + match self.poll_complete_flush(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), + Poll::Ready(Ok(())) => {} + } + + self.file_state = FileState::Done; + Poll::Ready(Ok(())) + } +} diff --git a/core/src/util/mod.rs b/core/src/util/mod.rs index 6cdc345a1..e9e98e039 100644 --- a/core/src/util/mod.rs +++ b/core/src/util/mod.rs @@ -38,6 +38,7 @@ pub mod collections; pub mod cpupower; pub mod crypto; pub mod data_url; +pub mod direct_io; pub mod future; pub mod http_reader; pub mod io; From cc6a134a32a54ca0c7e97995ee6cf4a58cd9e772 Mon Sep 17 00:00:00 2001 From: Aiden McClelland Date: Mon, 16 Mar 2026 13:39:52 -0600 Subject: [PATCH 6/7] chore: enable debug features and improve graceful shutdown for unstable builds Adds stack overflow backtraces, debug info compilation, and SSH password auth for development. Reduces shutdown timeouts from 60s to 100ms for faster iteration. Fixes race condition in NetService cleanup. 
--- build/image-recipe/build.sh | 5 +++++ .../usr/lib/startos/scripts/init_resize.sh | 8 +++++--- core/build/build-cli.sh | 4 ++++ core/build/build-registrybox.sh | 4 ++++ core/build/build-start-container.sh | 4 ++++ core/build/build-startbox.sh | 4 ++++ core/build/build-tunnelbox.sh | 4 ++++ core/locales/i18n.yaml | 7 +++++++ core/src/backup/backup_bulk.rs | 9 +++++++++ core/src/bins/mod.rs | 5 +++++ core/src/bins/startd.rs | 2 +- core/src/net/net_controller.rs | 2 +- core/src/net/web_server.rs | 2 +- web/package-lock.json | 20 ++----------------- 14 files changed, 56 insertions(+), 24 deletions(-) diff --git a/build/image-recipe/build.sh b/build/image-recipe/build.sh index e35b21b9a..c2367dbfa 100755 --- a/build/image-recipe/build.sh +++ b/build/image-recipe/build.sh @@ -131,6 +131,11 @@ ff02::1 ip6-allnodes ff02::2 ip6-allrouters EOT +if [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then + mkdir -p config/includes.chroot/etc/ssh/sshd_config.d + echo "PasswordAuthentication yes" > config/includes.chroot/etc/ssh/sshd_config.d/dev-password-auth.conf +fi + # Installer marker file (used by installed GRUB to detect the live USB) mkdir -p config/includes.binary touch config/includes.binary/.startos-installer diff --git a/build/image-recipe/raspberrypi/img/usr/lib/startos/scripts/init_resize.sh b/build/image-recipe/raspberrypi/img/usr/lib/startos/scripts/init_resize.sh index faa796c7b..67e0629df 100755 --- a/build/image-recipe/raspberrypi/img/usr/lib/startos/scripts/init_resize.sh +++ b/build/image-recipe/raspberrypi/img/usr/lib/startos/scripts/init_resize.sh @@ -58,6 +58,11 @@ check_variables () { main () { get_variables + # Fix GPT backup header first — the image was built with a tight root + # partition, so the backup GPT is not at the end of the SD card. parted + # will prompt interactively if this isn't fixed before we use it. + sgdisk -e "$ROOT_DEV" 2>/dev/null || true + if ! 
check_variables; then return 1 fi @@ -74,9 +79,6 @@ main () { fi fi - # Fix GPT backup header to reflect new partition layout - sgdisk -e "$ROOT_DEV" 2>/dev/null || true - mount / -o remount,rw btrfs filesystem resize max /media/startos/root diff --git a/core/build/build-cli.sh b/core/build/build-cli.sh index d809a189f..889c5a766 100755 --- a/core/build/build-cli.sh +++ b/core/build/build-cli.sh @@ -67,6 +67,10 @@ if [[ "${ENVIRONMENT:-}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT:-}" =~ (^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin start-cli --target=$TARGET diff --git a/core/build/build-registrybox.sh b/core/build/build-registrybox.sh index 263a3ae6d..1d70895e3 100755 --- a/core/build/build-registrybox.sh +++ b/core/build/build-registrybox.sh @@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin registrybox --target=$RUST_ARCH-unknown-linux-musl diff --git a/core/build/build-start-container.sh b/core/build/build-start-container.sh index d5a56549e..12f47063b 100755 --- a/core/build/build-start-container.sh +++ b/core/build/build-start-container.sh @@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS 
--features=$FEATURES --locked --bin start-container --target=$RUST_ARCH-unknown-linux-musl diff --git a/core/build/build-startbox.sh b/core/build/build-startbox.sh index 5a6df1771..86907e2db 100755 --- a/core/build/build-startbox.sh +++ b/core/build/build-startbox.sh @@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin startbox --target=$RUST_ARCH-unknown-linux-musl diff --git a/core/build/build-tunnelbox.sh b/core/build/build-tunnelbox.sh index 181af3644..1326a2422 100755 --- a/core/build/build-tunnelbox.sh +++ b/core/build/build-tunnelbox.sh @@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin tunnelbox --target=$RUST_ARCH-unknown-linux-musl diff --git a/core/locales/i18n.yaml b/core/locales/i18n.yaml index 0b7a688a2..94d1225a2 100644 --- a/core/locales/i18n.yaml +++ b/core/locales/i18n.yaml @@ -1255,6 +1255,13 @@ backup.bulk.leaked-reference: fr_FR: "référence fuitée vers BackupMountGuard" pl_PL: "wyciekła referencja do BackupMountGuard" +backup.bulk.service-not-ready: + en_US: "Cannot create a backup of a service that is still initializing or in an error state" + de_DE: "Es kann keine Sicherung eines Dienstes erstellt werden, der noch initialisiert wird oder sich im Fehlerzustand befindet" + es_ES: "No se puede crear una copia de seguridad de un servicio que aún se está inicializando o está en estado de 
error" + fr_FR: "Impossible de créer une sauvegarde d'un service encore en cours d'initialisation ou en état d'erreur" + pl_PL: "Nie można utworzyć kopii zapasowej usługi, która jest jeszcze inicjalizowana lub znajduje się w stanie błędu" + # backup/restore.rs backup.restore.package-error: en_US: "Error restoring package %{id}: %{error}" diff --git a/core/src/backup/backup_bulk.rs b/core/src/backup/backup_bulk.rs index 4c95bcad9..cb70a77a1 100644 --- a/core/src/backup/backup_bulk.rs +++ b/core/src/backup/backup_bulk.rs @@ -300,6 +300,15 @@ async fn perform_backup( error: backup_result, }, ); + } else { + backup_report.insert( + id.clone(), + PackageBackupReport { + error: Some( + t!("backup.bulk.service-not-ready").to_string(), + ), + }, + ); } } diff --git a/core/src/bins/mod.rs b/core/src/bins/mod.rs index 2b1959db7..f790bf5c1 100644 --- a/core/src/bins/mod.rs +++ b/core/src/bins/mod.rs @@ -149,6 +149,11 @@ impl MultiExecutable { } pub fn execute(&self) { + #[cfg(feature = "backtrace-on-stack-overflow")] + unsafe { + backtrace_on_stack_overflow::enable() + }; + set_locale_from_env(); let mut popped = Vec::with_capacity(2); diff --git a/core/src/bins/startd.rs b/core/src/bins/startd.rs index 314d3dc7a..487170b82 100644 --- a/core/src/bins/startd.rs +++ b/core/src/bins/startd.rs @@ -190,7 +190,7 @@ pub fn main(args: impl IntoIterator) { } } }); - rt.shutdown_timeout(Duration::from_secs(60)); + rt.shutdown_timeout(Duration::from_millis(100)); res }; diff --git a/core/src/net/net_controller.rs b/core/src/net/net_controller.rs index 529b8824a..c48c7c9c6 100644 --- a/core/src/net/net_controller.rs +++ b/core/src/net/net_controller.rs @@ -820,7 +820,6 @@ impl NetService { break; } } - self.shutdown = true; Ok(()) } @@ -832,6 +831,7 @@ impl NetService { impl Drop for NetService { fn drop(&mut self) { if !self.shutdown { + self.shutdown = true; let svc = std::mem::replace(self, Self::dummy()); tokio::spawn(async move { svc.remove_all().await.log_err() }); } diff --git 
a/core/src/net/web_server.rs b/core/src/net/web_server.rs index 8ffe9deaa..eed7cf37c 100644 --- a/core/src/net/web_server.rs +++ b/core/src/net/web_server.rs @@ -509,7 +509,7 @@ where drop(queue_cell.replace(None)); if !runner.is_empty() { - tokio::time::timeout(Duration::from_secs(60), runner) + tokio::time::timeout(Duration::from_millis(100), runner) .await .log_err(); } diff --git a/web/package-lock.json b/web/package-lock.json index 11eb53ecc..fb07bd64b 100644 --- a/web/package-lock.json +++ b/web/package-lock.json @@ -836,6 +836,7 @@ "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", @@ -8322,6 +8323,7 @@ "integrity": "sha512-ME4Fb83LgEgwNw96RKNvKV4VTLuXfoKudAmm2lP8Kk87KaMK0/Xrx/aAkMWmT8mDb+3MlFDspfbCs7adjRxA2g==", "devOptional": true, "license": "MIT", + "peer": true, "dependencies": { "cli-truncate": "^5.0.0", "colorette": "^2.0.20", @@ -12524,24 +12526,6 @@ "dev": true, "license": "ISC" }, - "node_modules/yaml": { - "version": "2.8.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.2.tgz", - "integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==", - "dev": true, - "license": "ISC", - "optional": true, - "peer": true, - "bin": { - "yaml": "bin.mjs" - }, - "engines": { - "node": ">= 14.6" - }, - "funding": { - "url": "https://github.com/sponsors/eemeli" - } - }, "node_modules/yargs": { "version": "18.0.0", "resolved": "https://registry.npmjs.org/yargs/-/yargs-18.0.0.tgz", From 1c1ae11241e2b9c47c07d97f0ad3ca8db64f07f9 Mon Sep 17 00:00:00 2001 From: Aiden McClelland Date: Mon, 16 Mar 2026 13:54:45 -0600 Subject: [PATCH 7/7] chore: bump to v0.4.0-alpha.21 --- core/Cargo.lock | 2 +- core/Cargo.toml | 2 +- core/src/version/mod.rs | 12 ++++++---- core/src/version/v0_4_0_alpha_21.rs | 37 
+++++++++++++++++++++++++++++ sdk/package/lib/StartSdk.ts | 2 +- web/package-lock.json | 4 ++-- web/package.json | 2 +- 7 files changed, 51 insertions(+), 10 deletions(-) create mode 100644 core/src/version/v0_4_0_alpha_21.rs diff --git a/core/Cargo.lock b/core/Cargo.lock index ef8f9bb3e..8bb465168 100644 --- a/core/Cargo.lock +++ b/core/Cargo.lock @@ -6434,7 +6434,7 @@ dependencies = [ [[package]] name = "start-os" -version = "0.4.0-alpha.20" +version = "0.4.0-alpha.21" dependencies = [ "aes", "async-acme", diff --git a/core/Cargo.toml b/core/Cargo.toml index 47341d188..581ea58e2 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -15,7 +15,7 @@ license = "MIT" name = "start-os" readme = "README.md" repository = "https://github.com/Start9Labs/start-os" -version = "0.4.0-alpha.20" # VERSION_BUMP +version = "0.4.0-alpha.21" # VERSION_BUMP [lib] name = "startos" diff --git a/core/src/version/mod.rs b/core/src/version/mod.rs index 4c17bc32f..a4585358f 100644 --- a/core/src/version/mod.rs +++ b/core/src/version/mod.rs @@ -60,8 +60,9 @@ mod v0_4_0_alpha_17; mod v0_4_0_alpha_18; mod v0_4_0_alpha_19; mod v0_4_0_alpha_20; +mod v0_4_0_alpha_21; -pub type Current = v0_4_0_alpha_20::Version; // VERSION_BUMP +pub type Current = v0_4_0_alpha_21::Version; // VERSION_BUMP impl Current { #[instrument(skip(self, db))] @@ -189,7 +190,8 @@ enum Version { V0_4_0_alpha_17(Wrapper), V0_4_0_alpha_18(Wrapper), V0_4_0_alpha_19(Wrapper), - V0_4_0_alpha_20(Wrapper), // VERSION_BUMP + V0_4_0_alpha_20(Wrapper), + V0_4_0_alpha_21(Wrapper), // VERSION_BUMP Other(exver::Version), } @@ -252,7 +254,8 @@ impl Version { Self::V0_4_0_alpha_17(v) => DynVersion(Box::new(v.0)), Self::V0_4_0_alpha_18(v) => DynVersion(Box::new(v.0)), Self::V0_4_0_alpha_19(v) => DynVersion(Box::new(v.0)), - Self::V0_4_0_alpha_20(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP + Self::V0_4_0_alpha_20(v) => DynVersion(Box::new(v.0)), + Self::V0_4_0_alpha_21(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP Self::Other(v) => 
{ return Err(Error::new( eyre!("unknown version {v}"), @@ -307,7 +310,8 @@ impl Version { Version::V0_4_0_alpha_17(Wrapper(x)) => x.semver(), Version::V0_4_0_alpha_18(Wrapper(x)) => x.semver(), Version::V0_4_0_alpha_19(Wrapper(x)) => x.semver(), - Version::V0_4_0_alpha_20(Wrapper(x)) => x.semver(), // VERSION_BUMP + Version::V0_4_0_alpha_20(Wrapper(x)) => x.semver(), + Version::V0_4_0_alpha_21(Wrapper(x)) => x.semver(), // VERSION_BUMP Version::Other(x) => x.clone(), } } diff --git a/core/src/version/v0_4_0_alpha_21.rs b/core/src/version/v0_4_0_alpha_21.rs new file mode 100644 index 000000000..2ca9631b3 --- /dev/null +++ b/core/src/version/v0_4_0_alpha_21.rs @@ -0,0 +1,37 @@ +use exver::{PreReleaseSegment, VersionRange}; + +use super::v0_3_5::V0_3_0_COMPAT; +use super::{VersionT, v0_4_0_alpha_20}; +use crate::prelude::*; + +lazy_static::lazy_static! { + static ref V0_4_0_alpha_21: exver::Version = exver::Version::new( + [0, 4, 0], + [PreReleaseSegment::String("alpha".into()), 21.into()] + ); +} + +#[derive(Clone, Copy, Debug, Default)] +pub struct Version; + +impl VersionT for Version { + type Previous = v0_4_0_alpha_20::Version; + type PreUpRes = (); + + async fn pre_up(self) -> Result { + Ok(()) + } + fn semver(self) -> exver::Version { + V0_4_0_alpha_21.clone() + } + fn compat(self) -> &'static VersionRange { + &V0_3_0_COMPAT + } + #[instrument(skip_all)] + fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result { + Ok(Value::Null) + } + fn down(self, _db: &mut Value) -> Result<(), Error> { + Ok(()) + } +} diff --git a/sdk/package/lib/StartSdk.ts b/sdk/package/lib/StartSdk.ts index 4566b2666..6bf925141 100644 --- a/sdk/package/lib/StartSdk.ts +++ b/sdk/package/lib/StartSdk.ts @@ -69,7 +69,7 @@ import { getOwnServiceInterfaces } from '../../base/lib/util/getServiceInterface import { Volumes, createVolumes } from './util/Volume' /** The minimum StartOS version required by this SDK release */ -export const OSVersion = testTypeVersion('0.4.0-alpha.20') +export 
const OSVersion = testTypeVersion('0.4.0-alpha.21') // prettier-ignore type AnyNeverCond = diff --git a/web/package-lock.json b/web/package-lock.json index fb07bd64b..8757e3e75 100644 --- a/web/package-lock.json +++ b/web/package-lock.json @@ -1,12 +1,12 @@ { "name": "startos-ui", - "version": "0.4.0-alpha.20", + "version": "0.4.0-alpha.21", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "startos-ui", - "version": "0.4.0-alpha.20", + "version": "0.4.0-alpha.21", "license": "MIT", "dependencies": { "@angular/cdk": "^21.2.1", diff --git a/web/package.json b/web/package.json index a4f9635ca..8aa0e0799 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "startos-ui", - "version": "0.4.0-alpha.20", + "version": "0.4.0-alpha.21", "author": "Start9 Labs, Inc", "homepage": "https://start9.com/", "license": "MIT",