diff --git a/.github/workflows/startos-iso.yaml b/.github/workflows/startos-iso.yaml index 543bc739c..3bf33d9a2 100644 --- a/.github/workflows/startos-iso.yaml +++ b/.github/workflows/startos-iso.yaml @@ -124,14 +124,13 @@ jobs: strategy: fail-fast: false matrix: - # TODO: re-add "raspberrypi" to the platform list below platform: >- ${{ fromJson( format( '[ ["{0}"], - ["x86_64", "x86_64-nonfree", "x86_64-nvidia", "aarch64", "aarch64-nonfree", "aarch64-nvidia", "riscv64", "riscv64-nonfree"] + ["x86_64", "x86_64-nonfree", "x86_64-nvidia", "aarch64", "aarch64-nonfree", "aarch64-nvidia", "raspberrypi", "riscv64", "riscv64-nonfree"] ]', github.event.inputs.platform || 'ALL' ) diff --git a/.gitignore b/.gitignore index 10d8b5424..32c700eef 100644 --- a/.gitignore +++ b/.gitignore @@ -22,3 +22,4 @@ secrets.db tmp web/.i18n-checked docs/USER.md +*.s9pk diff --git a/Makefile b/Makefile index d04824d67..151f19ee6 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,8 @@ IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html FIRMWARE_ROMS := build/lib/firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./build/lib/firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json) -BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS) +TOR_S9PK := build/lib/tor_$(ARCH).s9pk +BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS) $(TOR_S9PK) IMAGE_RECIPE_SRC := $(call ls-files, build/image-recipe/) STARTD_SRC := core/startd.service $(BUILD_SRC) CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE) @@ -188,6 +189,9 @@ install: $(STARTOS_TARGETS) $(call mkdir,$(DESTDIR)/lib/systemd/system) $(call 
cp,core/startd.service,$(DESTDIR)/lib/systemd/system/startd.service) + if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then \ + sed -i '/^Environment=/a Environment=RUST_BACKTRACE=full' $(DESTDIR)/lib/systemd/system/startd.service; \ + fi $(call mkdir,$(DESTDIR)/usr/lib) $(call rm,$(DESTDIR)/usr/lib/startos) @@ -312,6 +316,9 @@ build/lib/depends build/lib/conflicts: $(ENVIRONMENT_FILE) $(PLATFORM_FILE) $(sh $(FIRMWARE_ROMS): build/lib/firmware.json ./build/download-firmware.sh $(PLATFORM_FILE) ./build/download-firmware.sh $(PLATFORM) +$(TOR_S9PK): ./build/download-tor-s9pk.sh + ./build/download-tor-s9pk.sh $(ARCH) + core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE) ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-startbox.sh touch core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox diff --git a/build/download-tor-s9pk.sh b/build/download-tor-s9pk.sh new file mode 100755 index 000000000..8feb9f597 --- /dev/null +++ b/build/download-tor-s9pk.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +set -e + +ARCH=$1 + +if [ -z "$ARCH" ]; then + >&2 echo "usage: $0 <arch>" + exit 1 +fi + +curl --fail -L -o "./lib/tor_${ARCH}.s9pk" "https://s9pks.nyc3.cdn.digitaloceanspaces.com/tor_${ARCH}.s9pk" diff --git a/build/image-recipe/build.sh b/build/image-recipe/build.sh index e35b21b9a..6648e719a 100755 --- a/build/image-recipe/build.sh +++ b/build/image-recipe/build.sh @@ -131,6 +131,11 @@ ff02::1 ip6-allnodes ff02::2 ip6-allrouters EOT +if [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then + mkdir -p config/includes.chroot/etc/ssh/sshd_config.d + echo "PasswordAuthentication yes" > config/includes.chroot/etc/ssh/sshd_config.d/dev-password-auth.conf +fi + # Installer marker file (used by installed GRUB to detect the live USB) mkdir -p config/includes.binary touch config/includes.binary/.startos-installer @@ -348,6 +353,10 @@ if ! 
[[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then passwd -l start9 fi +mkdir -p /media/startos +chmod 750 /media/startos +chown root:startos /media/startos + EOF SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(date '+%s')}" @@ -498,8 +507,8 @@ elif [ "${IMAGE_TYPE}" = img ]; then $TMPDIR/next/dev $TMPDIR/next/proc $TMPDIR/next/sys $TMPDIR/next/media/startos/root mount --rbind $TMPDIR/boot $TMPDIR/next/boot mount --bind /dev $TMPDIR/next/dev - mount --bind /proc $TMPDIR/next/proc - mount --bind /sys $TMPDIR/next/sys + mount -t proc proc $TMPDIR/next/proc + mount -t sysfs sysfs $TMPDIR/next/sys mount --bind $TMPDIR/root $TMPDIR/next/media/startos/root chroot $TMPDIR/next grub-install --target=arm64-efi --removable --efi-directory=/boot/efi --boot-directory=/boot --no-nvram diff --git a/build/image-recipe/raspberrypi/img/usr/lib/startos/scripts/init_resize.sh b/build/image-recipe/raspberrypi/img/usr/lib/startos/scripts/init_resize.sh index faa796c7b..67e0629df 100755 --- a/build/image-recipe/raspberrypi/img/usr/lib/startos/scripts/init_resize.sh +++ b/build/image-recipe/raspberrypi/img/usr/lib/startos/scripts/init_resize.sh @@ -58,6 +58,11 @@ check_variables () { main () { get_variables + # Fix GPT backup header first — the image was built with a tight root + # partition, so the backup GPT is not at the end of the SD card. parted + # will prompt interactively if this isn't fixed before we use it. + sgdisk -e "$ROOT_DEV" 2>/dev/null || true + if ! 
check_variables; then return 1 fi @@ -74,9 +79,6 @@ main () { fi fi - # Fix GPT backup header to reflect new partition layout - sgdisk -e "$ROOT_DEV" 2>/dev/null || true - mount / -o remount,rw btrfs filesystem resize max /media/startos/root diff --git a/build/lib/scripts/chroot-and-upgrade b/build/lib/scripts/chroot-and-upgrade index f14898316..f5dd417aa 100755 --- a/build/lib/scripts/chroot-and-upgrade +++ b/build/lib/scripts/chroot-and-upgrade @@ -58,13 +58,13 @@ mkdir -p /media/startos/next/media/startos/root mount --bind /run /media/startos/next/run mount --bind /tmp /media/startos/next/tmp mount --bind /dev /media/startos/next/dev -mount --bind /sys /media/startos/next/sys -mount --bind /proc /media/startos/next/proc +mount -t sysfs sysfs /media/startos/next/sys +mount -t proc proc /media/startos/next/proc mount --bind /boot /media/startos/next/boot mount --bind /media/startos/root /media/startos/next/media/startos/root if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then - mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars + mount -t efivarfs efivarfs /media/startos/next/sys/firmware/efi/efivars fi if [ -z "$*" ]; then @@ -111,6 +111,6 @@ if [ "$CHROOT_RES" -eq 0 ]; then reboot fi -umount /media/startos/next -umount /media/startos/upper +umount -l /media/startos/next +umount -l /media/startos/upper rm -rf /media/startos/upper /media/startos/next \ No newline at end of file diff --git a/build/lib/scripts/upgrade b/build/lib/scripts/upgrade index 309d0e9bb..488372e2d 100755 --- a/build/lib/scripts/upgrade +++ b/build/lib/scripts/upgrade @@ -45,13 +45,13 @@ mkdir -p /media/startos/next/media/startos/root mount --bind /run /media/startos/next/run mount --bind /tmp /media/startos/next/tmp mount --bind /dev /media/startos/next/dev -mount --bind /sys /media/startos/next/sys -mount --bind /proc /media/startos/next/proc +mount -t sysfs sysfs /media/startos/next/sys +mount -t proc proc /media/startos/next/proc mount --rbind 
/boot /media/startos/next/boot mount --bind /media/startos/root /media/startos/next/media/startos/root if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then - mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars + mount -t efivarfs efivarfs /media/startos/next/sys/firmware/efi/efivars fi chroot /media/startos/next bash -e << "EOF" diff --git a/container-runtime/package-lock.json b/container-runtime/package-lock.json index 22b581db5..ff2e560de 100644 --- a/container-runtime/package-lock.json +++ b/container-runtime/package-lock.json @@ -37,7 +37,7 @@ }, "../sdk/dist": { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.59", + "version": "0.4.0-beta.61", "license": "MIT", "dependencies": { "@iarna/toml": "^3.0.0", diff --git a/container-runtime/src/Adapters/EffectCreator.ts b/container-runtime/src/Adapters/EffectCreator.ts index c244347eb..22328bbb9 100644 --- a/container-runtime/src/Adapters/EffectCreator.ts +++ b/container-runtime/src/Adapters/EffectCreator.ts @@ -187,9 +187,10 @@ export function makeEffects(context: EffectContext): Effects { getServiceManifest( ...[options]: Parameters ) { - return rpcRound("get-service-manifest", options) as ReturnType< - T.Effects["getServiceManifest"] - > + return rpcRound("get-service-manifest", { + ...options, + callback: context.callbacks?.addCallback(options.callback) || null, + }) as ReturnType }, subcontainer: { createFs(options: { imageId: string; name: string }) { @@ -211,9 +212,10 @@ export function makeEffects(context: EffectContext): Effects { > }) as Effects["exportServiceInterface"], getContainerIp(...[options]: Parameters) { - return rpcRound("get-container-ip", options) as ReturnType< - T.Effects["getContainerIp"] - > + return rpcRound("get-container-ip", { + ...options, + callback: context.callbacks?.addCallback(options.callback) || null, + }) as ReturnType }, getOsIp(...[]: Parameters) { return rpcRound("get-os-ip", {}) as ReturnType @@ -244,9 +246,10 @@ export 
function makeEffects(context: EffectContext): Effects { > }, getSslCertificate(options: Parameters[0]) { - return rpcRound("get-ssl-certificate", options) as ReturnType< - T.Effects["getSslCertificate"] - > + return rpcRound("get-ssl-certificate", { + ...options, + callback: context.callbacks?.addCallback(options.callback) || null, + }) as ReturnType }, getSslKey(options: Parameters[0]) { return rpcRound("get-ssl-key", options) as ReturnType< @@ -308,7 +311,10 @@ export function makeEffects(context: EffectContext): Effects { }, getStatus(...[o]: Parameters) { - return rpcRound("get-status", o) as ReturnType + return rpcRound("get-status", { + ...o, + callback: context.callbacks?.addCallback(o.callback) || null, + }) as ReturnType }, /// DEPRECATED setMainStatus(o: { status: "running" | "stopped" }): Promise { diff --git a/container-runtime/src/Adapters/RpcListener.ts b/container-runtime/src/Adapters/RpcListener.ts index f9dd0fac2..4f51e5278 100644 --- a/container-runtime/src/Adapters/RpcListener.ts +++ b/container-runtime/src/Adapters/RpcListener.ts @@ -298,13 +298,10 @@ export class RpcListener { } case "stop": { const { id } = stopType.parse(input) + this.callbacks?.removeChild("main") return handleRpc( id, - this.system.stop().then((result) => { - this.callbacks?.removeChild("main") - - return { result } - }), + this.system.stop().then((result) => ({ result })), ) } case "exit": { diff --git a/container-runtime/src/Adapters/Systems/SystemForStartOs.ts b/container-runtime/src/Adapters/Systems/SystemForStartOs.ts index 3b0d767ed..adf76aa44 100644 --- a/container-runtime/src/Adapters/Systems/SystemForStartOs.ts +++ b/container-runtime/src/Adapters/Systems/SystemForStartOs.ts @@ -71,7 +71,7 @@ export class SystemForStartOs implements System { this.starting = true effects.constRetry = utils.once(() => { console.debug(".const() triggered") - effects.restart() + if (effects.isInContext) effects.restart() }) let mainOnTerm: () => Promise | undefined const daemons = 
await ( diff --git a/core/Cargo.lock b/core/Cargo.lock index 10caba090..8bb465168 100644 --- a/core/Cargo.lock +++ b/core/Cargo.lock @@ -3376,6 +3376,15 @@ dependencies = [ "serde_json", ] +[[package]] +name = "keccak" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" +dependencies = [ + "cpufeatures", +] + [[package]] name = "kv" version = "0.24.0" @@ -5985,6 +5994,16 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -6415,7 +6434,7 @@ dependencies = [ [[package]] name = "start-os" -version = "0.4.0-alpha.20" +version = "0.4.0-alpha.21" dependencies = [ "aes", "async-acme", @@ -6519,6 +6538,7 @@ dependencies = [ "serde_yml", "sha-crypt", "sha2 0.10.9", + "sha3", "signal-hook", "socket2 0.6.2", "socks5-impl", diff --git a/core/Cargo.toml b/core/Cargo.toml index cb1eb3c51..581ea58e2 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -15,7 +15,7 @@ license = "MIT" name = "start-os" readme = "README.md" repository = "https://github.com/Start9Labs/start-os" -version = "0.4.0-alpha.20" # VERSION_BUMP +version = "0.4.0-alpha.21" # VERSION_BUMP [lib] name = "startos" @@ -200,6 +200,7 @@ serde_toml = { package = "toml", version = "0.9.9+spec-1.0.0" } serde_yaml = { package = "serde_yml", version = "0.0.12" } sha-crypt = "0.5.0" sha2 = "0.10.2" +sha3 = "0.10" signal-hook = "0.3.17" socket2 = { version = "0.6.0", features = ["all"] } socks5-impl = { version = "0.7.2", features = ["client", "server"] } diff --git a/core/build/build-cli.sh b/core/build/build-cli.sh index d809a189f..889c5a766 100755 --- a/core/build/build-cli.sh +++ b/core/build/build-cli.sh @@ -67,6 +67,10 @@ 
if [[ "${ENVIRONMENT:-}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT:-}" =~ (^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin start-cli --target=$TARGET diff --git a/core/build/build-registrybox.sh b/core/build/build-registrybox.sh index 263a3ae6d..1d70895e3 100755 --- a/core/build/build-registrybox.sh +++ b/core/build/build-registrybox.sh @@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin registrybox --target=$RUST_ARCH-unknown-linux-musl diff --git a/core/build/build-start-container.sh b/core/build/build-start-container.sh index d5a56549e..12f47063b 100755 --- a/core/build/build-start-container.sh +++ b/core/build/build-start-container.sh @@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin start-container --target=$RUST_ARCH-unknown-linux-musl diff --git a/core/build/build-startbox.sh b/core/build/build-startbox.sh index 5a6df1771..86907e2db 100755 --- a/core/build/build-startbox.sh +++ b/core/build/build-startbox.sh @@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT}" =~ 
(^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin startbox --target=$RUST_ARCH-unknown-linux-musl diff --git a/core/build/build-tunnelbox.sh b/core/build/build-tunnelbox.sh index 181af3644..1326a2422 100755 --- a/core/build/build-tunnelbox.sh +++ b/core/build/build-tunnelbox.sh @@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin tunnelbox --target=$RUST_ARCH-unknown-linux-musl diff --git a/core/locales/i18n.yaml b/core/locales/i18n.yaml index 0b7a688a2..94d1225a2 100644 --- a/core/locales/i18n.yaml +++ b/core/locales/i18n.yaml @@ -1255,6 +1255,13 @@ backup.bulk.leaked-reference: fr_FR: "référence fuitée vers BackupMountGuard" pl_PL: "wyciekła referencja do BackupMountGuard" +backup.bulk.service-not-ready: + en_US: "Cannot create a backup of a service that is still initializing or in an error state" + de_DE: "Es kann keine Sicherung eines Dienstes erstellt werden, der noch initialisiert wird oder sich im Fehlerzustand befindet" + es_ES: "No se puede crear una copia de seguridad de un servicio que aún se está inicializando o está en estado de error" + fr_FR: "Impossible de créer une sauvegarde d'un service encore en cours d'initialisation ou en état d'erreur" + pl_PL: "Nie można utworzyć kopii zapasowej usługi, która jest jeszcze inicjalizowana lub znajduje się w stanie błędu" + # backup/restore.rs backup.restore.package-error: en_US: "Error restoring package %{id}: %{error}" diff --git a/core/src/backup/backup_bulk.rs 
b/core/src/backup/backup_bulk.rs index 4c95bcad9..cb70a77a1 100644 --- a/core/src/backup/backup_bulk.rs +++ b/core/src/backup/backup_bulk.rs @@ -300,6 +300,15 @@ async fn perform_backup( error: backup_result, }, ); + } else { + backup_report.insert( + id.clone(), + PackageBackupReport { + error: Some( + t!("backup.bulk.service-not-ready").to_string(), + ), + }, + ); } } diff --git a/core/src/backup/restore.rs b/core/src/backup/restore.rs index bc96d8823..77e7181f5 100644 --- a/core/src/backup/restore.rs +++ b/core/src/backup/restore.rs @@ -10,6 +10,7 @@ use tracing::instrument; use ts_rs::TS; use super::target::BackupTargetId; +use crate::PackageId; use crate::backup::os::OsBackup; use crate::context::setup::SetupResult; use crate::context::{RpcContext, SetupContext}; @@ -26,7 +27,6 @@ use crate::service::service_map::DownloadInstallFuture; use crate::setup::SetupExecuteProgress; use crate::system::{save_language, sync_kiosk}; use crate::util::serde::{IoFormat, Pem}; -use crate::{PLATFORM, PackageId}; #[derive(Deserialize, Serialize, Parser, TS)] #[serde(rename_all = "camelCase")] @@ -90,7 +90,7 @@ pub async fn recover_full_server( recovery_source: TmpMountGuard, server_id: &str, recovery_password: &str, - kiosk: Option, + kiosk: bool, hostname: Option, SetupExecuteProgress { init_phases, @@ -123,7 +123,6 @@ pub async fn recover_full_server( os_backup.account.hostname = h; } - let kiosk = Some(kiosk.unwrap_or(true)).filter(|_| &*PLATFORM != "raspberrypi"); sync_kiosk(kiosk).await?; let language = ctx.language.peek(|a| a.clone()); diff --git a/core/src/bins/mod.rs b/core/src/bins/mod.rs index 2b1959db7..f790bf5c1 100644 --- a/core/src/bins/mod.rs +++ b/core/src/bins/mod.rs @@ -149,6 +149,11 @@ impl MultiExecutable { } pub fn execute(&self) { + #[cfg(feature = "backtrace-on-stack-overflow")] + unsafe { + backtrace_on_stack_overflow::enable() + }; + set_locale_from_env(); let mut popped = Vec::with_capacity(2); diff --git a/core/src/bins/startd.rs 
b/core/src/bins/startd.rs index 314d3dc7a..487170b82 100644 --- a/core/src/bins/startd.rs +++ b/core/src/bins/startd.rs @@ -190,7 +190,7 @@ pub fn main(args: impl IntoIterator) { } } }); - rt.shutdown_timeout(Duration::from_secs(60)); + rt.shutdown_timeout(Duration::from_millis(100)); res }; diff --git a/core/src/context/diagnostic.rs b/core/src/context/diagnostic.rs index c069d017f..bf27da071 100644 --- a/core/src/context/diagnostic.rs +++ b/core/src/context/diagnostic.rs @@ -39,7 +39,7 @@ impl DiagnosticContext { shutdown, disk_guid, error: Arc::new(error.into()), - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), }))) } } diff --git a/core/src/context/init.rs b/core/src/context/init.rs index b7d5eac6a..5f6c35222 100644 --- a/core/src/context/init.rs +++ b/core/src/context/init.rs @@ -32,7 +32,7 @@ impl InitContext { error: watch::channel(None).0, progress, shutdown, - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), }))) } } diff --git a/core/src/context/rpc.rs b/core/src/context/rpc.rs index 61ce35020..204b000b5 100644 --- a/core/src/context/rpc.rs +++ b/core/src/context/rpc.rs @@ -62,8 +62,8 @@ pub struct RpcContextSeed { pub db: TypedPatchDb, pub sync_db: watch::Sender, pub account: SyncRwLock, - pub net_controller: Arc, pub os_net_service: NetService, + pub net_controller: Arc, pub s9pk_arch: Option<&'static str>, pub services: ServiceMap, pub cancellable_installs: SyncMutex>>, @@ -346,10 +346,10 @@ impl RpcContext { services, cancellable_installs: SyncMutex::new(BTreeMap::new()), metrics_cache, + rpc_continuations: RpcContinuations::new(Some(shutdown.clone())), shutdown, lxc_manager: Arc::new(LxcManager::new()), open_authed_continuations: OpenAuthedContinuations::new(), - rpc_continuations: RpcContinuations::new(), wifi_manager: Arc::new(RwLock::new(wifi_interface.clone().map(|i| WpaCli::init(i)))), current_secret: Arc::new( 
Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).map_err(|e| { diff --git a/core/src/context/setup.rs b/core/src/context/setup.rs index d4d0bb9de..3d16624ef 100644 --- a/core/src/context/setup.rs +++ b/core/src/context/setup.rs @@ -85,7 +85,7 @@ impl SetupContext { result: OnceCell::new(), disk_guid: OnceCell::new(), shutdown, - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), install_rootfs: SyncMutex::new(None), language: SyncMutex::new(None), keyboard: SyncMutex::new(None), diff --git a/core/src/db/model/mod.rs b/core/src/db/model/mod.rs index 05fc8502d..1cdc7f9bf 100644 --- a/core/src/db/model/mod.rs +++ b/core/src/db/model/mod.rs @@ -31,7 +31,7 @@ pub struct Database { impl Database { pub fn init( account: &AccountInfo, - kiosk: Option, + kiosk: bool, language: Option, keyboard: Option, ) -> Result { diff --git a/core/src/db/model/public.rs b/core/src/db/model/public.rs index a07375adf..9477dac82 100644 --- a/core/src/db/model/public.rs +++ b/core/src/db/model/public.rs @@ -49,7 +49,7 @@ pub struct Public { impl Public { pub fn init( account: &AccountInfo, - kiosk: Option, + kiosk: bool, language: Option, keyboard: Option, ) -> Result { @@ -149,7 +149,7 @@ impl Public { echoip_urls: default_echoip_urls(), ram: 0, devices: Vec::new(), - kiosk, + kiosk: Some(kiosk).filter(|_| &*PLATFORM != "raspberrypi"), language, keyboard, }, diff --git a/core/src/disk/fsck/mod.rs b/core/src/disk/fsck/mod.rs index 1c6949138..f2d210d46 100644 --- a/core/src/disk/fsck/mod.rs +++ b/core/src/disk/fsck/mod.rs @@ -25,20 +25,28 @@ pub enum RepairStrategy { Preen, Aggressive, } +/// Detects the filesystem type of a block device using `grub-probe`. +/// Returns e.g. `"ext2"` (for ext4), `"btrfs"`, etc. 
+pub async fn detect_filesystem( + logicalname: impl AsRef + std::fmt::Debug, +) -> Result { + Ok(String::from_utf8( + Command::new("grub-probe") + .arg("-d") + .arg(logicalname.as_ref()) + .invoke(crate::ErrorKind::DiskManagement) + .await?, + )? + .trim() + .to_owned()) +} + impl RepairStrategy { pub async fn fsck( &self, logicalname: impl AsRef + std::fmt::Debug, ) -> Result { - match &*String::from_utf8( - Command::new("grub-probe") - .arg("-d") - .arg(logicalname.as_ref()) - .invoke(crate::ErrorKind::DiskManagement) - .await?, - )? - .trim() - { + match &*detect_filesystem(&logicalname).await? { "ext2" => self.e2fsck(logicalname).await, "btrfs" => self.btrfs_check(logicalname).await, fs => { diff --git a/core/src/disk/main.rs b/core/src/disk/main.rs index 349cb045e..da0007caf 100644 --- a/core/src/disk/main.rs +++ b/core/src/disk/main.rs @@ -7,7 +7,7 @@ use rust_i18n::t; use tokio::process::Command; use tracing::instrument; -use super::fsck::{RepairStrategy, RequiresReboot}; +use super::fsck::{RepairStrategy, RequiresReboot, detect_filesystem}; use super::util::pvscan; use crate::disk::mount::filesystem::block_dev::BlockDev; use crate::disk::mount::filesystem::{FileSystem, ReadWrite}; @@ -301,6 +301,37 @@ pub async fn mount_fs>( .with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?; blockdev_path = Path::new("/dev/mapper").join(&full_name); } + + // Convert ext4 → btrfs on the package-data partition if needed + let fs_type = detect_filesystem(&blockdev_path).await?; + if fs_type == "ext2" { + tracing::info!("Running e2fsck before converting {name} from ext4 to btrfs"); + Command::new("e2fsck") + .arg("-fy") + .arg(&blockdev_path) + .invoke(ErrorKind::DiskManagement) + .await?; + tracing::info!("Converting {name} from ext4 to btrfs"); + Command::new("btrfs-convert") + .arg("--no-progress") + .arg(&blockdev_path) + .invoke(ErrorKind::DiskManagement) + .await?; + // Defragment after conversion for optimal performance + let tmp_mount = 
datadir.as_ref().join(format!("{name}.convert-tmp")); + tokio::fs::create_dir_all(&tmp_mount).await?; + BlockDev::new(&blockdev_path) + .mount(&tmp_mount, ReadWrite) + .await?; + Command::new("btrfs") + .args(["filesystem", "defragment", "-r"]) + .arg(&tmp_mount) + .invoke(ErrorKind::DiskManagement) + .await?; + unmount(&tmp_mount, false).await?; + tokio::fs::remove_dir(&tmp_mount).await?; + } + let reboot = repair.fsck(&blockdev_path).await?; if !guid.ends_with("_UNENC") { @@ -342,3 +373,99 @@ pub async fn mount_all_fs>( reboot |= mount_fs(guid, &datadir, "package-data", repair, password).await?; Ok(reboot) } + +/// Temporarily activates a VG and opens LUKS to probe the `package-data` +/// filesystem type. Returns `None` if probing fails (e.g. LV doesn't exist). +#[instrument(skip_all)] +pub async fn probe_package_data_fs(guid: &str) -> Result, Error> { + // Import and activate the VG + match Command::new("vgimport") + .arg(guid) + .invoke(ErrorKind::DiskManagement) + .await + { + Ok(_) => {} + Err(e) + if format!("{}", e.source) + .lines() + .any(|l| l.trim() == format!("Volume group \"{}\" is not exported", guid)) => + { + // Already imported, that's fine + } + Err(e) => { + tracing::warn!("Could not import VG {guid} for filesystem probe: {e}"); + return Ok(None); + } + } + if let Err(e) = Command::new("vgchange") + .arg("-ay") + .arg(guid) + .invoke(ErrorKind::DiskManagement) + .await + { + tracing::warn!("Could not activate VG {guid} for filesystem probe: {e}"); + return Ok(None); + } + + let mut opened_luks = false; + let result = async { + let lv_path = Path::new("/dev").join(guid).join("package-data"); + if tokio::fs::metadata(&lv_path).await.is_err() { + return Ok(None); + } + + let blockdev_path = if !guid.ends_with("_UNENC") { + let full_name = format!("{guid}_package-data"); + let password = DEFAULT_PASSWORD; + if let Some(parent) = Path::new(PASSWORD_PATH).parent() { + tokio::fs::create_dir_all(parent).await?; + } + tokio::fs::write(PASSWORD_PATH, 
password) + .await + .with_ctx(|_| (ErrorKind::Filesystem, PASSWORD_PATH))?; + Command::new("cryptsetup") + .arg("-q") + .arg("luksOpen") + .arg("--allow-discards") + .arg(format!("--key-file={PASSWORD_PATH}")) + .arg(format!("--keyfile-size={}", password.len())) + .arg(&lv_path) + .arg(&full_name) + .invoke(ErrorKind::DiskManagement) + .await?; + let _ = tokio::fs::remove_file(PASSWORD_PATH).await; + opened_luks = true; + PathBuf::from(format!("/dev/mapper/{full_name}")) + } else { + lv_path.clone() + }; + + detect_filesystem(&blockdev_path).await.map(Some) + } + .await; + + // Always clean up: close LUKS, deactivate VG, export VG + if opened_luks { + let full_name = format!("{guid}_package-data"); + Command::new("cryptsetup") + .arg("-q") + .arg("luksClose") + .arg(&full_name) + .invoke(ErrorKind::DiskManagement) + .await + .log_err(); + } + Command::new("vgchange") + .arg("-an") + .arg(guid) + .invoke(ErrorKind::DiskManagement) + .await + .log_err(); + Command::new("vgexport") + .arg(guid) + .invoke(ErrorKind::DiskManagement) + .await + .log_err(); + + result +} diff --git a/core/src/disk/util.rs b/core/src/disk/util.rs index 9cf2b6882..fdff3a966 100644 --- a/core/src/disk/util.rs +++ b/core/src/disk/util.rs @@ -41,6 +41,7 @@ pub struct DiskInfo { pub partitions: Vec, pub capacity: u64, pub guid: Option, + pub filesystem: Option, } #[derive(Clone, Debug, Deserialize, Serialize, ts_rs::TS)] @@ -55,6 +56,7 @@ pub struct PartitionInfo { pub used: Option, pub start_os: BTreeMap, pub guid: Option, + pub filesystem: Option, } #[derive(Clone, Debug, Default, Deserialize, Serialize, ts_rs::TS)] @@ -374,6 +376,15 @@ pub async fn list(os: &OsPartitionInfo) -> Result, Error> { disk_info.capacity = part_info.capacity; if let Some(g) = disk_guids.get(&disk_info.logicalname) { disk_info.guid = g.clone(); + if let Some(guid) = g { + disk_info.filesystem = + crate::disk::main::probe_package_data_fs(guid) + .await + .unwrap_or_else(|e| { + tracing::warn!("Failed to probe 
filesystem for {guid}: {e}"); + None + }); + } } else { disk_info.partitions = vec![part_info]; } @@ -384,11 +395,31 @@ pub async fn list(os: &OsPartitionInfo) -> Result, Error> { disk_info.partitions = Vec::with_capacity(index.parts.len()); if let Some(g) = disk_guids.get(&disk_info.logicalname) { disk_info.guid = g.clone(); + if let Some(guid) = g { + disk_info.filesystem = + crate::disk::main::probe_package_data_fs(guid) + .await + .unwrap_or_else(|e| { + tracing::warn!("Failed to probe filesystem for {guid}: {e}"); + None + }); + } } else { for part in index.parts { let mut part_info = part_info(part).await; if let Some(g) = disk_guids.get(&part_info.logicalname) { part_info.guid = g.clone(); + if let Some(guid) = g { + part_info.filesystem = + crate::disk::main::probe_package_data_fs(guid) + .await + .unwrap_or_else(|e| { + tracing::warn!( + "Failed to probe filesystem for {guid}: {e}" + ); + None + }); + } } disk_info.partitions.push(part_info); } @@ -461,6 +492,7 @@ async fn disk_info(disk: PathBuf) -> DiskInfo { partitions: Vec::new(), capacity, guid: None, + filesystem: None, } } @@ -544,6 +576,7 @@ async fn part_info(part: PathBuf) -> PartitionInfo { used, start_os, guid: None, + filesystem: None, } } diff --git a/core/src/init.rs b/core/src/init.rs index fe5f334f2..ce489c0e9 100644 --- a/core/src/init.rs +++ b/core/src/init.rs @@ -174,7 +174,9 @@ pub async fn init( local_auth.complete(); // Re-enroll MOK on every boot if Secure Boot key exists but isn't enrolled yet - if let Err(e) = crate::util::mok::enroll_mok(std::path::Path::new(crate::util::mok::DKMS_MOK_PUB)).await { + if let Err(e) = + crate::util::mok::enroll_mok(std::path::Path::new(crate::util::mok::DKMS_MOK_PUB)).await + { tracing::warn!("MOK enrollment failed: {e}"); } @@ -369,7 +371,7 @@ pub async fn init( enable_zram.complete(); update_server_info.start(); - sync_kiosk(server_info.as_kiosk().de()?).await?; + sync_kiosk(server_info.as_kiosk().de()?.unwrap_or(false)).await?; let ram = 
get_mem_info().await?.total.0 as u64 * 1024 * 1024; let devices = lshw().await?; let status_info = ServerStatus { diff --git a/core/src/net/net_controller.rs b/core/src/net/net_controller.rs index 529b8824a..c48c7c9c6 100644 --- a/core/src/net/net_controller.rs +++ b/core/src/net/net_controller.rs @@ -820,7 +820,6 @@ impl NetService { break; } } - self.shutdown = true; Ok(()) } @@ -832,6 +831,7 @@ impl NetService { impl Drop for NetService { fn drop(&mut self) { if !self.shutdown { + self.shutdown = true; let svc = std::mem::replace(self, Self::dummy()); tokio::spawn(async move { svc.remove_all().await.log_err() }); } diff --git a/core/src/net/service_interface.rs b/core/src/net/service_interface.rs index 7c4b294aa..6fc2aa52e 100644 --- a/core/src/net/service_interface.rs +++ b/core/src/net/service_interface.rs @@ -145,9 +145,10 @@ pub struct GatewayInfo { pub public: bool, } -#[derive(Clone, Debug, Deserialize, Serialize, TS)] -#[ts(export)] +#[derive(Clone, Debug, Deserialize, Serialize, HasModel, TS)] #[serde(rename_all = "camelCase")] +#[model = "Model"] +#[ts(export)] pub struct ServiceInterface { pub id: ServiceInterfaceId, pub name: String, diff --git a/core/src/net/ssl.rs b/core/src/net/ssl.rs index 3b8e69c8e..e2dfc6eea 100644 --- a/core/src/net/ssl.rs +++ b/core/src/net/ssl.rs @@ -188,7 +188,7 @@ lazy_static::lazy_static! 
{ } fn asn1_time_to_system_time(time: &Asn1TimeRef) -> Result { - let diff = time.diff(&**ASN1_UNIX_EPOCH)?; + let diff = ASN1_UNIX_EPOCH.diff(time)?; let mut res = UNIX_EPOCH; if diff.days >= 0 { res += Duration::from_secs(diff.days as u64 * 86400); diff --git a/core/src/net/web_server.rs b/core/src/net/web_server.rs index 8ffe9deaa..eed7cf37c 100644 --- a/core/src/net/web_server.rs +++ b/core/src/net/web_server.rs @@ -509,7 +509,7 @@ where drop(queue_cell.replace(None)); if !runner.is_empty() { - tokio::time::timeout(Duration::from_secs(60), runner) + tokio::time::timeout(Duration::from_millis(100), runner) .await .log_err(); } diff --git a/core/src/registry/context.rs b/core/src/registry/context.rs index c9773d14b..2aa5739f2 100644 --- a/core/src/registry/context.rs +++ b/core/src/registry/context.rs @@ -141,7 +141,7 @@ impl RegistryContext { listen: config.registry_listen.unwrap_or(DEFAULT_REGISTRY_LISTEN), db, datadir, - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), client: Client::builder() .proxy(Proxy::custom(move |url| { if url.host_str().map_or(false, |h| h.ends_with(".onion")) { diff --git a/core/src/rpc_continuations.rs b/core/src/rpc_continuations.rs index 42c3ae858..e084264ab 100644 --- a/core/src/rpc_continuations.rs +++ b/core/src/rpc_continuations.rs @@ -17,6 +17,7 @@ use ts_rs::TS; #[allow(unused_imports)] use crate::prelude::*; +use crate::shutdown::Shutdown; use crate::util::future::TimedResource; use crate::util::net::WebSocket; use crate::util::{FromStrParser, new_guid}; @@ -98,12 +99,15 @@ pub type RestHandler = Box RestFuture + Send>; pub struct WebSocketFuture { kill: Option>, + shutdown: Option>>, fut: BoxFuture<'static, ()>, } impl Future for WebSocketFuture { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - if self.kill.as_ref().map_or(false, |k| !k.is_empty()) { + if self.kill.as_ref().map_or(false, |k| !k.is_empty()) + || 
self.shutdown.as_ref().map_or(false, |s| !s.is_empty()) + { Poll::Ready(()) } else { self.fut.poll_unpin(cx) @@ -138,6 +142,7 @@ impl RpcContinuation { RpcContinuation::WebSocket(TimedResource::new( Box::new(|ws| WebSocketFuture { kill: None, + shutdown: None, fut: handler(ws.into()).boxed(), }), timeout, @@ -170,6 +175,7 @@ impl RpcContinuation { RpcContinuation::WebSocket(TimedResource::new( Box::new(|ws| WebSocketFuture { kill, + shutdown: None, fut: handler(ws.into()).boxed(), }), timeout, @@ -183,15 +189,21 @@ impl RpcContinuation { } } -pub struct RpcContinuations(AsyncMutex>); +pub struct RpcContinuations { + continuations: AsyncMutex>, + shutdown: Option>>, +} impl RpcContinuations { - pub fn new() -> Self { - RpcContinuations(AsyncMutex::new(BTreeMap::new())) + pub fn new(shutdown: Option>>) -> Self { + RpcContinuations { + continuations: AsyncMutex::new(BTreeMap::new()), + shutdown, + } } #[instrument(skip_all)] pub async fn clean(&self) { - let mut continuations = self.0.lock().await; + let mut continuations = self.continuations.lock().await; let mut to_remove = Vec::new(); for (guid, cont) in &*continuations { if cont.is_timed_out() { @@ -206,23 +218,28 @@ impl RpcContinuations { #[instrument(skip_all)] pub async fn add(&self, guid: Guid, handler: RpcContinuation) { self.clean().await; - self.0.lock().await.insert(guid, handler); + self.continuations.lock().await.insert(guid, handler); } pub async fn get_ws_handler(&self, guid: &Guid) -> Option { - let mut continuations = self.0.lock().await; + let mut continuations = self.continuations.lock().await; if !matches!(continuations.get(guid), Some(RpcContinuation::WebSocket(_))) { return None; } let Some(RpcContinuation::WebSocket(x)) = continuations.remove(guid) else { return None; }; - x.get().await + let handler = x.get().await?; + let shutdown = self.shutdown.as_ref().map(|s| s.subscribe()); + Some(Box::new(move |ws| { + let mut fut = handler(ws); + fut.shutdown = shutdown; + fut + })) } pub async fn 
get_rest_handler(&self, guid: &Guid) -> Option { - let mut continuations: tokio::sync::MutexGuard<'_, BTreeMap> = - self.0.lock().await; + let mut continuations = self.continuations.lock().await; if !matches!(continuations.get(guid), Some(RpcContinuation::Rest(_))) { return None; } diff --git a/core/src/service/effects/callbacks.rs b/core/src/service/effects/callbacks.rs index b50c526f8..a3acc5b18 100644 --- a/core/src/service/effects/callbacks.rs +++ b/core/src/service/effects/callbacks.rs @@ -12,18 +12,20 @@ use serde::{Deserialize, Serialize}; use tracing::warn; use ts_rs::TS; +use crate::db::model::package::PackageState; use crate::db::model::public::NetworkInterfaceInfo; use crate::net::host::Host; +use crate::net::service_interface::ServiceInterface; use crate::net::ssl::FullchainCertData; use crate::prelude::*; use crate::service::effects::context::EffectContext; use crate::service::effects::net::ssl::Algorithm; use crate::service::rpc::{CallbackHandle, CallbackId}; use crate::service::{Service, ServiceActorSeed}; +use crate::status::StatusInfo; use crate::util::collections::EqMap; use crate::util::future::NonDetachingJoinHandle; use crate::util::sync::SyncMutex; -use crate::status::StatusInfo; use crate::{GatewayId, HostId, PackageId, ServiceInterfaceId}; /// Abstraction for callbacks that are triggered by patchdb subscriptions. 
@@ -66,9 +68,7 @@ impl DbWatchedCallbacks { .map(|(_, handlers)| CallbackHandlers(handlers)) .filter(|cb| !cb.0.is_empty()) }) { - let value = watch - .peek_and_mark_seen() - .unwrap_or_default(); + let value = watch.peek_and_mark_seen().unwrap_or_default(); if let Err(e) = cbs.call(vector![value]).await { tracing::error!("Error in {label} callback: {e}"); tracing::debug!("{e:?}"); @@ -99,6 +99,10 @@ pub struct ServiceCallbacks { inner: SyncMutex, get_host_info: Arc>, get_status: Arc>, + get_service_interface: Arc>, + list_service_interfaces: Arc>, + get_system_smtp: Arc>, + get_service_manifest: Arc>, } impl Default for ServiceCallbacks { @@ -107,21 +111,21 @@ impl Default for ServiceCallbacks { inner: SyncMutex::new(ServiceCallbackMap::default()), get_host_info: Arc::new(DbWatchedCallbacks::new("host info")), get_status: Arc::new(DbWatchedCallbacks::new("get_status")), + get_service_interface: Arc::new(DbWatchedCallbacks::new("get_service_interface")), + list_service_interfaces: Arc::new(DbWatchedCallbacks::new("list_service_interfaces")), + get_system_smtp: Arc::new(DbWatchedCallbacks::new("get_system_smtp")), + get_service_manifest: Arc::new(DbWatchedCallbacks::new("get_service_manifest")), } } } #[derive(Default)] struct ServiceCallbackMap { - get_service_interface: BTreeMap<(PackageId, ServiceInterfaceId), Vec>, - list_service_interfaces: BTreeMap>, - get_system_smtp: Vec, get_ssl_certificate: EqMap< (BTreeSet, FullchainCertData, Algorithm), (NonDetachingJoinHandle<()>, Vec), >, get_container_ip: BTreeMap>, - get_service_manifest: BTreeMap>, get_outbound_gateway: BTreeMap, Vec)>, } @@ -132,24 +136,10 @@ impl ServiceCallbacks { pub fn gc(&self) { self.mutate(|this| { - this.get_service_interface.retain(|_, v| { - v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); - !v.is_empty() - }); - this.list_service_interfaces.retain(|_, v| { - v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); - !v.is_empty() - }); - this.get_system_smtp - 
.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); this.get_ssl_certificate.retain(|_, (_, v)| { v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); !v.is_empty() }); - this.get_service_manifest.retain(|_, v| { - v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); - !v.is_empty() - }); this.get_outbound_gateway.retain(|_, (_, v)| { v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); !v.is_empty() @@ -157,70 +147,38 @@ impl ServiceCallbacks { }); self.get_host_info.gc(); self.get_status.gc(); + self.get_service_interface.gc(); + self.list_service_interfaces.gc(); + self.get_system_smtp.gc(); + self.get_service_manifest.gc(); } pub(super) fn add_get_service_interface( &self, package_id: PackageId, service_interface_id: ServiceInterfaceId, + watch: TypedDbWatch, handler: CallbackHandler, ) { - self.mutate(|this| { - this.get_service_interface - .entry((package_id, service_interface_id)) - .or_default() - .push(handler); - }) + self.get_service_interface + .add((package_id, service_interface_id), watch, handler); } - #[must_use] - pub fn get_service_interface( - &self, - id: &(PackageId, ServiceInterfaceId), - ) -> Option { - self.mutate(|this| { - Some(CallbackHandlers( - this.get_service_interface.remove(id).unwrap_or_default(), - )) - .filter(|cb| !cb.0.is_empty()) - }) - } - - pub(super) fn add_list_service_interfaces( + pub(super) fn add_list_service_interfaces( &self, package_id: PackageId, + watch: TypedDbWatch, handler: CallbackHandler, ) { - self.mutate(|this| { - this.list_service_interfaces - .entry(package_id) - .or_default() - .push(handler); - }) + self.list_service_interfaces.add(package_id, watch, handler); } - #[must_use] - pub fn list_service_interfaces(&self, id: &PackageId) -> Option { - self.mutate(|this| { - Some(CallbackHandlers( - this.list_service_interfaces.remove(id).unwrap_or_default(), - )) - .filter(|cb| !cb.0.is_empty()) - }) - } - - pub(super) fn add_get_system_smtp(&self, handler: 
CallbackHandler) { - self.mutate(|this| { - this.get_system_smtp.push(handler); - }) - } - - #[must_use] - pub fn get_system_smtp(&self) -> Option { - self.mutate(|this| { - Some(CallbackHandlers(std::mem::take(&mut this.get_system_smtp))) - .filter(|cb| !cb.0.is_empty()) - }) + pub(super) fn add_get_system_smtp( + &self, + watch: TypedDbWatch, + handler: CallbackHandler, + ) { + self.get_system_smtp.add((), watch, handler); } pub(super) fn add_get_host_info( @@ -376,23 +334,13 @@ impl ServiceCallbacks { }) } - pub(super) fn add_get_service_manifest(&self, package_id: PackageId, handler: CallbackHandler) { - self.mutate(|this| { - this.get_service_manifest - .entry(package_id) - .or_default() - .push(handler) - }) - } - - #[must_use] - pub fn get_service_manifest(&self, package_id: &PackageId) -> Option { - self.mutate(|this| { - this.get_service_manifest - .remove(package_id) - .map(CallbackHandlers) - .filter(|cb| !cb.0.is_empty()) - }) + pub(super) fn add_get_service_manifest( + &self, + package_id: PackageId, + watch: TypedDbWatch, + handler: CallbackHandler, + ) { + self.get_service_manifest.add(package_id, watch, handler); } } diff --git a/core/src/service/effects/dependency.rs b/core/src/service/effects/dependency.rs index 7cf233452..cf50c8674 100644 --- a/core/src/service/effects/dependency.rs +++ b/core/src/service/effects/dependency.rs @@ -399,27 +399,38 @@ pub async fn get_service_manifest( callback, }: GetServiceManifestParams, ) -> Result { + use crate::db::model::package::PackageState; + let context = context.deref()?; + let ptr = format!("/public/packageData/{}/stateInfo", package_id) + .parse() + .expect("valid json pointer"); + let mut watch = context + .seed + .ctx + .db + .watch(ptr) + .await + .typed::(); + + let manifest = watch + .peek_and_mark_seen()? 
+ .as_manifest(ManifestPreference::Old) + .de()?; + if let Some(callback) = callback { let callback = callback.register(&context.seed.persistent_container); context .seed .ctx .callbacks - .add_get_service_manifest(package_id.clone(), CallbackHandler::new(&context, callback)); + .add_get_service_manifest( + package_id.clone(), + watch, + CallbackHandler::new(&context, callback), + ); } - let db = context.seed.ctx.db.peek().await; - - let manifest = db - .as_public() - .as_package_data() - .as_idx(&package_id) - .or_not_found(&package_id)? - .as_state_info() - .as_manifest(ManifestPreference::New) - .de()?; - Ok(manifest) } diff --git a/core/src/service/effects/net/interface.rs b/core/src/service/effects/net/interface.rs index ff0452976..0716efffc 100644 --- a/core/src/service/effects/net/interface.rs +++ b/core/src/service/effects/net/interface.rs @@ -1,7 +1,5 @@ use std::collections::BTreeMap; -use imbl::vector; - use crate::net::service_interface::{AddressInfo, ServiceInterface, ServiceInterfaceType}; use crate::service::effects::callbacks::CallbackHandler; use crate::service::effects::prelude::*; @@ -42,7 +40,7 @@ pub async fn export_service_interface( interface_type: r#type, }; - let res = context + context .seed .ctx .db @@ -56,27 +54,8 @@ pub async fn export_service_interface( ifaces.insert(&id, &service_interface)?; Ok(()) }) - .await; - res.result?; - - if res.revision.is_some() { - if let Some(callbacks) = context - .seed - .ctx - .callbacks - .get_service_interface(&(package_id.clone(), id)) - { - callbacks.call(vector![]).await?; - } - if let Some(callbacks) = context - .seed - .ctx - .callbacks - .list_service_interfaces(&package_id) - { - callbacks.call(vector![]).await?; - } - } + .await + .result?; Ok(()) } @@ -101,26 +80,34 @@ pub async fn get_service_interface( ) -> Result, Error> { let context = context.deref()?; let package_id = package_id.unwrap_or_else(|| context.seed.id.clone()); - let db = context.seed.ctx.db.peek().await; + + let ptr = 
format!( + "/public/packageData/{}/serviceInterfaces/{}", + package_id, service_interface_id + ) + .parse() + .expect("valid json pointer"); + let mut watch = context + .seed + .ctx + .db + .watch(ptr) + .await + .typed::(); + + let res = watch.peek_and_mark_seen()?.de().ok(); if let Some(callback) = callback { let callback = callback.register(&context.seed.persistent_container); context.seed.ctx.callbacks.add_get_service_interface( package_id.clone(), service_interface_id.clone(), + watch, CallbackHandler::new(&context, callback), ); } - let interface = db - .as_public() - .as_package_data() - .as_idx(&package_id) - .and_then(|m| m.as_service_interfaces().as_idx(&service_interface_id)) - .map(|m| m.de()) - .transpose()?; - - Ok(interface) + Ok(res) } #[derive(Debug, Clone, Serialize, Deserialize, TS)] @@ -142,27 +129,23 @@ pub async fn list_service_interfaces( let context = context.deref()?; let package_id = package_id.unwrap_or_else(|| context.seed.id.clone()); + let ptr = format!("/public/packageData/{}/serviceInterfaces", package_id) + .parse() + .expect("valid json pointer"); + let mut watch = context.seed.ctx.db.watch(ptr).await; + + let res = imbl_value::from_value(watch.peek_and_mark_seen()?) + .unwrap_or_default(); + if let Some(callback) = callback { let callback = callback.register(&context.seed.persistent_container); context.seed.ctx.callbacks.add_list_service_interfaces( package_id.clone(), + watch.typed::>(), CallbackHandler::new(&context, callback), ); } - let res = context - .seed - .ctx - .db - .peek() - .await - .into_public() - .into_package_data() - .into_idx(&package_id) - .map(|m| m.into_service_interfaces().de()) - .transpose()? 
- .unwrap_or_default(); - Ok(res) } @@ -180,52 +163,22 @@ pub async fn clear_service_interfaces( let context = context.deref()?; let package_id = context.seed.id.clone(); - let res = context + context .seed .ctx .db .mutate(|db| { - let mut removed = Vec::new(); db.as_public_mut() .as_package_data_mut() .as_idx_mut(&package_id) .or_not_found(&package_id)? .as_service_interfaces_mut() .mutate(|s| { - Ok(s.retain(|id, _| { - if except.contains(id) { - true - } else { - removed.push(id.clone()); - false - } - })) - })?; - Ok(removed) + Ok(s.retain(|id, _| except.contains(id))) + }) }) - .await; - let removed = res.result?; - - if res.revision.is_some() { - for id in removed { - if let Some(callbacks) = context - .seed - .ctx - .callbacks - .get_service_interface(&(package_id.clone(), id)) - { - callbacks.call(vector![]).await?; - } - } - if let Some(callbacks) = context - .seed - .ctx - .callbacks - .list_service_interfaces(&package_id) - { - callbacks.call(vector![]).await?; - } - } + .await + .result?; Ok(()) } diff --git a/core/src/service/effects/system.rs b/core/src/service/effects/system.rs index abf6f36ad..e6a9afcba 100644 --- a/core/src/service/effects/system.rs +++ b/core/src/service/effects/system.rs @@ -16,25 +16,25 @@ pub async fn get_system_smtp( ) -> Result, Error> { let context = context.deref()?; + let ptr = "/public/serverInfo/smtp" + .parse() + .expect("valid json pointer"); + let mut watch = context.seed.ctx.db.watch(ptr).await; + + let res = imbl_value::from_value(watch.peek_and_mark_seen()?) 
+ .with_kind(ErrorKind::Deserialization)?; + if let Some(callback) = callback { let callback = callback.register(&context.seed.persistent_container); context .seed .ctx .callbacks - .add_get_system_smtp(CallbackHandler::new(&context, callback)); + .add_get_system_smtp( + watch.typed::>(), + CallbackHandler::new(&context, callback), + ); } - let res = context - .seed - .ctx - .db - .peek() - .await - .into_public() - .into_server_info() - .into_smtp() - .de()?; - Ok(res) } diff --git a/core/src/service/mod.rs b/core/src/service/mod.rs index aa1b396a4..5ba038651 100644 --- a/core/src/service/mod.rs +++ b/core/src/service/mod.rs @@ -432,11 +432,15 @@ impl Service { tracing::error!("Error installing service: {e}"); tracing::debug!("{e:?}") }) { + crate::volume::remove_install_backup(id).await.log_err(); return Ok(Some(service)); } } } cleanup(ctx, id, false).await.log_err(); + crate::volume::restore_volumes_from_install_backup(id) + .await + .log_err(); ctx.db .mutate(|v| v.as_public_mut().as_package_data_mut().remove(id)) .await @@ -471,37 +475,60 @@ impl Service { tracing::error!("Error installing service: {e}"); tracing::debug!("{e:?}") }) { + crate::volume::remove_install_backup(id).await.log_err(); return Ok(Some(service)); } } } - let s9pk = S9pk::open(s9pk_path, Some(id)).await?; - ctx.db - .mutate({ - |db| { - db.as_public_mut() - .as_package_data_mut() - .as_idx_mut(id) - .or_not_found(id)? - .as_state_info_mut() - .map_mutate(|s| { - if let PackageState::Updating(UpdatingState { - manifest, .. - }) = s - { - Ok(PackageState::Installed(InstalledState { manifest })) - } else { - Err(Error::new( - eyre!("{}", t!("service.mod.race-condition-detected")), - ErrorKind::Database, - )) - } - }) - } - }) - .await - .result?; - handle_installed(s9pk).await + match async { + let s9pk = S9pk::open(s9pk_path, Some(id)).await?; + ctx.db + .mutate({ + |db| { + db.as_public_mut() + .as_package_data_mut() + .as_idx_mut(id) + .or_not_found(id)? 
+ .as_state_info_mut() + .map_mutate(|s| { + if let PackageState::Updating(UpdatingState { + manifest, + .. + }) = s + { + Ok(PackageState::Installed(InstalledState { manifest })) + } else { + Err(Error::new( + eyre!( + "{}", + t!("service.mod.race-condition-detected") + ), + ErrorKind::Database, + )) + } + }) + } + }) + .await + .result?; + handle_installed(s9pk).await + } + .await + { + Ok(service) => { + crate::volume::remove_install_backup(id).await.log_err(); + Ok(service) + } + Err(e) => { + tracing::error!( + "Update rollback failed for {id}, restoring volume snapshot: {e}" + ); + crate::volume::restore_volumes_from_install_backup(id) + .await + .log_err(); + Err(e) + } + } } PackageStateMatchModelRef::Removing(_) | PackageStateMatchModelRef::Restoring(_) => { if let Ok(s9pk) = S9pk::open(s9pk_path, Some(id)).await.map_err(|e| { @@ -650,17 +677,6 @@ impl Service { tokio::task::yield_now().await; } - // Trigger manifest callbacks after successful installation - let manifest = service.seed.persistent_container.s9pk.as_manifest(); - if let Some(callbacks) = ctx.callbacks.get_service_manifest(&manifest.id) { - let manifest_value = - serde_json::to_value(manifest).with_kind(ErrorKind::Serialization)?; - callbacks - .call(imbl::vector![manifest_value.into()]) - .await - .log_err(); - } - Ok(service) } diff --git a/core/src/service/service_map.rs b/core/src/service/service_map.rs index 5668b4094..7c03caa0b 100644 --- a/core/src/service/service_map.rs +++ b/core/src/service/service_map.rs @@ -307,6 +307,8 @@ impl ServiceMap { finalization_progress.start(); let s9pk = S9pk::open(&installed_path, Some(&id)).await?; let data_version = get_data_version(&id).await?; + // Snapshot existing volumes before install/update modifies them + crate::volume::snapshot_volumes_for_install(&id).await?; let prev = if let Some(service) = service.take() { ensure_code!( recovery_source.is_none(), @@ -382,6 +384,8 @@ impl ServiceMap { cleanup.await?; } + 
crate::volume::remove_install_backup(&id).await.log_err(); + drop(service); sync_progress_task.await.map_err(|_| { diff --git a/core/src/service/uninstall.rs b/core/src/service/uninstall.rs index ec8b92468..8f4bd8ad1 100644 --- a/core/src/service/uninstall.rs +++ b/core/src/service/uninstall.rs @@ -1,8 +1,6 @@ use std::collections::BTreeSet; use std::path::Path; -use imbl::vector; - use crate::context::RpcContext; use crate::db::model::package::{InstalledState, InstallingInfo, InstallingState, PackageState}; use crate::net::host::all_hosts; @@ -94,11 +92,6 @@ pub async fn cleanup(ctx: &RpcContext, id: &PackageId, soft: bool) -> Result<(), )); } }; - // Trigger manifest callbacks with null to indicate uninstall - if let Some(callbacks) = ctx.callbacks.get_service_manifest(&manifest.id) { - callbacks.call(vector![Value::Null]).await.log_err(); - } - if !soft { let path = Path::new(DATA_DIR).join(PKG_VOLUME_DIR).join(&manifest.id); crate::util::io::delete_dir(&path).await?; diff --git a/core/src/setup.rs b/core/src/setup.rs index 1b33268b3..e6aa68212 100644 --- a/core/src/setup.rs +++ b/core/src/setup.rs @@ -115,7 +115,7 @@ pub async fn list_disks(ctx: SetupContext) -> Result, Error> { async fn setup_init( ctx: &SetupContext, password: Option, - kiosk: Option, + kiosk: bool, hostname: Option, init_phases: InitPhases, ) -> Result<(AccountInfo, InitResult), Error> { @@ -137,9 +137,8 @@ async fn setup_init( account.save(m)?; let info = m.as_public_mut().as_server_info_mut(); info.as_password_hash_mut().ser(&account.password)?; - if let Some(kiosk) = kiosk { - info.as_kiosk_mut().ser(&Some(kiosk))?; - } + info.as_kiosk_mut() + .ser(&Some(kiosk).filter(|_| &*PLATFORM != "raspberrypi"))?; if let Some(language) = language.clone() { info.as_language_mut().ser(&Some(language))?; } @@ -174,8 +173,7 @@ async fn setup_init( pub struct AttachParams { pub password: Option, pub guid: InternedString, - #[ts(optional)] - pub kiosk: Option, + pub kiosk: bool, } #[instrument(skip_all)] 
@@ -411,8 +409,7 @@ pub struct SetupExecuteParams { guid: InternedString, password: Option, recovery_source: Option>, - #[ts(optional)] - kiosk: Option, + kiosk: bool, name: Option, hostname: Option, } @@ -549,7 +546,7 @@ pub async fn execute_inner( guid: InternedString, password: Option, recovery_source: Option>, - kiosk: Option, + kiosk: bool, hostname: Option, ) -> Result<(SetupResult, RpcContext), Error> { let progress = &ctx.progress; @@ -622,7 +619,7 @@ async fn fresh_setup( ctx: &SetupContext, guid: InternedString, password: &str, - kiosk: Option, + kiosk: bool, hostname: Option, SetupExecuteProgress { init_phases, @@ -633,7 +630,6 @@ async fn fresh_setup( let account = AccountInfo::new(password, root_ca_start_time().await, hostname)?; let db = ctx.db().await?; - let kiosk = Some(kiosk.unwrap_or(true)).filter(|_| &*PLATFORM != "raspberrypi"); sync_kiosk(kiosk).await?; let language = ctx.language.peek(|a| a.clone()); @@ -684,7 +680,7 @@ async fn recover( recovery_source: BackupTargetFS, server_id: String, recovery_password: String, - kiosk: Option, + kiosk: bool, hostname: Option, progress: SetupExecuteProgress, ) -> Result<(SetupResult, RpcContext), Error> { @@ -709,7 +705,7 @@ async fn migrate( guid: InternedString, old_guid: &str, password: Option, - kiosk: Option, + kiosk: bool, hostname: Option, SetupExecuteProgress { init_phases, diff --git a/core/src/system/mod.rs b/core/src/system/mod.rs index 9caf9687b..4bb404e8b 100644 --- a/core/src/system/mod.rs +++ b/core/src/system/mod.rs @@ -6,7 +6,6 @@ use chrono::Utc; use clap::Parser; use color_eyre::eyre::eyre; use futures::FutureExt; -use imbl::vector; use imbl_value::InternedString; use rpc_toolkit::{Context, Empty, HandlerExt, ParentHandler, from_fn_async}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -319,13 +318,11 @@ pub fn kernel_logs>() -> ParentHandler) -> Result<(), Error> { - if let Some(kiosk) = kiosk { - if kiosk { - enable_kiosk().await?; - } else { - 
disable_kiosk().await?; - } +pub async fn sync_kiosk(kiosk: bool) -> Result<(), Error> { + if kiosk { + enable_kiosk().await?; + } else { + disable_kiosk().await?; } Ok(()) } @@ -1150,9 +1147,6 @@ pub async fn set_system_smtp(ctx: RpcContext, smtp: SmtpValue) -> Result<(), Err }) .await .result?; - if let Some(callbacks) = ctx.callbacks.get_system_smtp() { - callbacks.call(vector![to_value(&smtp)?]).await?; - } Ok(()) } pub async fn clear_system_smtp(ctx: RpcContext) -> Result<(), Error> { @@ -1165,9 +1159,6 @@ pub async fn clear_system_smtp(ctx: RpcContext) -> Result<(), Error> { }) .await .result?; - if let Some(callbacks) = ctx.callbacks.get_system_smtp() { - callbacks.call(vector![Value::Null]).await?; - } Ok(()) } diff --git a/core/src/tunnel/context.rs b/core/src/tunnel/context.rs index 1cb23c49e..0d6ab5df8 100644 --- a/core/src/tunnel/context.rs +++ b/core/src/tunnel/context.rs @@ -201,7 +201,7 @@ impl TunnelContext { listen, db, datadir, - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), open_authed_continuations: OpenAuthedContinuations::new(), ephemeral_sessions: SyncMutex::new(Sessions::new()), net_iface, diff --git a/core/src/upload.rs b/core/src/upload.rs index 5812834da..96f82b812 100644 --- a/core/src/upload.rs +++ b/core/src/upload.rs @@ -13,7 +13,6 @@ use futures::{FutureExt, Stream, StreamExt, ready}; use http::header::CONTENT_LENGTH; use http::{HeaderMap, StatusCode}; use imbl_value::InternedString; -use tokio::fs::File; use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt, AsyncWrite, AsyncWriteExt}; use tokio::sync::watch; @@ -23,6 +22,7 @@ use crate::progress::{PhaseProgressTrackerHandle, ProgressUnits}; use crate::rpc_continuations::{Guid, RpcContinuation}; use crate::s9pk::merkle_archive::source::ArchiveSource; use crate::s9pk::merkle_archive::source::multi_cursor_file::{FileCursor, MultiCursorFile}; +use crate::util::direct_io::DirectIoFile; use crate::util::io::{TmpDir, create_file}; pub async fn 
upload( @@ -69,16 +69,6 @@ impl Progress { false } } - fn handle_write(&mut self, res: &std::io::Result) -> bool { - match res { - Ok(a) => { - self.written += *a as u64; - self.tracker += *a as u64; - true - } - Err(e) => self.handle_error(e), - } - } async fn expected_size(watch: &mut watch::Receiver) -> Option { watch .wait_for(|progress| progress.error.is_some() || progress.expected_size.is_some()) @@ -192,16 +182,19 @@ impl UploadingFile { complete: false, }); let file = create_file(path).await?; + let multi_cursor = MultiCursorFile::open(&file).await?; + let direct_file = DirectIoFile::from_tokio_file(file).await?; let uploading = Self { tmp_dir: None, - file: MultiCursorFile::open(&file).await?, + file: multi_cursor, progress: progress.1, }; Ok(( UploadHandle { tmp_dir: None, - file, + file: direct_file, progress: progress.0, + last_synced: 0, }, uploading, )) @@ -346,8 +339,9 @@ impl AsyncSeek for UploadingFileReader { pub struct UploadHandle { tmp_dir: Option>, #[pin] - file: File, + file: DirectIoFile, progress: watch::Sender, + last_synced: u64, } impl UploadHandle { pub async fn upload(&mut self, request: Request) { @@ -394,6 +388,19 @@ impl UploadHandle { if let Err(e) = self.file.sync_all().await { self.progress.send_if_modified(|p| p.handle_error(&e)); } + // Update progress with final synced bytes + self.update_sync_progress(); + } + fn update_sync_progress(&mut self) { + let synced = self.file.bytes_synced(); + let delta = synced - self.last_synced; + if delta > 0 { + self.last_synced = synced; + self.progress.send_modify(|p| { + p.written += delta; + p.tracker += delta; + }); + } } } #[pin_project::pinned_drop] @@ -410,13 +417,23 @@ impl AsyncWrite for UploadHandle { buf: &[u8], ) -> Poll> { let this = self.project(); + // Update progress based on bytes actually flushed to disk + let synced = this.file.bytes_synced(); + let delta = synced - *this.last_synced; + if delta > 0 { + *this.last_synced = synced; + this.progress.send_modify(|p| { + 
p.written += delta; + p.tracker += delta; + }); + } match this.file.poll_write(cx, buf) { - Poll::Ready(res) => { + Poll::Ready(Err(e)) => { this.progress - .send_if_modified(|progress| progress.handle_write(&res)); - Poll::Ready(res) + .send_if_modified(|progress| progress.handle_error(&e)); + Poll::Ready(Err(e)) } - Poll::Pending => Poll::Pending, + a => a, } } fn poll_flush( diff --git a/core/src/util/direct_io.rs b/core/src/util/direct_io.rs new file mode 100644 index 000000000..67617c22c --- /dev/null +++ b/core/src/util/direct_io.rs @@ -0,0 +1,292 @@ +use std::alloc::Layout; +use std::io::Write; +use std::os::fd::AsRawFd; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use tokio::io::AsyncWrite; +use tokio::task::JoinHandle; + +const BLOCK_SIZE: usize = 4096; +const BUF_CAP: usize = 256 * 1024; // 256KB + +/// Aligned buffer for O_DIRECT I/O. +struct AlignedBuf { + ptr: *mut u8, + len: usize, +} + +// SAFETY: We have exclusive ownership of the allocation. +unsafe impl Send for AlignedBuf {} + +impl AlignedBuf { + fn new() -> Self { + let layout = Layout::from_size_align(BUF_CAP, BLOCK_SIZE).unwrap(); + // SAFETY: layout has non-zero size + let ptr = unsafe { std::alloc::alloc(layout) }; + if ptr.is_null() { + std::alloc::handle_alloc_error(layout); + } + Self { ptr, len: 0 } + } + + fn as_slice(&self) -> &[u8] { + // SAFETY: ptr is valid for len bytes, properly aligned, exclusively owned + unsafe { std::slice::from_raw_parts(self.ptr, self.len) } + } + + fn push(&mut self, data: &[u8]) -> usize { + let n = data.len().min(BUF_CAP - self.len); + // SAFETY: src and dst don't overlap, both valid for n bytes + unsafe { + std::ptr::copy_nonoverlapping(data.as_ptr(), self.ptr.add(self.len), n); + } + self.len += n; + n + } + + fn aligned_len(&self) -> usize { + self.len & !(BLOCK_SIZE - 1) + } + + fn drain_front(&mut self, n: usize) { + debug_assert!(n <= self.len); + let remaining = self.len - n; + if remaining > 0 { + // SAFETY: regions may overlap, so we 
use copy (memmove) + unsafe { + std::ptr::copy(self.ptr.add(n), self.ptr, remaining); + } + } + self.len = remaining; + } + + /// Extract aligned data into a new buffer for flushing, leaving remainder. + fn take_aligned(&mut self) -> Option<(AlignedBuf, u64)> { + let aligned = self.aligned_len(); + if aligned == 0 { + return None; + } + let mut flush_buf = AlignedBuf::new(); + flush_buf.push(&self.as_slice()[..aligned]); + self.drain_front(aligned); + Some((flush_buf, aligned as u64)) + } +} + +impl Drop for AlignedBuf { + fn drop(&mut self) { + let layout = Layout::from_size_align(BUF_CAP, BLOCK_SIZE).unwrap(); + // SAFETY: ptr was allocated with this layout in new() + unsafe { std::alloc::dealloc(self.ptr, layout) }; + } +} + +enum FileState { + Idle(std::fs::File), + Flushing(JoinHandle>), + Done, +} + +/// A file writer that uses O_DIRECT to bypass the kernel page cache. +/// +/// Buffers writes in an aligned buffer and flushes to disk in the background. +/// New writes can proceed while a flush is in progress (double-buffering). +/// Progress is tracked via [`bytes_synced`](Self::bytes_synced), which reflects +/// bytes actually written to disk. +pub struct DirectIoFile { + file_state: FileState, + buf: AlignedBuf, + synced: u64, +} + +impl DirectIoFile { + fn new(file: std::fs::File) -> Self { + Self { + file_state: FileState::Idle(file), + buf: AlignedBuf::new(), + synced: 0, + } + } + + /// Convert an existing tokio File into a DirectIoFile by adding O_DIRECT. 
+ pub async fn from_tokio_file(file: tokio::fs::File) -> std::io::Result { + let std_file = file.into_std().await; + let fd = std_file.as_raw_fd(); + // SAFETY: fd is valid, F_GETFL/F_SETFL are standard fcntl ops + unsafe { + let flags = libc::fcntl(fd, libc::F_GETFL); + if flags == -1 { + return Err(std::io::Error::last_os_error()); + } + #[cfg(target_os = "linux")] + if libc::fcntl(fd, libc::F_SETFL, flags | libc::O_DIRECT) == -1 { + return Err(std::io::Error::last_os_error()); + } + } + Ok(Self::new(std_file)) + } + + /// Number of bytes confirmed written to disk. + pub fn bytes_synced(&self) -> u64 { + self.synced + } + + /// Flush any remaining buffered data and sync to disk. + /// + /// Removes the O_DIRECT flag for the final partial-block write, then + /// calls fsync. Updates `bytes_synced` to the final total. + pub async fn sync_all(&mut self) -> std::io::Result<()> { + // Wait for any in-flight flush + self.await_flush().await?; + + let FileState::Idle(file) = std::mem::replace(&mut self.file_state, FileState::Done) else { + return Ok(()); + }; + + let mut buf = std::mem::replace(&mut self.buf, AlignedBuf::new()); + let remaining = buf.len as u64; + + tokio::task::spawn_blocking(move || { + let mut file = file; + + // Write any aligned portion + let aligned = buf.aligned_len(); + if aligned > 0 { + let slice = unsafe { std::slice::from_raw_parts(buf.ptr, aligned) }; + file.write_all(slice)?; + buf.drain_front(aligned); + } + + // Write remainder with O_DIRECT disabled + if buf.len > 0 { + let fd = file.as_raw_fd(); + // SAFETY: fd is valid, F_GETFL/F_SETFL are standard fcntl ops + #[cfg(target_os = "linux")] + unsafe { + let flags = libc::fcntl(fd, libc::F_GETFL); + libc::fcntl(fd, libc::F_SETFL, flags & !libc::O_DIRECT); + } + file.write_all(buf.as_slice())?; + } + + file.sync_all() + }) + .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))??; + + self.synced += remaining; + Ok(()) + } + + async fn await_flush(&mut self) -> 
std::io::Result<()> { + if let FileState::Flushing(handle) = &mut self.file_state { + let (file, flushed) = handle + .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))??; + self.synced += flushed; + self.file_state = FileState::Idle(file); + } + Ok(()) + } + + /// Non-blocking poll: try to complete a pending flush. + /// Returns Ready(Ok(())) if idle (or just became idle), Pending if still flushing. + fn poll_complete_flush(&mut self, cx: &mut Context<'_>) -> Poll> { + if let FileState::Flushing(handle) = &mut self.file_state { + match Pin::new(handle).poll(cx) { + Poll::Ready(Ok(Ok((file, flushed)))) => { + self.synced += flushed; + self.file_state = FileState::Idle(file); + } + Poll::Ready(Ok(Err(e))) => { + self.file_state = FileState::Done; + return Poll::Ready(Err(e)); + } + Poll::Ready(Err(e)) => { + self.file_state = FileState::Done; + return Poll::Ready(Err(std::io::Error::new(std::io::ErrorKind::Other, e))); + } + Poll::Pending => return Poll::Pending, + } + } + Poll::Ready(Ok(())) + } + + /// Start a background flush of aligned data if the file is idle. 
+ fn maybe_start_flush(&mut self) { + if !matches!(self.file_state, FileState::Idle(_)) { + return; + } + let Some((flush_buf, count)) = self.buf.take_aligned() else { + return; + }; + let FileState::Idle(file) = std::mem::replace(&mut self.file_state, FileState::Done) else { + unreachable!() + }; + let handle = tokio::task::spawn_blocking(move || { + let mut file = file; + file.write_all(flush_buf.as_slice())?; + Ok((file, count)) + }); + self.file_state = FileState::Flushing(handle); + } +} + +impl AsyncWrite for DirectIoFile { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + // Try to complete any pending flush (non-blocking, registers waker) + match self.poll_complete_flush(cx) { + Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), + _ => {} // Pending is fine — we can still accept writes into the buffer + } + + // If file just became idle and buffer has aligned data, start a flush + // to free buffer space before accepting new data + self.maybe_start_flush(); + + // Accept data into the buffer + let n = self.buf.push(buf); + if n == 0 { + // Buffer full, must wait for flush to complete and free space. + // Waker was already registered by poll_complete_flush above. 
+ return Poll::Pending; + } + + // If file is idle and we now have aligned data, start flushing + self.maybe_start_flush(); + + Poll::Ready(Ok(n)) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.poll_complete_flush(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), + Poll::Ready(Ok(())) => {} + } + + if self.buf.aligned_len() > 0 { + self.maybe_start_flush(); + // Poll the just-started flush + return self.poll_complete_flush(cx).map(|r| r.map(|_| ())); + } + + Poll::Ready(Ok(())) + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.poll_complete_flush(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), + Poll::Ready(Ok(())) => {} + } + + self.file_state = FileState::Done; + Poll::Ready(Ok(())) + } +} diff --git a/core/src/util/mod.rs b/core/src/util/mod.rs index 6cdc345a1..e9e98e039 100644 --- a/core/src/util/mod.rs +++ b/core/src/util/mod.rs @@ -38,6 +38,7 @@ pub mod collections; pub mod cpupower; pub mod crypto; pub mod data_url; +pub mod direct_io; pub mod future; pub mod http_reader; pub mod io; diff --git a/core/src/version/mod.rs b/core/src/version/mod.rs index 4c17bc32f..a4585358f 100644 --- a/core/src/version/mod.rs +++ b/core/src/version/mod.rs @@ -60,8 +60,9 @@ mod v0_4_0_alpha_17; mod v0_4_0_alpha_18; mod v0_4_0_alpha_19; mod v0_4_0_alpha_20; +mod v0_4_0_alpha_21; -pub type Current = v0_4_0_alpha_20::Version; // VERSION_BUMP +pub type Current = v0_4_0_alpha_21::Version; // VERSION_BUMP impl Current { #[instrument(skip(self, db))] @@ -189,7 +190,8 @@ enum Version { V0_4_0_alpha_17(Wrapper), V0_4_0_alpha_18(Wrapper), V0_4_0_alpha_19(Wrapper), - V0_4_0_alpha_20(Wrapper), // VERSION_BUMP + V0_4_0_alpha_20(Wrapper), + V0_4_0_alpha_21(Wrapper), // VERSION_BUMP Other(exver::Version), } @@ -252,7 +254,8 @@ impl Version { Self::V0_4_0_alpha_17(v) => DynVersion(Box::new(v.0)), 
Self::V0_4_0_alpha_18(v) => DynVersion(Box::new(v.0)), Self::V0_4_0_alpha_19(v) => DynVersion(Box::new(v.0)), - Self::V0_4_0_alpha_20(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP + Self::V0_4_0_alpha_20(v) => DynVersion(Box::new(v.0)), + Self::V0_4_0_alpha_21(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP Self::Other(v) => { return Err(Error::new( eyre!("unknown version {v}"), @@ -307,7 +310,8 @@ impl Version { Version::V0_4_0_alpha_17(Wrapper(x)) => x.semver(), Version::V0_4_0_alpha_18(Wrapper(x)) => x.semver(), Version::V0_4_0_alpha_19(Wrapper(x)) => x.semver(), - Version::V0_4_0_alpha_20(Wrapper(x)) => x.semver(), // VERSION_BUMP + Version::V0_4_0_alpha_20(Wrapper(x)) => x.semver(), + Version::V0_4_0_alpha_21(Wrapper(x)) => x.semver(), // VERSION_BUMP Version::Other(x) => x.clone(), } } diff --git a/core/src/version/v0_3_6_alpha_0.rs b/core/src/version/v0_3_6_alpha_0.rs index fbae2fc2f..e9c623bad 100644 --- a/core/src/version/v0_3_6_alpha_0.rs +++ b/core/src/version/v0_3_6_alpha_0.rs @@ -143,7 +143,8 @@ pub struct Version; impl VersionT for Version { type Previous = v0_3_5_2::Version; - type PreUpRes = (AccountInfo, SshKeys, CifsTargets); + /// (package_id, host_id, expanded_key) + type PreUpRes = (AccountInfo, SshKeys, CifsTargets, Vec<(String, String, [u8; 64])>); fn semver(self) -> exver::Version { V0_3_6_alpha_0.clone() } @@ -158,15 +159,17 @@ impl VersionT for Version { let cifs = previous_cifs(&pg).await?; + let tor_keys = previous_tor_keys(&pg).await?; + Command::new("systemctl") .arg("stop") .arg("postgresql@*.service") .invoke(crate::ErrorKind::Database) .await?; - Ok((account, ssh_keys, cifs)) + Ok((account, ssh_keys, cifs, tor_keys)) } - fn up(self, db: &mut Value, (account, ssh_keys, cifs): Self::PreUpRes) -> Result { + fn up(self, db: &mut Value, (account, ssh_keys, cifs, tor_keys): Self::PreUpRes) -> Result { let prev_package_data = db["package-data"].clone(); let wifi = json!({ @@ -183,6 +186,11 @@ impl VersionT for Version { "shuttingDown": 
db["server-info"]["status-info"]["shutting-down"], "restarting": db["server-info"]["status-info"]["restarting"], }); + let tor_address: String = from_value(db["server-info"]["tor-address"].clone())?; + let onion_address = tor_address + .replace("https://", "") + .replace("http://", "") + .replace(".onion/", ""); let server_info = { let mut server_info = json!({ "arch": db["server-info"]["arch"], @@ -196,15 +204,9 @@ impl VersionT for Version { }); server_info["postInitMigrationTodos"] = json!({}); - let tor_address: String = from_value(db["server-info"]["tor-address"].clone())?; // Maybe we do this like the Public::init does - server_info["torAddress"] = json!(tor_address); - server_info["onionAddress"] = json!( - tor_address - .replace("https://", "") - .replace("http://", "") - .replace(".onion/", "") - ); + server_info["torAddress"] = json!(&tor_address); + server_info["onionAddress"] = json!(&onion_address); server_info["networkInterfaces"] = json!({}); server_info["statusInfo"] = status_info; server_info["wifi"] = wifi; @@ -233,6 +235,30 @@ impl VersionT for Version { let private = { let mut value = json!({}); value["keyStore"] = crate::dbg!(to_value(&keystore)?); + // Preserve tor onion keys so later migrations (v0_4_0_alpha_20) can + // include them in onion-migration.json for the tor service. 
+ if !tor_keys.is_empty() { + let mut onion_map: Value = json!({}); + let onion_obj = onion_map.as_object_mut().unwrap(); + let mut tor_migration = imbl::Vector::::new(); + for (package_id, host_id, key_bytes) in &tor_keys { + let onion_addr = onion_address_from_key(key_bytes); + let encoded_key = + base64::Engine::encode(&crate::util::serde::BASE64, key_bytes); + onion_obj.insert( + onion_addr.as_str().into(), + Value::String(encoded_key.clone().into()), + ); + tor_migration.push_back(json!({ + "hostname": &onion_addr, + "packageId": package_id, + "hostId": host_id, + "key": &encoded_key, + })); + } + value["keyStore"]["onion"] = onion_map; + value["torMigration"] = Value::Array(tor_migration); + } value["password"] = to_value(&account.password)?; value["compatS9pkKey"] = to_value(&crate::db::model::private::generate_developer_key())?; @@ -498,3 +524,109 @@ async fn previous_ssh_keys(pg: &sqlx::Pool) -> Result`. +/// Server key uses `("STARTOS", "STARTOS")`. +#[tracing::instrument(skip_all)] +async fn previous_tor_keys( + pg: &sqlx::Pool, +) -> Result, Error> { + let mut keys = Vec::new(); + + // Server tor key from the account table. + // Older installs have tor_key (64 bytes). Newer installs (post-NetworkKeys migration) + // made tor_key nullable and use network_key (32 bytes, needs expansion) instead. 
+ let row = sqlx::query(r#"SELECT tor_key, network_key FROM account"#) + .fetch_one(pg) + .await + .with_kind(ErrorKind::Database)?; + if let Ok(tor_key) = row.try_get::, _>("tor_key") { + if let Ok(key) = <[u8; 64]>::try_from(tor_key) { + keys.push(("STARTOS".to_owned(), "STARTOS".to_owned(), key)); + } + } else if let Ok(net_key) = row.try_get::, _>("network_key") { + if let Ok(seed) = <[u8; 32]>::try_from(net_key) { + keys.push(( + "STARTOS".to_owned(), + "STARTOS".to_owned(), + crate::util::crypto::ed25519_expand_key(&seed), + )); + } + } + + // Package tor keys from the network_keys table (32-byte keys that need expansion) + if let Ok(rows) = sqlx::query(r#"SELECT package, interface, key FROM network_keys"#) + .fetch_all(pg) + .await + { + for row in rows { + let Ok(package) = row.try_get::("package") else { + continue; + }; + let Ok(interface) = row.try_get::("interface") else { + continue; + }; + let Ok(key_bytes) = row.try_get::, _>("key") else { + continue; + }; + if let Ok(seed) = <[u8; 32]>::try_from(key_bytes) { + keys.push(( + package, + interface, + crate::util::crypto::ed25519_expand_key(&seed), + )); + } + } + } + + // Package tor keys from the tor table (already 64-byte expanded keys) + if let Ok(rows) = sqlx::query(r#"SELECT package, interface, key FROM tor"#) + .fetch_all(pg) + .await + { + for row in rows { + let Ok(package) = row.try_get::("package") else { + continue; + }; + let Ok(interface) = row.try_get::("interface") else { + continue; + }; + let Ok(key_bytes) = row.try_get::, _>("key") else { + continue; + }; + if let Ok(key) = <[u8; 64]>::try_from(key_bytes) { + keys.push((package, interface, key)); + } + } + } + + Ok(keys) +} + +/// Derive the tor v3 onion address (without .onion suffix) from a 64-byte +/// expanded ed25519 secret key. 
+fn onion_address_from_key(expanded_key: &[u8; 64]) -> String { + use sha3::Digest; + + // Derive public key from expanded secret key using ed25519-dalek v1 + let esk = + ed25519_dalek_v1::ExpandedSecretKey::from_bytes(expanded_key).expect("invalid tor key"); + let pk = ed25519_dalek_v1::PublicKey::from(&esk); + let pk_bytes = pk.to_bytes(); + + // Compute onion v3 address: base32(pubkey || checksum || version) + // checksum = SHA3-256(".onion checksum" || pubkey || version)[0..2] + let mut hasher = sha3::Sha3_256::new(); + hasher.update(b".onion checksum"); + hasher.update(&pk_bytes); + hasher.update(b"\x03"); + let hash = hasher.finalize(); + + let mut raw = [0u8; 35]; + raw[..32].copy_from_slice(&pk_bytes); + raw[32] = hash[0]; // checksum byte 0 + raw[33] = hash[1]; // checksum byte 1 + raw[34] = 0x03; // version + + base32::encode(base32::Alphabet::Rfc4648 { padding: false }, &raw).to_ascii_lowercase() +} diff --git a/core/src/version/v0_4_0_alpha_20.rs b/core/src/version/v0_4_0_alpha_20.rs index 62b454bb1..01bff2251 100644 --- a/core/src/version/v0_4_0_alpha_20.rs +++ b/core/src/version/v0_4_0_alpha_20.rs @@ -2,11 +2,13 @@ use std::path::Path; use exver::{PreReleaseSegment, VersionRange}; use imbl_value::json; +use reqwest::Url; use super::v0_3_5::V0_3_0_COMPAT; use super::{VersionT, v0_4_0_alpha_19}; use crate::context::RpcContext; use crate::prelude::*; +use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile; lazy_static::lazy_static! { static ref V0_4_0_alpha_20: exver::Version = exver::Version::new( @@ -33,74 +35,106 @@ impl VersionT for Version { } #[instrument(skip_all)] fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result { - // Extract onion migration data before removing it - let onion_store = db + // Use the pre-built torMigration data from v0_3_6_alpha_0 if available. 
+ // This contains all (hostname, packageId, hostId, key) entries with keys + // already resolved, avoiding the issue where packageData is empty during + // migration (packages aren't reinstalled until post_up). + let migration_data = if let Some(tor_migration) = db .get("private") - .and_then(|p| p.get("keyStore")) - .and_then(|k| k.get("onion")) - .cloned() - .unwrap_or(Value::Object(Default::default())); - - let mut addresses = imbl::Vector::::new(); - - // Extract OS host onion addresses - if let Some(onions) = db - .get("public") - .and_then(|p| p.get("serverInfo")) - .and_then(|s| s.get("network")) - .and_then(|n| n.get("host")) - .and_then(|h| h.get("onions")) - .and_then(|o| o.as_array()) + .and_then(|p| p.get("torMigration")) + .and_then(|t| t.as_array()) { - for onion in onions { - if let Some(hostname) = onion.as_str() { - let key = onion_store - .get(hostname) - .and_then(|v| v.as_str()) - .unwrap_or_default(); - addresses.push_back(json!({ - "hostname": hostname, - "packageId": "STARTOS", - "hostId": "STARTOS", - "key": key, - })); + json!({ + "addresses": tor_migration.clone(), + }) + } else { + // Fallback for fresh installs or installs that didn't go through + // v0_3_6_alpha_0 with the torMigration field. 
+ let onion_store = db + .get("private") + .and_then(|p| p.get("keyStore")) + .and_then(|k| k.get("onion")) + .cloned() + .unwrap_or(Value::Object(Default::default())); + + let mut addresses = imbl::Vector::::new(); + + // Extract OS host onion addresses + if let Some(onions) = db + .get("public") + .and_then(|p| p.get("serverInfo")) + .and_then(|s| s.get("network")) + .and_then(|n| n.get("host")) + .and_then(|h| h.get("onions")) + .and_then(|o| o.as_array()) + { + for onion in onions { + if let Some(hostname) = onion.as_str() { + let key = onion_store + .get(hostname) + .and_then(|v| v.as_str()) + .ok_or_else(|| { + Error::new( + eyre!("missing tor key for onion address {hostname}"), + ErrorKind::Database, + ) + })?; + addresses.push_back(json!({ + "hostname": hostname, + "packageId": "STARTOS", + "hostId": "startos-ui", + "key": key, + })); + } } } - } - // Extract package host onion addresses - if let Some(packages) = db - .get("public") - .and_then(|p| p.get("packageData")) - .and_then(|p| p.as_object()) - { - for (package_id, package) in packages.iter() { - if let Some(hosts) = package.get("hosts").and_then(|h| h.as_object()) { - for (host_id, host) in hosts.iter() { - if let Some(onions) = host.get("onions").and_then(|o| o.as_array()) { - for onion in onions { - if let Some(hostname) = onion.as_str() { - let key = onion_store - .get(hostname) - .and_then(|v| v.as_str()) - .unwrap_or_default(); - addresses.push_back(json!({ - "hostname": hostname, - "packageId": &**package_id, - "hostId": &**host_id, - "key": key, - })); + // Extract package host onion addresses + if let Some(packages) = db + .get("public") + .and_then(|p| p.get("packageData")) + .and_then(|p| p.as_object()) + { + for (package_id, package) in packages.iter() { + if let Some(hosts) = package.get("hosts").and_then(|h| h.as_object()) { + for (host_id, host) in hosts.iter() { + if let Some(onions) = host.get("onions").and_then(|o| o.as_array()) { + for onion in onions { + if let Some(hostname) = 
onion.as_str() { + let key = onion_store + .get(hostname) + .and_then(|v| v.as_str()) + .ok_or_else(|| { + Error::new( + eyre!( + "missing tor key for onion address {hostname}" + ), + ErrorKind::Database, + ) + })?; + addresses.push_back(json!({ + "hostname": hostname, + "packageId": &**package_id, + "hostId": &**host_id, + "key": key, + })); + } } } } } } } - } - let migration_data = json!({ - "addresses": addresses, - }); + json!({ + "addresses": addresses, + }) + }; + + // Clean up torMigration from private + if let Some(private) = db.get_mut("private").and_then(|p| p.as_object_mut()) { + private.remove("torMigration"); + } // Remove onions and tor-related fields from server host if let Some(host) = db @@ -200,7 +234,7 @@ impl VersionT for Version { } #[instrument(skip_all)] - async fn post_up(self, _ctx: &RpcContext, input: Value) -> Result<(), Error> { + async fn post_up(self, ctx: &RpcContext, input: Value) -> Result<(), Error> { let path = Path::new( "/media/startos/data/package-data/volumes/tor/data/startos/onion-migration.json", ); @@ -209,6 +243,53 @@ impl VersionT for Version { crate::util::io::write_file_atomic(path, json).await?; + // Sideload the bundled tor s9pk + let s9pk_path_str = format!("/usr/lib/startos/tor_{}.s9pk", crate::ARCH); + let s9pk_path = Path::new(&s9pk_path_str); + if tokio::fs::metadata(s9pk_path).await.is_ok() { + if let Err(e) = async { + let package_s9pk = tokio::fs::File::open(s9pk_path).await?; + let file = MultiCursorFile::open(&package_s9pk).await?; + + let key = ctx.db.peek().await.into_private().into_developer_key(); + let registry_url = + Url::parse("https://registry.start9.com/").with_kind(ErrorKind::ParseUrl)?; + + ctx.services + .install( + ctx.clone(), + || crate::s9pk::load(file.clone(), || Ok(key.de()?.0), None), + None, + None::, + None, + ) + .await? + .await? 
+ .await?; + + // Set the marketplace URL on the installed tor package + let tor_id = "tor".parse::()?; + ctx.db + .mutate(|db| { + if let Some(pkg) = + db.as_public_mut().as_package_data_mut().as_idx_mut(&tor_id) + { + pkg.as_registry_mut().ser(&Some(registry_url))?; + } + Ok(()) + }) + .await + .result?; + + Ok::<_, Error>(()) + } + .await + { + tracing::error!("Error installing tor package: {e}"); + tracing::debug!("{e:?}"); + } + } + Ok(()) } fn down(self, _db: &mut Value) -> Result<(), Error> { diff --git a/core/src/version/v0_4_0_alpha_21.rs b/core/src/version/v0_4_0_alpha_21.rs new file mode 100644 index 000000000..2ca9631b3 --- /dev/null +++ b/core/src/version/v0_4_0_alpha_21.rs @@ -0,0 +1,37 @@ +use exver::{PreReleaseSegment, VersionRange}; + +use super::v0_3_5::V0_3_0_COMPAT; +use super::{VersionT, v0_4_0_alpha_20}; +use crate::prelude::*; + +lazy_static::lazy_static! { + static ref V0_4_0_alpha_21: exver::Version = exver::Version::new( + [0, 4, 0], + [PreReleaseSegment::String("alpha".into()), 21.into()] + ); +} + +#[derive(Clone, Copy, Debug, Default)] +pub struct Version; + +impl VersionT for Version { + type Previous = v0_4_0_alpha_20::Version; + type PreUpRes = (); + + async fn pre_up(self) -> Result { + Ok(()) + } + fn semver(self) -> exver::Version { + V0_4_0_alpha_21.clone() + } + fn compat(self) -> &'static VersionRange { + &V0_3_0_COMPAT + } + #[instrument(skip_all)] + fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result { + Ok(Value::Null) + } + fn down(self, _db: &mut Value) -> Result<(), Error> { + Ok(()) + } +} diff --git a/core/src/volume.rs b/core/src/volume.rs index 2755fd809..22bce54f1 100644 --- a/core/src/volume.rs +++ b/core/src/volume.rs @@ -1,13 +1,19 @@ use std::path::{Path, PathBuf}; +use tokio::process::Command; + use crate::PackageId; pub use crate::VolumeId; use crate::prelude::*; +use crate::util::Invoke; use crate::util::VersionString; +use crate::DATA_DIR; pub const PKG_VOLUME_DIR: &str = "package-data/volumes"; pub 
const BACKUP_DIR: &str = "/media/startos/backups"; +const INSTALL_BACKUP_SUFFIX: &str = ".install-backup"; + pub fn data_dir>(datadir: P, pkg_id: &PackageId, volume_id: &VolumeId) -> PathBuf { datadir .as_ref() @@ -33,3 +39,70 @@ pub fn asset_dir>( pub fn backup_dir(pkg_id: &PackageId) -> PathBuf { Path::new(BACKUP_DIR).join(pkg_id).join("data") } + +fn pkg_volume_dir(pkg_id: &PackageId) -> PathBuf { + Path::new(DATA_DIR).join(PKG_VOLUME_DIR).join(pkg_id) +} + +fn install_backup_path(pkg_id: &PackageId) -> PathBuf { + Path::new(DATA_DIR) + .join(PKG_VOLUME_DIR) + .join(format!("{pkg_id}{INSTALL_BACKUP_SUFFIX}")) +} + +/// Creates a COW snapshot of the package volume directory before install. +/// Uses `cp --reflink=always` so it's instant on btrfs and fails gracefully +/// on ext4 (no backup, current behavior preserved). +/// Returns `true` if a backup was created, `false` if no data existed or +/// the filesystem doesn't support reflinks. +pub async fn snapshot_volumes_for_install(pkg_id: &PackageId) -> Result { + let src = pkg_volume_dir(pkg_id); + if tokio::fs::metadata(&src).await.is_err() { + return Ok(false); + } + let dst = install_backup_path(pkg_id); + // Remove any stale backup from a previous failed attempt + crate::util::io::delete_dir(&dst).await?; + match Command::new("cp") + .arg("-a") + .arg("--reflink=always") + .arg(&src) + .arg(&dst) + .invoke(ErrorKind::Filesystem) + .await + { + Ok(_) => { + tracing::info!("Created install backup for {pkg_id} at {dst:?}"); + Ok(true) + } + Err(e) => { + tracing::warn!( + "Could not create install backup for {pkg_id} \ + (filesystem may not support reflinks): {e}" + ); + // Clean up partial copy if any + crate::util::io::delete_dir(&dst).await?; + Ok(false) + } + } +} + +/// Restores the package volume directory from a COW snapshot after a failed +/// install. The current (possibly corrupted) volume dir is deleted first. +/// No-op if no backup exists. 
+pub async fn restore_volumes_from_install_backup(pkg_id: &PackageId) -> Result<(), Error> { + let backup = install_backup_path(pkg_id); + if tokio::fs::metadata(&backup).await.is_err() { + return Ok(()); + } + let dst = pkg_volume_dir(pkg_id); + crate::util::io::delete_dir(&dst).await?; + crate::util::io::rename(&backup, &dst).await?; + tracing::info!("Restored volumes from install backup for {pkg_id}"); + Ok(()) +} + +/// Removes the install backup after a successful install. +pub async fn remove_install_backup(pkg_id: &PackageId) -> Result<(), Error> { + crate::util::io::delete_dir(&install_backup_path(pkg_id)).await +} diff --git a/sdk/CHANGELOG.md b/sdk/CHANGELOG.md index 881673e60..f980422b2 100644 --- a/sdk/CHANGELOG.md +++ b/sdk/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## 0.4.0-beta.61 — StartOS v0.4.0-alpha.21 (2026-03-16) + +### Fixed + +- Fixed bug where leaving the effect context triggered consts + +## 0.4.0-beta.60 — StartOS v0.4.0-alpha.20 (2026-03-16) + +### Added + +- Tunnel TS type exports and port forward labels +- Secure Boot MOK key enrollment fields in `SetupInfo` + +### Changed + +- Consolidated `Watchable` base class with generic `map`/`eq` support; renamed `call` to `fetch` +- Moved `GetServiceManifest` and `GetSslCertificate` from `package/` to `base/` +- Simplified `getServiceInterface`, `getServiceInterfaces`, `GetOutboundGateway`, `GetSystemSmtp`, and `fileHelper` using `Watchable` base class +- Simplified SDK Makefile with rsync + +### Fixed + +- Added `restart_again` flag to `DesiredStatus::Restarting` + ## 0.4.0-beta.59 — StartOS v0.4.0-alpha.20 (2026-03-06) ### Added diff --git a/sdk/base/lib/osBindings/AttachParams.ts b/sdk/base/lib/osBindings/AttachParams.ts index 31283fec6..e469833c2 100644 --- a/sdk/base/lib/osBindings/AttachParams.ts +++ b/sdk/base/lib/osBindings/AttachParams.ts @@ -4,5 +4,5 @@ import type { EncryptedWire } from './EncryptedWire' export type AttachParams = { password: EncryptedWire | null guid: string - 
kiosk?: boolean + kiosk: boolean } diff --git a/sdk/base/lib/osBindings/SetupExecuteParams.ts b/sdk/base/lib/osBindings/SetupExecuteParams.ts index 69f358c54..a1581c752 100644 --- a/sdk/base/lib/osBindings/SetupExecuteParams.ts +++ b/sdk/base/lib/osBindings/SetupExecuteParams.ts @@ -6,7 +6,7 @@ export type SetupExecuteParams = { guid: string password: EncryptedWire | null recoverySource: RecoverySource | null - kiosk?: boolean + kiosk: boolean name: string | null hostname: string | null } diff --git a/sdk/base/lib/util/Watchable.ts b/sdk/base/lib/util/Watchable.ts index 22c2d3581..b65d24a2b 100644 --- a/sdk/base/lib/util/Watchable.ts +++ b/sdk/base/lib/util/Watchable.ts @@ -83,10 +83,12 @@ export abstract class Watchable { const constRetry = this.effects.constRetry const cleanup = this.onConstRegistered(value) gen.next().then( - () => { + (a) => { abort.abort() cleanup?.() - constRetry() + if (!a.done) { + constRetry() + } }, () => { abort.abort() diff --git a/sdk/package/lib/StartSdk.ts b/sdk/package/lib/StartSdk.ts index 4566b2666..6bf925141 100644 --- a/sdk/package/lib/StartSdk.ts +++ b/sdk/package/lib/StartSdk.ts @@ -69,7 +69,7 @@ import { getOwnServiceInterfaces } from '../../base/lib/util/getServiceInterface import { Volumes, createVolumes } from './util/Volume' /** The minimum StartOS version required by this SDK release */ -export const OSVersion = testTypeVersion('0.4.0-alpha.20') +export const OSVersion = testTypeVersion('0.4.0-alpha.21') // prettier-ignore type AnyNeverCond = diff --git a/sdk/package/package-lock.json b/sdk/package/package-lock.json index ea4287eb8..2e49b06f3 100644 --- a/sdk/package/package-lock.json +++ b/sdk/package/package-lock.json @@ -1,12 +1,12 @@ { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.59", + "version": "0.4.0-beta.61", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.59", + "version": "0.4.0-beta.61", "license": "MIT", "dependencies": { 
"@iarna/toml": "^3.0.0", diff --git a/sdk/package/package.json b/sdk/package/package.json index 2d31ac87e..64dbf2d21 100644 --- a/sdk/package/package.json +++ b/sdk/package/package.json @@ -1,6 +1,6 @@ { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.59", + "version": "0.4.0-beta.61", "description": "Software development kit to facilitate packaging services for StartOS", "main": "./package/lib/index.js", "types": "./package/lib/index.d.ts", diff --git a/web/package-lock.json b/web/package-lock.json index 11eb53ecc..8757e3e75 100644 --- a/web/package-lock.json +++ b/web/package-lock.json @@ -1,12 +1,12 @@ { "name": "startos-ui", - "version": "0.4.0-alpha.20", + "version": "0.4.0-alpha.21", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "startos-ui", - "version": "0.4.0-alpha.20", + "version": "0.4.0-alpha.21", "license": "MIT", "dependencies": { "@angular/cdk": "^21.2.1", @@ -836,6 +836,7 @@ "integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==", "dev": true, "license": "MIT", + "peer": true, "dependencies": { "@babel/code-frame": "^7.29.0", "@babel/generator": "^7.29.0", @@ -8322,6 +8323,7 @@ "integrity": "sha512-ME4Fb83LgEgwNw96RKNvKV4VTLuXfoKudAmm2lP8Kk87KaMK0/Xrx/aAkMWmT8mDb+3MlFDspfbCs7adjRxA2g==", "devOptional": true, "license": "MIT", + "peer": true, "dependencies": { "cli-truncate": "^5.0.0", "colorette": "^2.0.20", @@ -12524,24 +12526,6 @@ "dev": true, "license": "ISC" }, - "node_modules/yaml": { - "version": "2.8.2", - "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.2.tgz", - "integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==", - "dev": true, - "license": "ISC", - "optional": true, - "peer": true, - "bin": { - "yaml": "bin.mjs" - }, - "engines": { - "node": ">= 14.6" - }, - "funding": { - "url": "https://github.com/sponsors/eemeli" - } - }, "node_modules/yargs": { "version": "18.0.0", "resolved": 
"https://registry.npmjs.org/yargs/-/yargs-18.0.0.tgz", diff --git a/web/package.json b/web/package.json index a4f9635ca..8aa0e0799 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "startos-ui", - "version": "0.4.0-alpha.20", + "version": "0.4.0-alpha.21", "author": "Start9 Labs, Inc", "homepage": "https://start9.com/", "license": "MIT", diff --git a/web/projects/setup-wizard/src/app/components/preserve-overwrite.dialog.ts b/web/projects/setup-wizard/src/app/components/preserve-overwrite.dialog.ts index 0cd4dace2..180120b30 100644 --- a/web/projects/setup-wizard/src/app/components/preserve-overwrite.dialog.ts +++ b/web/projects/setup-wizard/src/app/components/preserve-overwrite.dialog.ts @@ -1,12 +1,30 @@ import { Component } from '@angular/core' +import { FormsModule } from '@angular/forms' import { i18nPipe } from '@start9labs/shared' -import { TuiButton, TuiTitle } from '@taiga-ui/core' -import { TuiDialogContext } from '@taiga-ui/core' +import { + TuiButton, + TuiCheckbox, + TuiDialogContext, + TuiNotification, + TuiTitle, +} from '@taiga-ui/core' import { TuiHeader } from '@taiga-ui/layout' import { injectContext, PolymorpheusComponent } from '@taiga-ui/polymorpheus' +export interface PreserveOverwriteData { + isExt4: boolean +} + @Component({ - imports: [TuiButton, TuiHeader, TuiTitle, i18nPipe], + imports: [ + FormsModule, + TuiButton, + TuiCheckbox, + TuiHeader, + TuiNotification, + TuiTitle, + i18nPipe, + ], template: `
@@ -24,6 +42,18 @@ import { injectContext, PolymorpheusComponent } from '@taiga-ui/polymorpheus' {{ 'to discard' | i18n }} + @if (context.data.isExt4) { +

+ {{ + 'This drive uses ext4 and will be automatically converted to btrfs. A backup is strongly recommended before proceeding.' + | i18n + }} +

+ + }
@@ -24,7 +24,7 @@ import { StateService } from '../services/state.service' @if (!isFresh) {
`, diff --git a/web/projects/setup-wizard/src/app/pages/success.page.ts b/web/projects/setup-wizard/src/app/pages/success.page.ts index 3f3bd4254..f21d5f4ba 100644 --- a/web/projects/setup-wizard/src/app/pages/success.page.ts +++ b/web/projects/setup-wizard/src/app/pages/success.page.ts @@ -29,20 +29,20 @@ import { StateService } from '../services/state.service'
-

- +
+

{{ 'Setup Complete!' | i18n }} - - @if (!stateService.kiosk) { - - {{ - 'http://start.local was for setup only. It will no longer work.' - | i18n - }} - - } -

+

+
+ @if (!stateService.kiosk) { +

+ {{ + 'http://start.local was for setup only. It will no longer work.' + | i18n + }} +

+ }
@if (!result) { @@ -52,15 +52,15 @@ import { StateService } from '../services/state.service' @if (!stateService.kiosk) { } @@ -165,10 +165,10 @@ import { StateService } from '../services/state.service' (click)="openLocalAddress()" > -
- {{ 'Open Local Address' | i18n }} -
{{ lanAddress }}
-
+ + {{ 'Open Local Address' | i18n }} + {{ lanAddress }} +