Merge branch 'next/major' of github.com:Start9Labs/start-os into mcp

This commit is contained in:
Matt Hill
2026-03-17 15:51:59 -06:00
96 changed files with 1628 additions and 600 deletions

View File

@@ -124,14 +124,13 @@ jobs:
strategy:
fail-fast: false
matrix:
# TODO: re-add "raspberrypi" to the platform list below
platform: >-
${{
fromJson(
format(
'[
["{0}"],
["x86_64", "x86_64-nonfree", "x86_64-nvidia", "aarch64", "aarch64-nonfree", "aarch64-nvidia", "riscv64", "riscv64-nonfree"]
["x86_64", "x86_64-nonfree", "x86_64-nvidia", "aarch64", "aarch64-nonfree", "aarch64-nvidia", "raspberrypi", "riscv64", "riscv64-nonfree"]
]',
github.event.inputs.platform || 'ALL'
)

1
.gitignore vendored
View File

@@ -22,3 +22,4 @@ secrets.db
tmp
web/.i18n-checked
docs/USER.md
*.s9pk

View File

@@ -15,7 +15,8 @@ IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html
FIRMWARE_ROMS := build/lib/firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./build/lib/firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
TOR_S9PK := build/lib/tor_$(ARCH).s9pk
BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS) $(TOR_S9PK)
IMAGE_RECIPE_SRC := $(call ls-files, build/image-recipe/)
STARTD_SRC := core/startd.service $(BUILD_SRC)
CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
@@ -188,6 +189,9 @@ install: $(STARTOS_TARGETS)
$(call mkdir,$(DESTDIR)/lib/systemd/system)
$(call cp,core/startd.service,$(DESTDIR)/lib/systemd/system/startd.service)
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then \
sed -i '/^Environment=/a Environment=RUST_BACKTRACE=full' $(DESTDIR)/lib/systemd/system/startd.service; \
fi
$(call mkdir,$(DESTDIR)/usr/lib)
$(call rm,$(DESTDIR)/usr/lib/startos)
@@ -312,6 +316,9 @@ build/lib/depends build/lib/conflicts: $(ENVIRONMENT_FILE) $(PLATFORM_FILE) $(sh
$(FIRMWARE_ROMS): build/lib/firmware.json ./build/download-firmware.sh $(PLATFORM_FILE)
./build/download-firmware.sh $(PLATFORM)
$(TOR_S9PK): ./build/download-tor-s9pk.sh
./build/download-tor-s9pk.sh $(ARCH)
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE)
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-startbox.sh
touch core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox

14
build/download-tor-s9pk.sh Executable file
View File

@@ -0,0 +1,14 @@
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
ARCH=$1
if [ -z "$ARCH" ]; then
>&2 echo "usage: $0 <ARCH>"
exit 1
fi
curl --fail -L -o "./lib/tor_${ARCH}.s9pk" "https://s9pks.nyc3.cdn.digitaloceanspaces.com/tor_${ARCH}.s9pk"

View File

@@ -131,6 +131,11 @@ ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
EOT
if [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then
mkdir -p config/includes.chroot/etc/ssh/sshd_config.d
echo "PasswordAuthentication yes" > config/includes.chroot/etc/ssh/sshd_config.d/dev-password-auth.conf
fi
# Installer marker file (used by installed GRUB to detect the live USB)
mkdir -p config/includes.binary
touch config/includes.binary/.startos-installer
@@ -348,6 +353,10 @@ if ! [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then
passwd -l start9
fi
mkdir -p /media/startos
chmod 750 /media/startos
chown root:startos /media/startos
EOF
SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(date '+%s')}"
@@ -498,8 +507,8 @@ elif [ "${IMAGE_TYPE}" = img ]; then
$TMPDIR/next/dev $TMPDIR/next/proc $TMPDIR/next/sys $TMPDIR/next/media/startos/root
mount --rbind $TMPDIR/boot $TMPDIR/next/boot
mount --bind /dev $TMPDIR/next/dev
mount --bind /proc $TMPDIR/next/proc
mount --bind /sys $TMPDIR/next/sys
mount -t proc proc $TMPDIR/next/proc
mount -t sysfs sysfs $TMPDIR/next/sys
mount --bind $TMPDIR/root $TMPDIR/next/media/startos/root
chroot $TMPDIR/next grub-install --target=arm64-efi --removable --efi-directory=/boot/efi --boot-directory=/boot --no-nvram

View File

@@ -58,6 +58,11 @@ check_variables () {
main () {
get_variables
# Fix GPT backup header first — the image was built with a tight root
# partition, so the backup GPT is not at the end of the SD card. parted
# will prompt interactively if this isn't fixed before we use it.
sgdisk -e "$ROOT_DEV" 2>/dev/null || true
if ! check_variables; then
return 1
fi
@@ -74,9 +79,6 @@ main () {
fi
fi
# Fix GPT backup header to reflect new partition layout
sgdisk -e "$ROOT_DEV" 2>/dev/null || true
mount / -o remount,rw
btrfs filesystem resize max /media/startos/root

View File

@@ -58,13 +58,13 @@ mkdir -p /media/startos/next/media/startos/root
mount --bind /run /media/startos/next/run
mount --bind /tmp /media/startos/next/tmp
mount --bind /dev /media/startos/next/dev
mount --bind /sys /media/startos/next/sys
mount --bind /proc /media/startos/next/proc
mount -t sysfs sysfs /media/startos/next/sys
mount -t proc proc /media/startos/next/proc
mount --bind /boot /media/startos/next/boot
mount --bind /media/startos/root /media/startos/next/media/startos/root
if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then
mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars
mount -t efivarfs efivarfs /media/startos/next/sys/firmware/efi/efivars
fi
if [ -z "$*" ]; then
@@ -111,6 +111,6 @@ if [ "$CHROOT_RES" -eq 0 ]; then
reboot
fi
umount /media/startos/next
umount /media/startos/upper
umount -l /media/startos/next
umount -l /media/startos/upper
rm -rf /media/startos/upper /media/startos/next

View File

@@ -45,13 +45,13 @@ mkdir -p /media/startos/next/media/startos/root
mount --bind /run /media/startos/next/run
mount --bind /tmp /media/startos/next/tmp
mount --bind /dev /media/startos/next/dev
mount --bind /sys /media/startos/next/sys
mount --bind /proc /media/startos/next/proc
mount -t sysfs sysfs /media/startos/next/sys
mount -t proc proc /media/startos/next/proc
mount --rbind /boot /media/startos/next/boot
mount --bind /media/startos/root /media/startos/next/media/startos/root
if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then
mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars
mount -t efivarfs efivarfs /media/startos/next/sys/firmware/efi/efivars
fi
chroot /media/startos/next bash -e << "EOF"

View File

@@ -37,7 +37,7 @@
},
"../sdk/dist": {
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.59",
"version": "0.4.0-beta.61",
"license": "MIT",
"dependencies": {
"@iarna/toml": "^3.0.0",

View File

@@ -187,9 +187,10 @@ export function makeEffects(context: EffectContext): Effects {
getServiceManifest(
...[options]: Parameters<T.Effects["getServiceManifest"]>
) {
return rpcRound("get-service-manifest", options) as ReturnType<
T.Effects["getServiceManifest"]
>
return rpcRound("get-service-manifest", {
...options,
callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["getServiceManifest"]>
},
subcontainer: {
createFs(options: { imageId: string; name: string }) {
@@ -211,9 +212,10 @@ export function makeEffects(context: EffectContext): Effects {
>
}) as Effects["exportServiceInterface"],
getContainerIp(...[options]: Parameters<T.Effects["getContainerIp"]>) {
return rpcRound("get-container-ip", options) as ReturnType<
T.Effects["getContainerIp"]
>
return rpcRound("get-container-ip", {
...options,
callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["getContainerIp"]>
},
getOsIp(...[]: Parameters<T.Effects["getOsIp"]>) {
return rpcRound("get-os-ip", {}) as ReturnType<T.Effects["getOsIp"]>
@@ -244,9 +246,10 @@ export function makeEffects(context: EffectContext): Effects {
>
},
getSslCertificate(options: Parameters<T.Effects["getSslCertificate"]>[0]) {
return rpcRound("get-ssl-certificate", options) as ReturnType<
T.Effects["getSslCertificate"]
>
return rpcRound("get-ssl-certificate", {
...options,
callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["getSslCertificate"]>
},
getSslKey(options: Parameters<T.Effects["getSslKey"]>[0]) {
return rpcRound("get-ssl-key", options) as ReturnType<
@@ -308,7 +311,10 @@ export function makeEffects(context: EffectContext): Effects {
},
getStatus(...[o]: Parameters<T.Effects["getStatus"]>) {
return rpcRound("get-status", o) as ReturnType<T.Effects["getStatus"]>
return rpcRound("get-status", {
...o,
callback: context.callbacks?.addCallback(o.callback) || null,
}) as ReturnType<T.Effects["getStatus"]>
},
/// DEPRECATED
setMainStatus(o: { status: "running" | "stopped" }): Promise<null> {

View File

@@ -298,13 +298,10 @@ export class RpcListener {
}
case "stop": {
const { id } = stopType.parse(input)
this.callbacks?.removeChild("main")
return handleRpc(
id,
this.system.stop().then((result) => {
this.callbacks?.removeChild("main")
return { result }
}),
this.system.stop().then((result) => ({ result })),
)
}
case "exit": {

View File

@@ -71,7 +71,7 @@ export class SystemForStartOs implements System {
this.starting = true
effects.constRetry = utils.once(() => {
console.debug(".const() triggered")
effects.restart()
if (effects.isInContext) effects.restart()
})
let mainOnTerm: () => Promise<void> | undefined
const daemons = await (

22
core/Cargo.lock generated
View File

@@ -3376,6 +3376,15 @@ dependencies = [
"serde_json",
]
[[package]]
name = "keccak"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653"
dependencies = [
"cpufeatures",
]
[[package]]
name = "kv"
version = "0.24.0"
@@ -5985,6 +5994,16 @@ dependencies = [
"digest 0.10.7",
]
[[package]]
name = "sha3"
version = "0.10.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60"
dependencies = [
"digest 0.10.7",
"keccak",
]
[[package]]
name = "sharded-slab"
version = "0.1.7"
@@ -6415,7 +6434,7 @@ dependencies = [
[[package]]
name = "start-os"
version = "0.4.0-alpha.20"
version = "0.4.0-alpha.21"
dependencies = [
"aes",
"async-acme",
@@ -6519,6 +6538,7 @@ dependencies = [
"serde_yml",
"sha-crypt",
"sha2 0.10.9",
"sha3",
"signal-hook",
"socket2 0.6.2",
"socks5-impl",

View File

@@ -15,7 +15,7 @@ license = "MIT"
name = "start-os"
readme = "README.md"
repository = "https://github.com/Start9Labs/start-os"
version = "0.4.0-alpha.20" # VERSION_BUMP
version = "0.4.0-alpha.21" # VERSION_BUMP
[lib]
name = "startos"
@@ -200,6 +200,7 @@ serde_toml = { package = "toml", version = "0.9.9+spec-1.0.0" }
serde_yaml = { package = "serde_yml", version = "0.0.12" }
sha-crypt = "0.5.0"
sha2 = "0.10.2"
sha3 = "0.10"
signal-hook = "0.3.17"
socket2 = { version = "0.6.0", features = ["all"] }
socks5-impl = { version = "0.7.2", features = ["client", "server"] }

View File

@@ -67,6 +67,10 @@ if [[ "${ENVIRONMENT:-}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT:-}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin start-cli --target=$TARGET

View File

@@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin registrybox --target=$RUST_ARCH-unknown-linux-musl

View File

@@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin start-container --target=$RUST_ARCH-unknown-linux-musl

View File

@@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin startbox --target=$RUST_ARCH-unknown-linux-musl

View File

@@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin tunnelbox --target=$RUST_ARCH-unknown-linux-musl

View File

@@ -1255,6 +1255,13 @@ backup.bulk.leaked-reference:
fr_FR: "référence fuitée vers BackupMountGuard"
pl_PL: "wyciekła referencja do BackupMountGuard"
backup.bulk.service-not-ready:
en_US: "Cannot create a backup of a service that is still initializing or in an error state"
de_DE: "Es kann keine Sicherung eines Dienstes erstellt werden, der noch initialisiert wird oder sich im Fehlerzustand befindet"
es_ES: "No se puede crear una copia de seguridad de un servicio que aún se está inicializando o está en estado de error"
fr_FR: "Impossible de créer une sauvegarde d'un service encore en cours d'initialisation ou en état d'erreur"
pl_PL: "Nie można utworzyć kopii zapasowej usługi, która jest jeszcze inicjalizowana lub znajduje się w stanie błędu"
# backup/restore.rs
backup.restore.package-error:
en_US: "Error restoring package %{id}: %{error}"

View File

@@ -300,6 +300,15 @@ async fn perform_backup(
error: backup_result,
},
);
} else {
backup_report.insert(
id.clone(),
PackageBackupReport {
error: Some(
t!("backup.bulk.service-not-ready").to_string(),
),
},
);
}
}

View File

@@ -10,6 +10,7 @@ use tracing::instrument;
use ts_rs::TS;
use super::target::BackupTargetId;
use crate::PackageId;
use crate::backup::os::OsBackup;
use crate::context::setup::SetupResult;
use crate::context::{RpcContext, SetupContext};
@@ -26,7 +27,6 @@ use crate::service::service_map::DownloadInstallFuture;
use crate::setup::SetupExecuteProgress;
use crate::system::{save_language, sync_kiosk};
use crate::util::serde::{IoFormat, Pem};
use crate::{PLATFORM, PackageId};
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
@@ -90,7 +90,7 @@ pub async fn recover_full_server(
recovery_source: TmpMountGuard,
server_id: &str,
recovery_password: &str,
kiosk: Option<bool>,
kiosk: bool,
hostname: Option<ServerHostnameInfo>,
SetupExecuteProgress {
init_phases,
@@ -123,7 +123,6 @@ pub async fn recover_full_server(
os_backup.account.hostname = h;
}
let kiosk = Some(kiosk.unwrap_or(true)).filter(|_| &*PLATFORM != "raspberrypi");
sync_kiosk(kiosk).await?;
let language = ctx.language.peek(|a| a.clone());

View File

@@ -149,6 +149,11 @@ impl MultiExecutable {
}
pub fn execute(&self) {
#[cfg(feature = "backtrace-on-stack-overflow")]
unsafe {
backtrace_on_stack_overflow::enable()
};
set_locale_from_env();
let mut popped = Vec::with_capacity(2);

View File

@@ -190,7 +190,7 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
}
}
});
rt.shutdown_timeout(Duration::from_secs(60));
rt.shutdown_timeout(Duration::from_millis(100));
res
};

View File

@@ -39,7 +39,7 @@ impl DiagnosticContext {
shutdown,
disk_guid,
error: Arc::new(error.into()),
rpc_continuations: RpcContinuations::new(),
rpc_continuations: RpcContinuations::new(None),
})))
}
}

View File

@@ -32,7 +32,7 @@ impl InitContext {
error: watch::channel(None).0,
progress,
shutdown,
rpc_continuations: RpcContinuations::new(),
rpc_continuations: RpcContinuations::new(None),
})))
}
}

View File

@@ -62,8 +62,8 @@ pub struct RpcContextSeed {
pub db: TypedPatchDb<Database>,
pub sync_db: watch::Sender<u64>,
pub account: SyncRwLock<AccountInfo>,
pub net_controller: Arc<NetController>,
pub os_net_service: NetService,
pub net_controller: Arc<NetController>,
pub s9pk_arch: Option<&'static str>,
pub services: ServiceMap,
pub cancellable_installs: SyncMutex<BTreeMap<PackageId, oneshot::Sender<()>>>,
@@ -346,10 +346,10 @@ impl RpcContext {
services,
cancellable_installs: SyncMutex::new(BTreeMap::new()),
metrics_cache,
rpc_continuations: RpcContinuations::new(Some(shutdown.clone())),
shutdown,
lxc_manager: Arc::new(LxcManager::new()),
open_authed_continuations: OpenAuthedContinuations::new(),
rpc_continuations: RpcContinuations::new(),
wifi_manager: Arc::new(RwLock::new(wifi_interface.clone().map(|i| WpaCli::init(i)))),
current_secret: Arc::new(
Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).map_err(|e| {

View File

@@ -85,7 +85,7 @@ impl SetupContext {
result: OnceCell::new(),
disk_guid: OnceCell::new(),
shutdown,
rpc_continuations: RpcContinuations::new(),
rpc_continuations: RpcContinuations::new(None),
install_rootfs: SyncMutex::new(None),
language: SyncMutex::new(None),
keyboard: SyncMutex::new(None),

View File

@@ -31,7 +31,7 @@ pub struct Database {
impl Database {
pub fn init(
account: &AccountInfo,
kiosk: Option<bool>,
kiosk: bool,
language: Option<InternedString>,
keyboard: Option<KeyboardOptions>,
) -> Result<Self, Error> {

View File

@@ -49,7 +49,7 @@ pub struct Public {
impl Public {
pub fn init(
account: &AccountInfo,
kiosk: Option<bool>,
kiosk: bool,
language: Option<InternedString>,
keyboard: Option<KeyboardOptions>,
) -> Result<Self, Error> {
@@ -149,7 +149,7 @@ impl Public {
echoip_urls: default_echoip_urls(),
ram: 0,
devices: Vec::new(),
kiosk,
kiosk: Some(kiosk).filter(|_| &*PLATFORM != "raspberrypi"),
language,
keyboard,
},

View File

@@ -25,20 +25,28 @@ pub enum RepairStrategy {
Preen,
Aggressive,
}
/// Detects the filesystem type of a block device using `grub-probe`.
/// Returns e.g. `"ext2"` (for ext4), `"btrfs"`, etc.
pub async fn detect_filesystem(
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<String, Error> {
Ok(String::from_utf8(
Command::new("grub-probe")
.arg("-d")
.arg(logicalname.as_ref())
.invoke(crate::ErrorKind::DiskManagement)
.await?,
)?
.trim()
.to_owned())
}
impl RepairStrategy {
pub async fn fsck(
&self,
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
match &*String::from_utf8(
Command::new("grub-probe")
.arg("-d")
.arg(logicalname.as_ref())
.invoke(crate::ErrorKind::DiskManagement)
.await?,
)?
.trim()
{
match &*detect_filesystem(&logicalname).await? {
"ext2" => self.e2fsck(logicalname).await,
"btrfs" => self.btrfs_check(logicalname).await,
fs => {

View File

@@ -7,7 +7,7 @@ use rust_i18n::t;
use tokio::process::Command;
use tracing::instrument;
use super::fsck::{RepairStrategy, RequiresReboot};
use super::fsck::{RepairStrategy, RequiresReboot, detect_filesystem};
use super::util::pvscan;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::{FileSystem, ReadWrite};
@@ -301,6 +301,37 @@ pub async fn mount_fs<P: AsRef<Path>>(
.with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
blockdev_path = Path::new("/dev/mapper").join(&full_name);
}
// Convert ext4 → btrfs on the package-data partition if needed
let fs_type = detect_filesystem(&blockdev_path).await?;
if fs_type == "ext2" {
tracing::info!("Running e2fsck before converting {name} from ext4 to btrfs");
Command::new("e2fsck")
.arg("-fy")
.arg(&blockdev_path)
.invoke(ErrorKind::DiskManagement)
.await?;
tracing::info!("Converting {name} from ext4 to btrfs");
Command::new("btrfs-convert")
.arg("--no-progress")
.arg(&blockdev_path)
.invoke(ErrorKind::DiskManagement)
.await?;
// Defragment after conversion for optimal performance
let tmp_mount = datadir.as_ref().join(format!("{name}.convert-tmp"));
tokio::fs::create_dir_all(&tmp_mount).await?;
BlockDev::new(&blockdev_path)
.mount(&tmp_mount, ReadWrite)
.await?;
Command::new("btrfs")
.args(["filesystem", "defragment", "-r"])
.arg(&tmp_mount)
.invoke(ErrorKind::DiskManagement)
.await?;
unmount(&tmp_mount, false).await?;
tokio::fs::remove_dir(&tmp_mount).await?;
}
let reboot = repair.fsck(&blockdev_path).await?;
if !guid.ends_with("_UNENC") {
@@ -342,3 +373,99 @@ pub async fn mount_all_fs<P: AsRef<Path>>(
reboot |= mount_fs(guid, &datadir, "package-data", repair, password).await?;
Ok(reboot)
}
/// Temporarily activates a VG and opens LUKS to probe the `package-data`
/// filesystem type. Returns `None` if probing fails (e.g. LV doesn't exist).
#[instrument(skip_all)]
pub async fn probe_package_data_fs(guid: &str) -> Result<Option<String>, Error> {
// Import and activate the VG
match Command::new("vgimport")
.arg(guid)
.invoke(ErrorKind::DiskManagement)
.await
{
Ok(_) => {}
Err(e)
if format!("{}", e.source)
.lines()
.any(|l| l.trim() == format!("Volume group \"{}\" is not exported", guid)) =>
{
// Already imported, that's fine
}
Err(e) => {
tracing::warn!("Could not import VG {guid} for filesystem probe: {e}");
return Ok(None);
}
}
if let Err(e) = Command::new("vgchange")
.arg("-ay")
.arg(guid)
.invoke(ErrorKind::DiskManagement)
.await
{
tracing::warn!("Could not activate VG {guid} for filesystem probe: {e}");
return Ok(None);
}
let mut opened_luks = false;
let result = async {
let lv_path = Path::new("/dev").join(guid).join("package-data");
if tokio::fs::metadata(&lv_path).await.is_err() {
return Ok(None);
}
let blockdev_path = if !guid.ends_with("_UNENC") {
let full_name = format!("{guid}_package-data");
let password = DEFAULT_PASSWORD;
if let Some(parent) = Path::new(PASSWORD_PATH).parent() {
tokio::fs::create_dir_all(parent).await?;
}
tokio::fs::write(PASSWORD_PATH, password)
.await
.with_ctx(|_| (ErrorKind::Filesystem, PASSWORD_PATH))?;
Command::new("cryptsetup")
.arg("-q")
.arg("luksOpen")
.arg("--allow-discards")
.arg(format!("--key-file={PASSWORD_PATH}"))
.arg(format!("--keyfile-size={}", password.len()))
.arg(&lv_path)
.arg(&full_name)
.invoke(ErrorKind::DiskManagement)
.await?;
let _ = tokio::fs::remove_file(PASSWORD_PATH).await;
opened_luks = true;
PathBuf::from(format!("/dev/mapper/{full_name}"))
} else {
lv_path.clone()
};
detect_filesystem(&blockdev_path).await.map(Some)
}
.await;
// Always clean up: close LUKS, deactivate VG, export VG
if opened_luks {
let full_name = format!("{guid}_package-data");
Command::new("cryptsetup")
.arg("-q")
.arg("luksClose")
.arg(&full_name)
.invoke(ErrorKind::DiskManagement)
.await
.log_err();
}
Command::new("vgchange")
.arg("-an")
.arg(guid)
.invoke(ErrorKind::DiskManagement)
.await
.log_err();
Command::new("vgexport")
.arg(guid)
.invoke(ErrorKind::DiskManagement)
.await
.log_err();
result
}

View File

@@ -41,6 +41,7 @@ pub struct DiskInfo {
pub partitions: Vec<PartitionInfo>,
pub capacity: u64,
pub guid: Option<InternedString>,
pub filesystem: Option<String>,
}
#[derive(Clone, Debug, Deserialize, Serialize, ts_rs::TS)]
@@ -55,6 +56,7 @@ pub struct PartitionInfo {
pub used: Option<u64>,
pub start_os: BTreeMap<String, StartOsRecoveryInfo>,
pub guid: Option<InternedString>,
pub filesystem: Option<String>,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, ts_rs::TS)]
@@ -374,6 +376,15 @@ pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
disk_info.capacity = part_info.capacity;
if let Some(g) = disk_guids.get(&disk_info.logicalname) {
disk_info.guid = g.clone();
if let Some(guid) = g {
disk_info.filesystem =
crate::disk::main::probe_package_data_fs(guid)
.await
.unwrap_or_else(|e| {
tracing::warn!("Failed to probe filesystem for {guid}: {e}");
None
});
}
} else {
disk_info.partitions = vec![part_info];
}
@@ -384,11 +395,31 @@ pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
disk_info.partitions = Vec::with_capacity(index.parts.len());
if let Some(g) = disk_guids.get(&disk_info.logicalname) {
disk_info.guid = g.clone();
if let Some(guid) = g {
disk_info.filesystem =
crate::disk::main::probe_package_data_fs(guid)
.await
.unwrap_or_else(|e| {
tracing::warn!("Failed to probe filesystem for {guid}: {e}");
None
});
}
} else {
for part in index.parts {
let mut part_info = part_info(part).await;
if let Some(g) = disk_guids.get(&part_info.logicalname) {
part_info.guid = g.clone();
if let Some(guid) = g {
part_info.filesystem =
crate::disk::main::probe_package_data_fs(guid)
.await
.unwrap_or_else(|e| {
tracing::warn!(
"Failed to probe filesystem for {guid}: {e}"
);
None
});
}
}
disk_info.partitions.push(part_info);
}
@@ -461,6 +492,7 @@ async fn disk_info(disk: PathBuf) -> DiskInfo {
partitions: Vec::new(),
capacity,
guid: None,
filesystem: None,
}
}
@@ -544,6 +576,7 @@ async fn part_info(part: PathBuf) -> PartitionInfo {
used,
start_os,
guid: None,
filesystem: None,
}
}

View File

@@ -174,7 +174,9 @@ pub async fn init(
local_auth.complete();
// Re-enroll MOK on every boot if Secure Boot key exists but isn't enrolled yet
if let Err(e) = crate::util::mok::enroll_mok(std::path::Path::new(crate::util::mok::DKMS_MOK_PUB)).await {
if let Err(e) =
crate::util::mok::enroll_mok(std::path::Path::new(crate::util::mok::DKMS_MOK_PUB)).await
{
tracing::warn!("MOK enrollment failed: {e}");
}
@@ -369,7 +371,7 @@ pub async fn init(
enable_zram.complete();
update_server_info.start();
sync_kiosk(server_info.as_kiosk().de()?).await?;
sync_kiosk(server_info.as_kiosk().de()?.unwrap_or(false)).await?;
let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
let devices = lshw().await?;
let status_info = ServerStatus {

View File

@@ -820,7 +820,6 @@ impl NetService {
break;
}
}
self.shutdown = true;
Ok(())
}
@@ -832,6 +831,7 @@ impl NetService {
impl Drop for NetService {
fn drop(&mut self) {
if !self.shutdown {
self.shutdown = true;
let svc = std::mem::replace(self, Self::dummy());
tokio::spawn(async move { svc.remove_all().await.log_err() });
}

View File

@@ -145,9 +145,10 @@ pub struct GatewayInfo {
pub public: bool,
}
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[ts(export)]
#[derive(Clone, Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct ServiceInterface {
pub id: ServiceInterfaceId,
pub name: String,

View File

@@ -188,7 +188,7 @@ lazy_static::lazy_static! {
}
fn asn1_time_to_system_time(time: &Asn1TimeRef) -> Result<SystemTime, Error> {
let diff = time.diff(&**ASN1_UNIX_EPOCH)?;
let diff = ASN1_UNIX_EPOCH.diff(time)?;
let mut res = UNIX_EPOCH;
if diff.days >= 0 {
res += Duration::from_secs(diff.days as u64 * 86400);

View File

@@ -509,7 +509,7 @@ where
drop(queue_cell.replace(None));
if !runner.is_empty() {
tokio::time::timeout(Duration::from_secs(60), runner)
tokio::time::timeout(Duration::from_millis(100), runner)
.await
.log_err();
}

View File

@@ -141,7 +141,7 @@ impl RegistryContext {
listen: config.registry_listen.unwrap_or(DEFAULT_REGISTRY_LISTEN),
db,
datadir,
rpc_continuations: RpcContinuations::new(),
rpc_continuations: RpcContinuations::new(None),
client: Client::builder()
.proxy(Proxy::custom(move |url| {
if url.host_str().map_or(false, |h| h.ends_with(".onion")) {

View File

@@ -17,6 +17,7 @@ use ts_rs::TS;
#[allow(unused_imports)]
use crate::prelude::*;
use crate::shutdown::Shutdown;
use crate::util::future::TimedResource;
use crate::util::net::WebSocket;
use crate::util::{FromStrParser, new_guid};
@@ -98,12 +99,15 @@ pub type RestHandler = Box<dyn FnOnce(Request) -> RestFuture + Send>;
pub struct WebSocketFuture {
kill: Option<broadcast::Receiver<()>>,
shutdown: Option<broadcast::Receiver<Option<Shutdown>>>,
fut: BoxFuture<'static, ()>,
}
impl Future for WebSocketFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.kill.as_ref().map_or(false, |k| !k.is_empty()) {
if self.kill.as_ref().map_or(false, |k| !k.is_empty())
|| self.shutdown.as_ref().map_or(false, |s| !s.is_empty())
{
Poll::Ready(())
} else {
self.fut.poll_unpin(cx)
@@ -138,6 +142,7 @@ impl RpcContinuation {
RpcContinuation::WebSocket(TimedResource::new(
Box::new(|ws| WebSocketFuture {
kill: None,
shutdown: None,
fut: handler(ws.into()).boxed(),
}),
timeout,
@@ -170,6 +175,7 @@ impl RpcContinuation {
RpcContinuation::WebSocket(TimedResource::new(
Box::new(|ws| WebSocketFuture {
kill,
shutdown: None,
fut: handler(ws.into()).boxed(),
}),
timeout,
@@ -183,15 +189,21 @@ impl RpcContinuation {
}
}
pub struct RpcContinuations(AsyncMutex<BTreeMap<Guid, RpcContinuation>>);
pub struct RpcContinuations {
continuations: AsyncMutex<BTreeMap<Guid, RpcContinuation>>,
shutdown: Option<broadcast::Sender<Option<Shutdown>>>,
}
impl RpcContinuations {
pub fn new() -> Self {
RpcContinuations(AsyncMutex::new(BTreeMap::new()))
pub fn new(shutdown: Option<broadcast::Sender<Option<Shutdown>>>) -> Self {
RpcContinuations {
continuations: AsyncMutex::new(BTreeMap::new()),
shutdown,
}
}
#[instrument(skip_all)]
pub async fn clean(&self) {
let mut continuations = self.0.lock().await;
let mut continuations = self.continuations.lock().await;
let mut to_remove = Vec::new();
for (guid, cont) in &*continuations {
if cont.is_timed_out() {
@@ -206,23 +218,28 @@ impl RpcContinuations {
#[instrument(skip_all)]
pub async fn add(&self, guid: Guid, handler: RpcContinuation) {
self.clean().await;
self.0.lock().await.insert(guid, handler);
self.continuations.lock().await.insert(guid, handler);
}
pub async fn get_ws_handler(&self, guid: &Guid) -> Option<WebSocketHandler> {
let mut continuations = self.0.lock().await;
let mut continuations = self.continuations.lock().await;
if !matches!(continuations.get(guid), Some(RpcContinuation::WebSocket(_))) {
return None;
}
let Some(RpcContinuation::WebSocket(x)) = continuations.remove(guid) else {
return None;
};
x.get().await
let handler = x.get().await?;
let shutdown = self.shutdown.as_ref().map(|s| s.subscribe());
Some(Box::new(move |ws| {
let mut fut = handler(ws);
fut.shutdown = shutdown;
fut
}))
}
pub async fn get_rest_handler(&self, guid: &Guid) -> Option<RestHandler> {
let mut continuations: tokio::sync::MutexGuard<'_, BTreeMap<Guid, RpcContinuation>> =
self.0.lock().await;
let mut continuations = self.continuations.lock().await;
if !matches!(continuations.get(guid), Some(RpcContinuation::Rest(_))) {
return None;
}

View File

@@ -12,18 +12,20 @@ use serde::{Deserialize, Serialize};
use tracing::warn;
use ts_rs::TS;
use crate::db::model::package::PackageState;
use crate::db::model::public::NetworkInterfaceInfo;
use crate::net::host::Host;
use crate::net::service_interface::ServiceInterface;
use crate::net::ssl::FullchainCertData;
use crate::prelude::*;
use crate::service::effects::context::EffectContext;
use crate::service::effects::net::ssl::Algorithm;
use crate::service::rpc::{CallbackHandle, CallbackId};
use crate::service::{Service, ServiceActorSeed};
use crate::status::StatusInfo;
use crate::util::collections::EqMap;
use crate::util::future::NonDetachingJoinHandle;
use crate::util::sync::SyncMutex;
use crate::status::StatusInfo;
use crate::{GatewayId, HostId, PackageId, ServiceInterfaceId};
/// Abstraction for callbacks that are triggered by patchdb subscriptions.
@@ -66,9 +68,7 @@ impl<K: Ord + Clone + Send + Sync + 'static> DbWatchedCallbacks<K> {
.map(|(_, handlers)| CallbackHandlers(handlers))
.filter(|cb| !cb.0.is_empty())
}) {
let value = watch
.peek_and_mark_seen()
.unwrap_or_default();
let value = watch.peek_and_mark_seen().unwrap_or_default();
if let Err(e) = cbs.call(vector![value]).await {
tracing::error!("Error in {label} callback: {e}");
tracing::debug!("{e:?}");
@@ -99,6 +99,10 @@ pub struct ServiceCallbacks {
inner: SyncMutex<ServiceCallbackMap>,
get_host_info: Arc<DbWatchedCallbacks<(PackageId, HostId)>>,
get_status: Arc<DbWatchedCallbacks<PackageId>>,
get_service_interface: Arc<DbWatchedCallbacks<(PackageId, ServiceInterfaceId)>>,
list_service_interfaces: Arc<DbWatchedCallbacks<PackageId>>,
get_system_smtp: Arc<DbWatchedCallbacks<()>>,
get_service_manifest: Arc<DbWatchedCallbacks<PackageId>>,
}
impl Default for ServiceCallbacks {
@@ -107,21 +111,21 @@ impl Default for ServiceCallbacks {
inner: SyncMutex::new(ServiceCallbackMap::default()),
get_host_info: Arc::new(DbWatchedCallbacks::new("host info")),
get_status: Arc::new(DbWatchedCallbacks::new("get_status")),
get_service_interface: Arc::new(DbWatchedCallbacks::new("get_service_interface")),
list_service_interfaces: Arc::new(DbWatchedCallbacks::new("list_service_interfaces")),
get_system_smtp: Arc::new(DbWatchedCallbacks::new("get_system_smtp")),
get_service_manifest: Arc::new(DbWatchedCallbacks::new("get_service_manifest")),
}
}
}
#[derive(Default)]
struct ServiceCallbackMap {
get_service_interface: BTreeMap<(PackageId, ServiceInterfaceId), Vec<CallbackHandler>>,
list_service_interfaces: BTreeMap<PackageId, Vec<CallbackHandler>>,
get_system_smtp: Vec<CallbackHandler>,
get_ssl_certificate: EqMap<
(BTreeSet<InternedString>, FullchainCertData, Algorithm),
(NonDetachingJoinHandle<()>, Vec<CallbackHandler>),
>,
get_container_ip: BTreeMap<PackageId, Vec<CallbackHandler>>,
get_service_manifest: BTreeMap<PackageId, Vec<CallbackHandler>>,
get_outbound_gateway: BTreeMap<PackageId, (NonDetachingJoinHandle<()>, Vec<CallbackHandler>)>,
}
@@ -132,24 +136,10 @@ impl ServiceCallbacks {
pub fn gc(&self) {
self.mutate(|this| {
this.get_service_interface.retain(|_, v| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
this.list_service_interfaces.retain(|_, v| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
this.get_system_smtp
.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
this.get_ssl_certificate.retain(|_, (_, v)| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
this.get_service_manifest.retain(|_, v| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
this.get_outbound_gateway.retain(|_, (_, v)| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
@@ -157,70 +147,38 @@ impl ServiceCallbacks {
});
self.get_host_info.gc();
self.get_status.gc();
self.get_service_interface.gc();
self.list_service_interfaces.gc();
self.get_system_smtp.gc();
self.get_service_manifest.gc();
}
pub(super) fn add_get_service_interface(
&self,
package_id: PackageId,
service_interface_id: ServiceInterfaceId,
watch: TypedDbWatch<ServiceInterface>,
handler: CallbackHandler,
) {
self.mutate(|this| {
this.get_service_interface
.entry((package_id, service_interface_id))
.or_default()
.push(handler);
})
self.get_service_interface
.add((package_id, service_interface_id), watch, handler);
}
#[must_use]
pub fn get_service_interface(
&self,
id: &(PackageId, ServiceInterfaceId),
) -> Option<CallbackHandlers> {
self.mutate(|this| {
Some(CallbackHandlers(
this.get_service_interface.remove(id).unwrap_or_default(),
))
.filter(|cb| !cb.0.is_empty())
})
}
pub(super) fn add_list_service_interfaces(
pub(super) fn add_list_service_interfaces<T: Send + 'static>(
&self,
package_id: PackageId,
watch: TypedDbWatch<T>,
handler: CallbackHandler,
) {
self.mutate(|this| {
this.list_service_interfaces
.entry(package_id)
.or_default()
.push(handler);
})
self.list_service_interfaces.add(package_id, watch, handler);
}
#[must_use]
pub fn list_service_interfaces(&self, id: &PackageId) -> Option<CallbackHandlers> {
self.mutate(|this| {
Some(CallbackHandlers(
this.list_service_interfaces.remove(id).unwrap_or_default(),
))
.filter(|cb| !cb.0.is_empty())
})
}
pub(super) fn add_get_system_smtp(&self, handler: CallbackHandler) {
self.mutate(|this| {
this.get_system_smtp.push(handler);
})
}
#[must_use]
pub fn get_system_smtp(&self) -> Option<CallbackHandlers> {
self.mutate(|this| {
Some(CallbackHandlers(std::mem::take(&mut this.get_system_smtp)))
.filter(|cb| !cb.0.is_empty())
})
pub(super) fn add_get_system_smtp<T: Send + 'static>(
&self,
watch: TypedDbWatch<T>,
handler: CallbackHandler,
) {
self.get_system_smtp.add((), watch, handler);
}
pub(super) fn add_get_host_info(
@@ -376,23 +334,13 @@ impl ServiceCallbacks {
})
}
pub(super) fn add_get_service_manifest(&self, package_id: PackageId, handler: CallbackHandler) {
self.mutate(|this| {
this.get_service_manifest
.entry(package_id)
.or_default()
.push(handler)
})
}
#[must_use]
pub fn get_service_manifest(&self, package_id: &PackageId) -> Option<CallbackHandlers> {
self.mutate(|this| {
this.get_service_manifest
.remove(package_id)
.map(CallbackHandlers)
.filter(|cb| !cb.0.is_empty())
})
pub(super) fn add_get_service_manifest(
&self,
package_id: PackageId,
watch: TypedDbWatch<PackageState>,
handler: CallbackHandler,
) {
self.get_service_manifest.add(package_id, watch, handler);
}
}

View File

@@ -399,27 +399,38 @@ pub async fn get_service_manifest(
callback,
}: GetServiceManifestParams,
) -> Result<Manifest, Error> {
use crate::db::model::package::PackageState;
let context = context.deref()?;
let ptr = format!("/public/packageData/{}/stateInfo", package_id)
.parse()
.expect("valid json pointer");
let mut watch = context
.seed
.ctx
.db
.watch(ptr)
.await
.typed::<PackageState>();
let manifest = watch
.peek_and_mark_seen()?
.as_manifest(ManifestPreference::Old)
.de()?;
if let Some(callback) = callback {
let callback = callback.register(&context.seed.persistent_container);
context
.seed
.ctx
.callbacks
.add_get_service_manifest(package_id.clone(), CallbackHandler::new(&context, callback));
.add_get_service_manifest(
package_id.clone(),
watch,
CallbackHandler::new(&context, callback),
);
}
let db = context.seed.ctx.db.peek().await;
let manifest = db
.as_public()
.as_package_data()
.as_idx(&package_id)
.or_not_found(&package_id)?
.as_state_info()
.as_manifest(ManifestPreference::New)
.de()?;
Ok(manifest)
}

View File

@@ -1,7 +1,5 @@
use std::collections::BTreeMap;
use imbl::vector;
use crate::net::service_interface::{AddressInfo, ServiceInterface, ServiceInterfaceType};
use crate::service::effects::callbacks::CallbackHandler;
use crate::service::effects::prelude::*;
@@ -42,7 +40,7 @@ pub async fn export_service_interface(
interface_type: r#type,
};
let res = context
context
.seed
.ctx
.db
@@ -56,27 +54,8 @@ pub async fn export_service_interface(
ifaces.insert(&id, &service_interface)?;
Ok(())
})
.await;
res.result?;
if res.revision.is_some() {
if let Some(callbacks) = context
.seed
.ctx
.callbacks
.get_service_interface(&(package_id.clone(), id))
{
callbacks.call(vector![]).await?;
}
if let Some(callbacks) = context
.seed
.ctx
.callbacks
.list_service_interfaces(&package_id)
{
callbacks.call(vector![]).await?;
}
}
.await
.result?;
Ok(())
}
@@ -101,26 +80,34 @@ pub async fn get_service_interface(
) -> Result<Option<ServiceInterface>, Error> {
let context = context.deref()?;
let package_id = package_id.unwrap_or_else(|| context.seed.id.clone());
let db = context.seed.ctx.db.peek().await;
let ptr = format!(
"/public/packageData/{}/serviceInterfaces/{}",
package_id, service_interface_id
)
.parse()
.expect("valid json pointer");
let mut watch = context
.seed
.ctx
.db
.watch(ptr)
.await
.typed::<ServiceInterface>();
let res = watch.peek_and_mark_seen()?.de().ok();
if let Some(callback) = callback {
let callback = callback.register(&context.seed.persistent_container);
context.seed.ctx.callbacks.add_get_service_interface(
package_id.clone(),
service_interface_id.clone(),
watch,
CallbackHandler::new(&context, callback),
);
}
let interface = db
.as_public()
.as_package_data()
.as_idx(&package_id)
.and_then(|m| m.as_service_interfaces().as_idx(&service_interface_id))
.map(|m| m.de())
.transpose()?;
Ok(interface)
Ok(res)
}
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
@@ -142,27 +129,23 @@ pub async fn list_service_interfaces(
let context = context.deref()?;
let package_id = package_id.unwrap_or_else(|| context.seed.id.clone());
let ptr = format!("/public/packageData/{}/serviceInterfaces", package_id)
.parse()
.expect("valid json pointer");
let mut watch = context.seed.ctx.db.watch(ptr).await;
let res = imbl_value::from_value(watch.peek_and_mark_seen()?)
.unwrap_or_default();
if let Some(callback) = callback {
let callback = callback.register(&context.seed.persistent_container);
context.seed.ctx.callbacks.add_list_service_interfaces(
package_id.clone(),
watch.typed::<BTreeMap<ServiceInterfaceId, ServiceInterface>>(),
CallbackHandler::new(&context, callback),
);
}
let res = context
.seed
.ctx
.db
.peek()
.await
.into_public()
.into_package_data()
.into_idx(&package_id)
.map(|m| m.into_service_interfaces().de())
.transpose()?
.unwrap_or_default();
Ok(res)
}
@@ -180,52 +163,22 @@ pub async fn clear_service_interfaces(
let context = context.deref()?;
let package_id = context.seed.id.clone();
let res = context
context
.seed
.ctx
.db
.mutate(|db| {
let mut removed = Vec::new();
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package_id)
.or_not_found(&package_id)?
.as_service_interfaces_mut()
.mutate(|s| {
Ok(s.retain(|id, _| {
if except.contains(id) {
true
} else {
removed.push(id.clone());
false
}
}))
})?;
Ok(removed)
Ok(s.retain(|id, _| except.contains(id)))
})
})
.await;
let removed = res.result?;
if res.revision.is_some() {
for id in removed {
if let Some(callbacks) = context
.seed
.ctx
.callbacks
.get_service_interface(&(package_id.clone(), id))
{
callbacks.call(vector![]).await?;
}
}
if let Some(callbacks) = context
.seed
.ctx
.callbacks
.list_service_interfaces(&package_id)
{
callbacks.call(vector![]).await?;
}
}
.await
.result?;
Ok(())
}

View File

@@ -16,25 +16,25 @@ pub async fn get_system_smtp(
) -> Result<Option<SmtpValue>, Error> {
let context = context.deref()?;
let ptr = "/public/serverInfo/smtp"
.parse()
.expect("valid json pointer");
let mut watch = context.seed.ctx.db.watch(ptr).await;
let res = imbl_value::from_value(watch.peek_and_mark_seen()?)
.with_kind(ErrorKind::Deserialization)?;
if let Some(callback) = callback {
let callback = callback.register(&context.seed.persistent_container);
context
.seed
.ctx
.callbacks
.add_get_system_smtp(CallbackHandler::new(&context, callback));
.add_get_system_smtp(
watch.typed::<Option<SmtpValue>>(),
CallbackHandler::new(&context, callback),
);
}
let res = context
.seed
.ctx
.db
.peek()
.await
.into_public()
.into_server_info()
.into_smtp()
.de()?;
Ok(res)
}

View File

@@ -432,11 +432,15 @@ impl Service {
tracing::error!("Error installing service: {e}");
tracing::debug!("{e:?}")
}) {
crate::volume::remove_install_backup(id).await.log_err();
return Ok(Some(service));
}
}
}
cleanup(ctx, id, false).await.log_err();
crate::volume::restore_volumes_from_install_backup(id)
.await
.log_err();
ctx.db
.mutate(|v| v.as_public_mut().as_package_data_mut().remove(id))
.await
@@ -471,37 +475,60 @@ impl Service {
tracing::error!("Error installing service: {e}");
tracing::debug!("{e:?}")
}) {
crate::volume::remove_install_backup(id).await.log_err();
return Ok(Some(service));
}
}
}
let s9pk = S9pk::open(s9pk_path, Some(id)).await?;
ctx.db
.mutate({
|db| {
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(id)
.or_not_found(id)?
.as_state_info_mut()
.map_mutate(|s| {
if let PackageState::Updating(UpdatingState {
manifest, ..
}) = s
{
Ok(PackageState::Installed(InstalledState { manifest }))
} else {
Err(Error::new(
eyre!("{}", t!("service.mod.race-condition-detected")),
ErrorKind::Database,
))
}
})
}
})
.await
.result?;
handle_installed(s9pk).await
match async {
let s9pk = S9pk::open(s9pk_path, Some(id)).await?;
ctx.db
.mutate({
|db| {
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(id)
.or_not_found(id)?
.as_state_info_mut()
.map_mutate(|s| {
if let PackageState::Updating(UpdatingState {
manifest,
..
}) = s
{
Ok(PackageState::Installed(InstalledState { manifest }))
} else {
Err(Error::new(
eyre!(
"{}",
t!("service.mod.race-condition-detected")
),
ErrorKind::Database,
))
}
})
}
})
.await
.result?;
handle_installed(s9pk).await
}
.await
{
Ok(service) => {
crate::volume::remove_install_backup(id).await.log_err();
Ok(service)
}
Err(e) => {
tracing::error!(
"Update rollback failed for {id}, restoring volume snapshot: {e}"
);
crate::volume::restore_volumes_from_install_backup(id)
.await
.log_err();
Err(e)
}
}
}
PackageStateMatchModelRef::Removing(_) | PackageStateMatchModelRef::Restoring(_) => {
if let Ok(s9pk) = S9pk::open(s9pk_path, Some(id)).await.map_err(|e| {
@@ -650,17 +677,6 @@ impl Service {
tokio::task::yield_now().await;
}
// Trigger manifest callbacks after successful installation
let manifest = service.seed.persistent_container.s9pk.as_manifest();
if let Some(callbacks) = ctx.callbacks.get_service_manifest(&manifest.id) {
let manifest_value =
serde_json::to_value(manifest).with_kind(ErrorKind::Serialization)?;
callbacks
.call(imbl::vector![manifest_value.into()])
.await
.log_err();
}
Ok(service)
}

View File

@@ -307,6 +307,8 @@ impl ServiceMap {
finalization_progress.start();
let s9pk = S9pk::open(&installed_path, Some(&id)).await?;
let data_version = get_data_version(&id).await?;
// Snapshot existing volumes before install/update modifies them
crate::volume::snapshot_volumes_for_install(&id).await?;
let prev = if let Some(service) = service.take() {
ensure_code!(
recovery_source.is_none(),
@@ -382,6 +384,8 @@ impl ServiceMap {
cleanup.await?;
}
crate::volume::remove_install_backup(&id).await.log_err();
drop(service);
sync_progress_task.await.map_err(|_| {

View File

@@ -1,8 +1,6 @@
use std::collections::BTreeSet;
use std::path::Path;
use imbl::vector;
use crate::context::RpcContext;
use crate::db::model::package::{InstalledState, InstallingInfo, InstallingState, PackageState};
use crate::net::host::all_hosts;
@@ -94,11 +92,6 @@ pub async fn cleanup(ctx: &RpcContext, id: &PackageId, soft: bool) -> Result<(),
));
}
};
// Trigger manifest callbacks with null to indicate uninstall
if let Some(callbacks) = ctx.callbacks.get_service_manifest(&manifest.id) {
callbacks.call(vector![Value::Null]).await.log_err();
}
if !soft {
let path = Path::new(DATA_DIR).join(PKG_VOLUME_DIR).join(&manifest.id);
crate::util::io::delete_dir(&path).await?;

View File

@@ -115,7 +115,7 @@ pub async fn list_disks(ctx: SetupContext) -> Result<Vec<DiskInfo>, Error> {
async fn setup_init(
ctx: &SetupContext,
password: Option<String>,
kiosk: Option<bool>,
kiosk: bool,
hostname: Option<ServerHostnameInfo>,
init_phases: InitPhases,
) -> Result<(AccountInfo, InitResult), Error> {
@@ -137,9 +137,8 @@ async fn setup_init(
account.save(m)?;
let info = m.as_public_mut().as_server_info_mut();
info.as_password_hash_mut().ser(&account.password)?;
if let Some(kiosk) = kiosk {
info.as_kiosk_mut().ser(&Some(kiosk))?;
}
info.as_kiosk_mut()
.ser(&Some(kiosk).filter(|_| &*PLATFORM != "raspberrypi"))?;
if let Some(language) = language.clone() {
info.as_language_mut().ser(&Some(language))?;
}
@@ -174,8 +173,7 @@ async fn setup_init(
pub struct AttachParams {
pub password: Option<EncryptedWire>,
pub guid: InternedString,
#[ts(optional)]
pub kiosk: Option<bool>,
pub kiosk: bool,
}
#[instrument(skip_all)]
@@ -411,8 +409,7 @@ pub struct SetupExecuteParams {
guid: InternedString,
password: Option<EncryptedWire>,
recovery_source: Option<RecoverySource<EncryptedWire>>,
#[ts(optional)]
kiosk: Option<bool>,
kiosk: bool,
name: Option<InternedString>,
hostname: Option<InternedString>,
}
@@ -549,7 +546,7 @@ pub async fn execute_inner(
guid: InternedString,
password: Option<String>,
recovery_source: Option<RecoverySource<String>>,
kiosk: Option<bool>,
kiosk: bool,
hostname: Option<ServerHostnameInfo>,
) -> Result<(SetupResult, RpcContext), Error> {
let progress = &ctx.progress;
@@ -622,7 +619,7 @@ async fn fresh_setup(
ctx: &SetupContext,
guid: InternedString,
password: &str,
kiosk: Option<bool>,
kiosk: bool,
hostname: Option<ServerHostnameInfo>,
SetupExecuteProgress {
init_phases,
@@ -633,7 +630,6 @@ async fn fresh_setup(
let account = AccountInfo::new(password, root_ca_start_time().await, hostname)?;
let db = ctx.db().await?;
let kiosk = Some(kiosk.unwrap_or(true)).filter(|_| &*PLATFORM != "raspberrypi");
sync_kiosk(kiosk).await?;
let language = ctx.language.peek(|a| a.clone());
@@ -684,7 +680,7 @@ async fn recover(
recovery_source: BackupTargetFS,
server_id: String,
recovery_password: String,
kiosk: Option<bool>,
kiosk: bool,
hostname: Option<ServerHostnameInfo>,
progress: SetupExecuteProgress,
) -> Result<(SetupResult, RpcContext), Error> {
@@ -709,7 +705,7 @@ async fn migrate(
guid: InternedString,
old_guid: &str,
password: Option<String>,
kiosk: Option<bool>,
kiosk: bool,
hostname: Option<ServerHostnameInfo>,
SetupExecuteProgress {
init_phases,

View File

@@ -6,7 +6,6 @@ use chrono::Utc;
use clap::Parser;
use color_eyre::eyre::eyre;
use futures::FutureExt;
use imbl::vector;
use imbl_value::InternedString;
use rpc_toolkit::{Context, Empty, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
@@ -319,13 +318,11 @@ pub fn kernel_logs<C: Context + AsRef<RpcContinuations>>() -> ParentHandler<C, L
const DISABLE_KIOSK_PATH: &str =
"/media/startos/config/overlay/etc/systemd/system/getty@tty1.service.d/autologin.conf";
pub async fn sync_kiosk(kiosk: Option<bool>) -> Result<(), Error> {
if let Some(kiosk) = kiosk {
if kiosk {
enable_kiosk().await?;
} else {
disable_kiosk().await?;
}
pub async fn sync_kiosk(kiosk: bool) -> Result<(), Error> {
if kiosk {
enable_kiosk().await?;
} else {
disable_kiosk().await?;
}
Ok(())
}
@@ -1150,9 +1147,6 @@ pub async fn set_system_smtp(ctx: RpcContext, smtp: SmtpValue) -> Result<(), Err
})
.await
.result?;
if let Some(callbacks) = ctx.callbacks.get_system_smtp() {
callbacks.call(vector![to_value(&smtp)?]).await?;
}
Ok(())
}
pub async fn clear_system_smtp(ctx: RpcContext) -> Result<(), Error> {
@@ -1165,9 +1159,6 @@ pub async fn clear_system_smtp(ctx: RpcContext) -> Result<(), Error> {
})
.await
.result?;
if let Some(callbacks) = ctx.callbacks.get_system_smtp() {
callbacks.call(vector![Value::Null]).await?;
}
Ok(())
}

View File

@@ -201,7 +201,7 @@ impl TunnelContext {
listen,
db,
datadir,
rpc_continuations: RpcContinuations::new(),
rpc_continuations: RpcContinuations::new(None),
open_authed_continuations: OpenAuthedContinuations::new(),
ephemeral_sessions: SyncMutex::new(Sessions::new()),
net_iface,

View File

@@ -13,7 +13,6 @@ use futures::{FutureExt, Stream, StreamExt, ready};
use http::header::CONTENT_LENGTH;
use http::{HeaderMap, StatusCode};
use imbl_value::InternedString;
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt, AsyncWrite, AsyncWriteExt};
use tokio::sync::watch;
@@ -23,6 +22,7 @@ use crate::progress::{PhaseProgressTrackerHandle, ProgressUnits};
use crate::rpc_continuations::{Guid, RpcContinuation};
use crate::s9pk::merkle_archive::source::ArchiveSource;
use crate::s9pk::merkle_archive::source::multi_cursor_file::{FileCursor, MultiCursorFile};
use crate::util::direct_io::DirectIoFile;
use crate::util::io::{TmpDir, create_file};
pub async fn upload(
@@ -69,16 +69,6 @@ impl Progress {
false
}
}
fn handle_write(&mut self, res: &std::io::Result<usize>) -> bool {
match res {
Ok(a) => {
self.written += *a as u64;
self.tracker += *a as u64;
true
}
Err(e) => self.handle_error(e),
}
}
async fn expected_size(watch: &mut watch::Receiver<Self>) -> Option<u64> {
watch
.wait_for(|progress| progress.error.is_some() || progress.expected_size.is_some())
@@ -192,16 +182,19 @@ impl UploadingFile {
complete: false,
});
let file = create_file(path).await?;
let multi_cursor = MultiCursorFile::open(&file).await?;
let direct_file = DirectIoFile::from_tokio_file(file).await?;
let uploading = Self {
tmp_dir: None,
file: MultiCursorFile::open(&file).await?,
file: multi_cursor,
progress: progress.1,
};
Ok((
UploadHandle {
tmp_dir: None,
file,
file: direct_file,
progress: progress.0,
last_synced: 0,
},
uploading,
))
@@ -346,8 +339,9 @@ impl AsyncSeek for UploadingFileReader {
pub struct UploadHandle {
tmp_dir: Option<Arc<TmpDir>>,
#[pin]
file: File,
file: DirectIoFile,
progress: watch::Sender<Progress>,
last_synced: u64,
}
impl UploadHandle {
pub async fn upload(&mut self, request: Request) {
@@ -394,6 +388,19 @@ impl UploadHandle {
if let Err(e) = self.file.sync_all().await {
self.progress.send_if_modified(|p| p.handle_error(&e));
}
// Update progress with final synced bytes
self.update_sync_progress();
}
fn update_sync_progress(&mut self) {
let synced = self.file.bytes_synced();
let delta = synced - self.last_synced;
if delta > 0 {
self.last_synced = synced;
self.progress.send_modify(|p| {
p.written += delta;
p.tracker += delta;
});
}
}
}
#[pin_project::pinned_drop]
@@ -410,13 +417,23 @@ impl AsyncWrite for UploadHandle {
buf: &[u8],
) -> Poll<Result<usize, std::io::Error>> {
let this = self.project();
// Update progress based on bytes actually flushed to disk
let synced = this.file.bytes_synced();
let delta = synced - *this.last_synced;
if delta > 0 {
*this.last_synced = synced;
this.progress.send_modify(|p| {
p.written += delta;
p.tracker += delta;
});
}
match this.file.poll_write(cx, buf) {
Poll::Ready(res) => {
Poll::Ready(Err(e)) => {
this.progress
.send_if_modified(|progress| progress.handle_write(&res));
Poll::Ready(res)
.send_if_modified(|progress| progress.handle_error(&e));
Poll::Ready(Err(e))
}
Poll::Pending => Poll::Pending,
a => a,
}
}
fn poll_flush(

292
core/src/util/direct_io.rs Normal file
View File

@@ -0,0 +1,292 @@
use std::alloc::Layout;
use std::io::Write;
use std::os::fd::AsRawFd;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::io::AsyncWrite;
use tokio::task::JoinHandle;
const BLOCK_SIZE: usize = 4096;
const BUF_CAP: usize = 256 * 1024; // 256KB
/// Aligned buffer for O_DIRECT I/O.
struct AlignedBuf {
ptr: *mut u8,
len: usize,
}
// SAFETY: We have exclusive ownership of the allocation.
unsafe impl Send for AlignedBuf {}
impl AlignedBuf {
fn new() -> Self {
let layout = Layout::from_size_align(BUF_CAP, BLOCK_SIZE).unwrap();
// SAFETY: layout has non-zero size
let ptr = unsafe { std::alloc::alloc(layout) };
if ptr.is_null() {
std::alloc::handle_alloc_error(layout);
}
Self { ptr, len: 0 }
}
fn as_slice(&self) -> &[u8] {
// SAFETY: ptr is valid for len bytes, properly aligned, exclusively owned
unsafe { std::slice::from_raw_parts(self.ptr, self.len) }
}
fn push(&mut self, data: &[u8]) -> usize {
let n = data.len().min(BUF_CAP - self.len);
// SAFETY: src and dst don't overlap, both valid for n bytes
unsafe {
std::ptr::copy_nonoverlapping(data.as_ptr(), self.ptr.add(self.len), n);
}
self.len += n;
n
}
fn aligned_len(&self) -> usize {
self.len & !(BLOCK_SIZE - 1)
}
fn drain_front(&mut self, n: usize) {
debug_assert!(n <= self.len);
let remaining = self.len - n;
if remaining > 0 {
// SAFETY: regions may overlap, so we use copy (memmove)
unsafe {
std::ptr::copy(self.ptr.add(n), self.ptr, remaining);
}
}
self.len = remaining;
}
/// Extract aligned data into a new buffer for flushing, leaving remainder.
fn take_aligned(&mut self) -> Option<(AlignedBuf, u64)> {
let aligned = self.aligned_len();
if aligned == 0 {
return None;
}
let mut flush_buf = AlignedBuf::new();
flush_buf.push(&self.as_slice()[..aligned]);
self.drain_front(aligned);
Some((flush_buf, aligned as u64))
}
}
impl Drop for AlignedBuf {
fn drop(&mut self) {
let layout = Layout::from_size_align(BUF_CAP, BLOCK_SIZE).unwrap();
// SAFETY: ptr was allocated with this layout in new()
unsafe { std::alloc::dealloc(self.ptr, layout) };
}
}
enum FileState {
Idle(std::fs::File),
Flushing(JoinHandle<std::io::Result<(std::fs::File, u64)>>),
Done,
}
/// A file writer that uses O_DIRECT to bypass the kernel page cache.
///
/// Buffers writes in an aligned buffer and flushes to disk in the background.
/// New writes can proceed while a flush is in progress (double-buffering).
/// Progress is tracked via [`bytes_synced`](Self::bytes_synced), which reflects
/// bytes actually written to disk.
pub struct DirectIoFile {
file_state: FileState,
buf: AlignedBuf,
synced: u64,
}
impl DirectIoFile {
fn new(file: std::fs::File) -> Self {
Self {
file_state: FileState::Idle(file),
buf: AlignedBuf::new(),
synced: 0,
}
}
/// Convert an existing tokio File into a DirectIoFile by adding O_DIRECT.
pub async fn from_tokio_file(file: tokio::fs::File) -> std::io::Result<Self> {
let std_file = file.into_std().await;
let fd = std_file.as_raw_fd();
// SAFETY: fd is valid, F_GETFL/F_SETFL are standard fcntl ops
unsafe {
let flags = libc::fcntl(fd, libc::F_GETFL);
if flags == -1 {
return Err(std::io::Error::last_os_error());
}
#[cfg(target_os = "linux")]
if libc::fcntl(fd, libc::F_SETFL, flags | libc::O_DIRECT) == -1 {
return Err(std::io::Error::last_os_error());
}
}
Ok(Self::new(std_file))
}
/// Number of bytes confirmed written to disk.
pub fn bytes_synced(&self) -> u64 {
self.synced
}
/// Flush any remaining buffered data and sync to disk.
///
/// Removes the O_DIRECT flag for the final partial-block write, then
/// calls fsync. Updates `bytes_synced` to the final total.
pub async fn sync_all(&mut self) -> std::io::Result<()> {
// Wait for any in-flight flush
self.await_flush().await?;
let FileState::Idle(file) = std::mem::replace(&mut self.file_state, FileState::Done) else {
return Ok(());
};
let mut buf = std::mem::replace(&mut self.buf, AlignedBuf::new());
let remaining = buf.len as u64;
tokio::task::spawn_blocking(move || {
let mut file = file;
// Write any aligned portion
let aligned = buf.aligned_len();
if aligned > 0 {
let slice = unsafe { std::slice::from_raw_parts(buf.ptr, aligned) };
file.write_all(slice)?;
buf.drain_front(aligned);
}
// Write remainder with O_DIRECT disabled
if buf.len > 0 {
let fd = file.as_raw_fd();
// SAFETY: fd is valid, F_GETFL/F_SETFL are standard fcntl ops
#[cfg(target_os = "linux")]
unsafe {
let flags = libc::fcntl(fd, libc::F_GETFL);
libc::fcntl(fd, libc::F_SETFL, flags & !libc::O_DIRECT);
}
file.write_all(buf.as_slice())?;
}
file.sync_all()
})
.await
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))??;
self.synced += remaining;
Ok(())
}
async fn await_flush(&mut self) -> std::io::Result<()> {
if let FileState::Flushing(handle) = &mut self.file_state {
let (file, flushed) = handle
.await
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))??;
self.synced += flushed;
self.file_state = FileState::Idle(file);
}
Ok(())
}
/// Non-blocking poll: try to complete a pending flush.
/// Returns Ready(Ok(())) if idle (or just became idle), Pending if still flushing.
fn poll_complete_flush(&mut self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
if let FileState::Flushing(handle) = &mut self.file_state {
match Pin::new(handle).poll(cx) {
Poll::Ready(Ok(Ok((file, flushed)))) => {
self.synced += flushed;
self.file_state = FileState::Idle(file);
}
Poll::Ready(Ok(Err(e))) => {
self.file_state = FileState::Done;
return Poll::Ready(Err(e));
}
Poll::Ready(Err(e)) => {
self.file_state = FileState::Done;
return Poll::Ready(Err(std::io::Error::new(std::io::ErrorKind::Other, e)));
}
Poll::Pending => return Poll::Pending,
}
}
Poll::Ready(Ok(()))
}
/// Start a background flush of aligned data if the file is idle.
fn maybe_start_flush(&mut self) {
if !matches!(self.file_state, FileState::Idle(_)) {
return;
}
let Some((flush_buf, count)) = self.buf.take_aligned() else {
return;
};
let FileState::Idle(file) = std::mem::replace(&mut self.file_state, FileState::Done) else {
unreachable!()
};
let handle = tokio::task::spawn_blocking(move || {
let mut file = file;
file.write_all(flush_buf.as_slice())?;
Ok((file, count))
});
self.file_state = FileState::Flushing(handle);
}
}
impl AsyncWrite for DirectIoFile {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
// Try to complete any pending flush (non-blocking, registers waker)
match self.poll_complete_flush(cx) {
Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
_ => {} // Pending is fine — we can still accept writes into the buffer
}
// If file just became idle and buffer has aligned data, start a flush
// to free buffer space before accepting new data
self.maybe_start_flush();
// Accept data into the buffer
let n = self.buf.push(buf);
if n == 0 {
// Buffer full, must wait for flush to complete and free space.
// Waker was already registered by poll_complete_flush above.
return Poll::Pending;
}
// If file is idle and we now have aligned data, start flushing
self.maybe_start_flush();
Poll::Ready(Ok(n))
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
match self.poll_complete_flush(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
Poll::Ready(Ok(())) => {}
}
if self.buf.aligned_len() > 0 {
self.maybe_start_flush();
// Poll the just-started flush
return self.poll_complete_flush(cx).map(|r| r.map(|_| ()));
}
Poll::Ready(Ok(()))
}
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
match self.poll_complete_flush(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
Poll::Ready(Ok(())) => {}
}
self.file_state = FileState::Done;
Poll::Ready(Ok(()))
}
}

View File

@@ -38,6 +38,7 @@ pub mod collections;
pub mod cpupower;
pub mod crypto;
pub mod data_url;
pub mod direct_io;
pub mod future;
pub mod http_reader;
pub mod io;

View File

@@ -60,8 +60,9 @@ mod v0_4_0_alpha_17;
mod v0_4_0_alpha_18;
mod v0_4_0_alpha_19;
mod v0_4_0_alpha_20;
mod v0_4_0_alpha_21;
pub type Current = v0_4_0_alpha_20::Version; // VERSION_BUMP
pub type Current = v0_4_0_alpha_21::Version; // VERSION_BUMP
impl Current {
#[instrument(skip(self, db))]
@@ -189,7 +190,8 @@ enum Version {
V0_4_0_alpha_17(Wrapper<v0_4_0_alpha_17::Version>),
V0_4_0_alpha_18(Wrapper<v0_4_0_alpha_18::Version>),
V0_4_0_alpha_19(Wrapper<v0_4_0_alpha_19::Version>),
V0_4_0_alpha_20(Wrapper<v0_4_0_alpha_20::Version>), // VERSION_BUMP
V0_4_0_alpha_20(Wrapper<v0_4_0_alpha_20::Version>),
V0_4_0_alpha_21(Wrapper<v0_4_0_alpha_21::Version>), // VERSION_BUMP
Other(exver::Version),
}
@@ -252,7 +254,8 @@ impl Version {
Self::V0_4_0_alpha_17(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_18(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_19(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_20(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
Self::V0_4_0_alpha_20(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_21(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
Self::Other(v) => {
return Err(Error::new(
eyre!("unknown version {v}"),
@@ -307,7 +310,8 @@ impl Version {
Version::V0_4_0_alpha_17(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_18(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_19(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_20(Wrapper(x)) => x.semver(), // VERSION_BUMP
Version::V0_4_0_alpha_20(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_21(Wrapper(x)) => x.semver(), // VERSION_BUMP
Version::Other(x) => x.clone(),
}
}

View File

@@ -143,7 +143,8 @@ pub struct Version;
impl VersionT for Version {
type Previous = v0_3_5_2::Version;
type PreUpRes = (AccountInfo, SshKeys, CifsTargets);
/// (package_id, host_id, expanded_key)
type PreUpRes = (AccountInfo, SshKeys, CifsTargets, Vec<(String, String, [u8; 64])>);
fn semver(self) -> exver::Version {
V0_3_6_alpha_0.clone()
}
@@ -158,15 +159,17 @@ impl VersionT for Version {
let cifs = previous_cifs(&pg).await?;
let tor_keys = previous_tor_keys(&pg).await?;
Command::new("systemctl")
.arg("stop")
.arg("postgresql@*.service")
.invoke(crate::ErrorKind::Database)
.await?;
Ok((account, ssh_keys, cifs))
Ok((account, ssh_keys, cifs, tor_keys))
}
fn up(self, db: &mut Value, (account, ssh_keys, cifs): Self::PreUpRes) -> Result<Value, Error> {
fn up(self, db: &mut Value, (account, ssh_keys, cifs, tor_keys): Self::PreUpRes) -> Result<Value, Error> {
let prev_package_data = db["package-data"].clone();
let wifi = json!({
@@ -183,6 +186,11 @@ impl VersionT for Version {
"shuttingDown": db["server-info"]["status-info"]["shutting-down"],
"restarting": db["server-info"]["status-info"]["restarting"],
});
let tor_address: String = from_value(db["server-info"]["tor-address"].clone())?;
let onion_address = tor_address
.replace("https://", "")
.replace("http://", "")
.replace(".onion/", "");
let server_info = {
let mut server_info = json!({
"arch": db["server-info"]["arch"],
@@ -196,15 +204,9 @@ impl VersionT for Version {
});
server_info["postInitMigrationTodos"] = json!({});
let tor_address: String = from_value(db["server-info"]["tor-address"].clone())?;
// Maybe we do this like the Public::init does
server_info["torAddress"] = json!(tor_address);
server_info["onionAddress"] = json!(
tor_address
.replace("https://", "")
.replace("http://", "")
.replace(".onion/", "")
);
server_info["torAddress"] = json!(&tor_address);
server_info["onionAddress"] = json!(&onion_address);
server_info["networkInterfaces"] = json!({});
server_info["statusInfo"] = status_info;
server_info["wifi"] = wifi;
@@ -233,6 +235,30 @@ impl VersionT for Version {
let private = {
let mut value = json!({});
value["keyStore"] = crate::dbg!(to_value(&keystore)?);
// Preserve tor onion keys so later migrations (v0_4_0_alpha_20) can
// include them in onion-migration.json for the tor service.
if !tor_keys.is_empty() {
let mut onion_map: Value = json!({});
let onion_obj = onion_map.as_object_mut().unwrap();
let mut tor_migration = imbl::Vector::<Value>::new();
for (package_id, host_id, key_bytes) in &tor_keys {
let onion_addr = onion_address_from_key(key_bytes);
let encoded_key =
base64::Engine::encode(&crate::util::serde::BASE64, key_bytes);
onion_obj.insert(
onion_addr.as_str().into(),
Value::String(encoded_key.clone().into()),
);
tor_migration.push_back(json!({
"hostname": &onion_addr,
"packageId": package_id,
"hostId": host_id,
"key": &encoded_key,
}));
}
value["keyStore"]["onion"] = onion_map;
value["torMigration"] = Value::Array(tor_migration);
}
value["password"] = to_value(&account.password)?;
value["compatS9pkKey"] =
to_value(&crate::db::model::private::generate_developer_key())?;
@@ -498,3 +524,109 @@ async fn previous_ssh_keys(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<SshKeys, E
};
Ok(ssh_keys)
}
/// Collects every tor onion-service secret key from the pre-0.3.6 Postgres
/// database so the 0.3.6 migration can carry them forward.
///
/// Returns `Vec<(package_id, host_id, expanded_key)>` where `expanded_key`
/// is a 64-byte expanded ed25519 secret key. The server's own key is
/// reported under the sentinel pair `("STARTOS", "STARTOS")`.
///
/// Keys that are absent or have an unexpected length are silently skipped
/// rather than failing the migration; missing tables are treated as empty.
#[tracing::instrument(skip_all)]
async fn previous_tor_keys(
    pg: &sqlx::Pool<sqlx::Postgres>,
) -> Result<Vec<(String, String, [u8; 64])>, Error> {
    let mut keys = Vec::new();
    // Server tor key from the account table.
    // Older installs have tor_key (64 bytes). Newer installs (post-NetworkKeys migration)
    // made tor_key nullable and use network_key (32 bytes, needs expansion) instead.
    let row = sqlx::query(r#"SELECT tor_key, network_key FROM account"#)
        .fetch_one(pg)
        .await
        .with_kind(ErrorKind::Database)?;
    if let Ok(tor_key) = row.try_get::<Vec<u8>, _>("tor_key") {
        // NOTE(review): if tor_key is non-NULL but not exactly 64 bytes, the
        // network_key fallback below is never attempted, so the server key is
        // dropped entirely — confirm this is intended.
        if let Ok(key) = <[u8; 64]>::try_from(tor_key) {
            keys.push(("STARTOS".to_owned(), "STARTOS".to_owned(), key));
        }
    } else if let Ok(net_key) = row.try_get::<Vec<u8>, _>("network_key") {
        // 32-byte seed: expand to the 64-byte form tor expects.
        if let Ok(seed) = <[u8; 32]>::try_from(net_key) {
            keys.push((
                "STARTOS".to_owned(),
                "STARTOS".to_owned(),
                crate::util::crypto::ed25519_expand_key(&seed),
            ));
        }
    }
    // Package tor keys from the network_keys table (32-byte keys that need expansion).
    // A failed query (e.g. the table does not exist on this schema version) is
    // treated as "no keys", not an error.
    if let Ok(rows) = sqlx::query(r#"SELECT package, interface, key FROM network_keys"#)
        .fetch_all(pg)
        .await
    {
        for row in rows {
            let Ok(package) = row.try_get::<String, _>("package") else {
                continue;
            };
            let Ok(interface) = row.try_get::<String, _>("interface") else {
                continue;
            };
            let Ok(key_bytes) = row.try_get::<Vec<u8>, _>("key") else {
                continue;
            };
            if let Ok(seed) = <[u8; 32]>::try_from(key_bytes) {
                keys.push((
                    package,
                    interface,
                    crate::util::crypto::ed25519_expand_key(&seed),
                ));
            }
        }
    }
    // Package tor keys from the tor table (already 64-byte expanded keys);
    // same best-effort handling as above.
    if let Ok(rows) = sqlx::query(r#"SELECT package, interface, key FROM tor"#)
        .fetch_all(pg)
        .await
    {
        for row in rows {
            let Ok(package) = row.try_get::<String, _>("package") else {
                continue;
            };
            let Ok(interface) = row.try_get::<String, _>("interface") else {
                continue;
            };
            let Ok(key_bytes) = row.try_get::<Vec<u8>, _>("key") else {
                continue;
            };
            if let Ok(key) = <[u8; 64]>::try_from(key_bytes) {
                keys.push((package, interface, key));
            }
        }
    }
    Ok(keys)
}
/// Derive the tor v3 onion address (without the `.onion` suffix) from a
/// 64-byte expanded ed25519 secret key.
fn onion_address_from_key(expanded_key: &[u8; 64]) -> String {
    use sha3::Digest;
    // Recover the public key from the expanded secret key (ed25519-dalek v1).
    let secret = ed25519_dalek_v1::ExpandedSecretKey::from_bytes(expanded_key)
        .expect("invalid tor key");
    let public = ed25519_dalek_v1::PublicKey::from(&secret).to_bytes();
    // Per rend-spec-v3: checksum = SHA3-256(".onion checksum" || pubkey || version)[0..2]
    let mut hasher = sha3::Sha3_256::new();
    hasher.update(b".onion checksum");
    hasher.update(&public);
    hasher.update(b"\x03");
    let digest = hasher.finalize();
    // Address payload: pubkey (32) || checksum (2) || version (1) = 35 bytes,
    // base32-encoded (unpadded) and lowercased.
    let mut payload = Vec::with_capacity(35);
    payload.extend_from_slice(&public);
    payload.extend_from_slice(&digest[..2]);
    payload.push(0x03);
    base32::encode(base32::Alphabet::Rfc4648 { padding: false }, &payload).to_ascii_lowercase()
}

View File

@@ -2,11 +2,13 @@ use std::path::Path;
use exver::{PreReleaseSegment, VersionRange};
use imbl_value::json;
use reqwest::Url;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{VersionT, v0_4_0_alpha_19};
use crate::context::RpcContext;
use crate::prelude::*;
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
lazy_static::lazy_static! {
static ref V0_4_0_alpha_20: exver::Version = exver::Version::new(
@@ -33,74 +35,106 @@ impl VersionT for Version {
}
#[instrument(skip_all)]
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
// Extract onion migration data before removing it
let onion_store = db
// Use the pre-built torMigration data from v0_3_6_alpha_0 if available.
// This contains all (hostname, packageId, hostId, key) entries with keys
// already resolved, avoiding the issue where packageData is empty during
// migration (packages aren't reinstalled until post_up).
let migration_data = if let Some(tor_migration) = db
.get("private")
.and_then(|p| p.get("keyStore"))
.and_then(|k| k.get("onion"))
.cloned()
.unwrap_or(Value::Object(Default::default()));
let mut addresses = imbl::Vector::<Value>::new();
// Extract OS host onion addresses
if let Some(onions) = db
.get("public")
.and_then(|p| p.get("serverInfo"))
.and_then(|s| s.get("network"))
.and_then(|n| n.get("host"))
.and_then(|h| h.get("onions"))
.and_then(|o| o.as_array())
.and_then(|p| p.get("torMigration"))
.and_then(|t| t.as_array())
{
for onion in onions {
if let Some(hostname) = onion.as_str() {
let key = onion_store
.get(hostname)
.and_then(|v| v.as_str())
.unwrap_or_default();
addresses.push_back(json!({
"hostname": hostname,
"packageId": "STARTOS",
"hostId": "STARTOS",
"key": key,
}));
json!({
"addresses": tor_migration.clone(),
})
} else {
// Fallback for fresh installs or installs that didn't go through
// v0_3_6_alpha_0 with the torMigration field.
let onion_store = db
.get("private")
.and_then(|p| p.get("keyStore"))
.and_then(|k| k.get("onion"))
.cloned()
.unwrap_or(Value::Object(Default::default()));
let mut addresses = imbl::Vector::<Value>::new();
// Extract OS host onion addresses
if let Some(onions) = db
.get("public")
.and_then(|p| p.get("serverInfo"))
.and_then(|s| s.get("network"))
.and_then(|n| n.get("host"))
.and_then(|h| h.get("onions"))
.and_then(|o| o.as_array())
{
for onion in onions {
if let Some(hostname) = onion.as_str() {
let key = onion_store
.get(hostname)
.and_then(|v| v.as_str())
.ok_or_else(|| {
Error::new(
eyre!("missing tor key for onion address {hostname}"),
ErrorKind::Database,
)
})?;
addresses.push_back(json!({
"hostname": hostname,
"packageId": "STARTOS",
"hostId": "startos-ui",
"key": key,
}));
}
}
}
}
// Extract package host onion addresses
if let Some(packages) = db
.get("public")
.and_then(|p| p.get("packageData"))
.and_then(|p| p.as_object())
{
for (package_id, package) in packages.iter() {
if let Some(hosts) = package.get("hosts").and_then(|h| h.as_object()) {
for (host_id, host) in hosts.iter() {
if let Some(onions) = host.get("onions").and_then(|o| o.as_array()) {
for onion in onions {
if let Some(hostname) = onion.as_str() {
let key = onion_store
.get(hostname)
.and_then(|v| v.as_str())
.unwrap_or_default();
addresses.push_back(json!({
"hostname": hostname,
"packageId": &**package_id,
"hostId": &**host_id,
"key": key,
}));
// Extract package host onion addresses
if let Some(packages) = db
.get("public")
.and_then(|p| p.get("packageData"))
.and_then(|p| p.as_object())
{
for (package_id, package) in packages.iter() {
if let Some(hosts) = package.get("hosts").and_then(|h| h.as_object()) {
for (host_id, host) in hosts.iter() {
if let Some(onions) = host.get("onions").and_then(|o| o.as_array()) {
for onion in onions {
if let Some(hostname) = onion.as_str() {
let key = onion_store
.get(hostname)
.and_then(|v| v.as_str())
.ok_or_else(|| {
Error::new(
eyre!(
"missing tor key for onion address {hostname}"
),
ErrorKind::Database,
)
})?;
addresses.push_back(json!({
"hostname": hostname,
"packageId": &**package_id,
"hostId": &**host_id,
"key": key,
}));
}
}
}
}
}
}
}
}
let migration_data = json!({
"addresses": addresses,
});
json!({
"addresses": addresses,
})
};
// Clean up torMigration from private
if let Some(private) = db.get_mut("private").and_then(|p| p.as_object_mut()) {
private.remove("torMigration");
}
// Remove onions and tor-related fields from server host
if let Some(host) = db
@@ -200,7 +234,7 @@ impl VersionT for Version {
}
#[instrument(skip_all)]
async fn post_up(self, _ctx: &RpcContext, input: Value) -> Result<(), Error> {
async fn post_up(self, ctx: &RpcContext, input: Value) -> Result<(), Error> {
let path = Path::new(
"/media/startos/data/package-data/volumes/tor/data/startos/onion-migration.json",
);
@@ -209,6 +243,53 @@ impl VersionT for Version {
crate::util::io::write_file_atomic(path, json).await?;
// Sideload the bundled tor s9pk
let s9pk_path_str = format!("/usr/lib/startos/tor_{}.s9pk", crate::ARCH);
let s9pk_path = Path::new(&s9pk_path_str);
if tokio::fs::metadata(s9pk_path).await.is_ok() {
if let Err(e) = async {
let package_s9pk = tokio::fs::File::open(s9pk_path).await?;
let file = MultiCursorFile::open(&package_s9pk).await?;
let key = ctx.db.peek().await.into_private().into_developer_key();
let registry_url =
Url::parse("https://registry.start9.com/").with_kind(ErrorKind::ParseUrl)?;
ctx.services
.install(
ctx.clone(),
|| crate::s9pk::load(file.clone(), || Ok(key.de()?.0), None),
None,
None::<crate::util::Never>,
None,
)
.await?
.await?
.await?;
// Set the marketplace URL on the installed tor package
let tor_id = "tor".parse::<crate::PackageId>()?;
ctx.db
.mutate(|db| {
if let Some(pkg) =
db.as_public_mut().as_package_data_mut().as_idx_mut(&tor_id)
{
pkg.as_registry_mut().ser(&Some(registry_url))?;
}
Ok(())
})
.await
.result?;
Ok::<_, Error>(())
}
.await
{
tracing::error!("Error installing tor package: {e}");
tracing::debug!("{e:?}");
}
}
Ok(())
}
fn down(self, _db: &mut Value) -> Result<(), Error> {

View File

@@ -0,0 +1,37 @@
use exver::{PreReleaseSegment, VersionRange};
use super::v0_3_5::V0_3_0_COMPAT;
use super::{VersionT, v0_4_0_alpha_20};
use crate::prelude::*;
lazy_static::lazy_static! {
static ref V0_4_0_alpha_21: exver::Version = exver::Version::new(
[0, 4, 0],
[PreReleaseSegment::String("alpha".into()), 21.into()]
);
}
/// 0.4.0-alpha.21: a no-op version bump.
///
/// Carries no data migration of its own — `up` returns `Value::Null` and
/// `down` does nothing — it exists only to advance the OS version past
/// alpha.20.
#[derive(Clone, Copy, Debug, Default)]
pub struct Version;

impl VersionT for Version {
    type Previous = v0_4_0_alpha_20::Version;
    /// No state needs to be captured before this (empty) migration runs.
    type PreUpRes = ();
    async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
        Ok(())
    }
    fn semver(self) -> exver::Version {
        V0_4_0_alpha_21.clone()
    }
    fn compat(self) -> &'static VersionRange {
        // Still compatible back to the 0.3.0 line.
        &V0_3_0_COMPAT
    }
    #[instrument(skip_all)]
    fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
        // No database changes for this version.
        Ok(Value::Null)
    }
    fn down(self, _db: &mut Value) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,13 +1,19 @@
use std::path::{Path, PathBuf};
use tokio::process::Command;
use crate::PackageId;
pub use crate::VolumeId;
use crate::prelude::*;
use crate::util::Invoke;
use crate::util::VersionString;
use crate::DATA_DIR;
pub const PKG_VOLUME_DIR: &str = "package-data/volumes";
pub const BACKUP_DIR: &str = "/media/startos/backups";
const INSTALL_BACKUP_SUFFIX: &str = ".install-backup";
pub fn data_dir<P: AsRef<Path>>(datadir: P, pkg_id: &PackageId, volume_id: &VolumeId) -> PathBuf {
datadir
.as_ref()
@@ -33,3 +39,70 @@ pub fn asset_dir<P: AsRef<Path>>(
/// Path to the on-drive backup data directory for the given package.
pub fn backup_dir(pkg_id: &PackageId) -> PathBuf {
    PathBuf::from(BACKUP_DIR).join(pkg_id).join("data")
}
/// Root of the package's volume data under the data drive.
fn pkg_volume_dir(pkg_id: &PackageId) -> PathBuf {
    PathBuf::from(DATA_DIR).join(PKG_VOLUME_DIR).join(pkg_id)
}
/// Location of the pre-install snapshot for the given package: a sibling of
/// the volume dir named `<pkg_id>.install-backup`.
fn install_backup_path(pkg_id: &PackageId) -> PathBuf {
    let backup_name = format!("{pkg_id}{INSTALL_BACKUP_SUFFIX}");
    Path::new(DATA_DIR).join(PKG_VOLUME_DIR).join(backup_name)
}
/// Creates a COW snapshot of the package volume directory before install.
/// Uses `cp --reflink=always` so it's instant on btrfs and fails gracefully
/// on ext4 (no backup, current behavior preserved).
/// Returns `true` if a backup was created, `false` if no data existed or
/// the filesystem doesn't support reflinks.
pub async fn snapshot_volumes_for_install(pkg_id: &PackageId) -> Result<bool, Error> {
    let src = pkg_volume_dir(pkg_id);
    // Nothing to snapshot if the package has no volume data yet.
    if tokio::fs::metadata(&src).await.is_err() {
        return Ok(false);
    }
    let dst = install_backup_path(pkg_id);
    // Remove any stale backup from a previous failed attempt
    crate::util::io::delete_dir(&dst).await?;
    let copied = Command::new("cp")
        .arg("-a")
        .arg("--reflink=always")
        .arg(&src)
        .arg(&dst)
        .invoke(ErrorKind::Filesystem)
        .await;
    if let Err(e) = copied {
        tracing::warn!(
            "Could not create install backup for {pkg_id} \
            (filesystem may not support reflinks): {e}"
        );
        // Clean up partial copy if any
        crate::util::io::delete_dir(&dst).await?;
        return Ok(false);
    }
    tracing::info!("Created install backup for {pkg_id} at {dst:?}");
    Ok(true)
}
/// Restores the package volume directory from a COW snapshot after a failed
/// install. The current (possibly corrupted) volume dir is deleted first.
/// No-op if no backup exists.
pub async fn restore_volumes_from_install_backup(pkg_id: &PackageId) -> Result<(), Error> {
    let backup = install_backup_path(pkg_id);
    if tokio::fs::metadata(&backup).await.is_ok() {
        // Drop whatever the failed install left behind, then move the
        // snapshot back into place.
        let dst = pkg_volume_dir(pkg_id);
        crate::util::io::delete_dir(&dst).await?;
        crate::util::io::rename(&backup, &dst).await?;
        tracing::info!("Restored volumes from install backup for {pkg_id}");
    }
    Ok(())
}
/// Removes the install backup after a successful install.
pub async fn remove_install_backup(pkg_id: &PackageId) -> Result<(), Error> {
    let backup = install_backup_path(pkg_id);
    crate::util::io::delete_dir(&backup).await
}

View File

@@ -1,5 +1,29 @@
# Changelog
## 0.4.0-beta.61 — StartOS v0.4.0-alpha.21 (2026-03-16)
### Fixed
- Fixed bug where leaving the effect context triggered consts
## 0.4.0-beta.60 — StartOS v0.4.0-alpha.20 (2026-03-16)
### Added
- Tunnel TS type exports and port forward labels
- Secure Boot MOK key enrollment fields in `SetupInfo`
### Changed
- Consolidated `Watchable` base class with generic `map`/`eq` support; renamed `call` to `fetch`
- Moved `GetServiceManifest` and `GetSslCertificate` from `package/` to `base/`
- Simplified `getServiceInterface`, `getServiceInterfaces`, `GetOutboundGateway`, `GetSystemSmtp`, and `fileHelper` using `Watchable` base class
- Simplified SDK Makefile with rsync
### Fixed
- Added `restart_again` flag to `DesiredStatus::Restarting`
## 0.4.0-beta.59 — StartOS v0.4.0-alpha.20 (2026-03-06)
### Added

View File

@@ -4,5 +4,5 @@ import type { EncryptedWire } from './EncryptedWire'
export type AttachParams = {
password: EncryptedWire | null
guid: string
kiosk?: boolean
kiosk: boolean
}

View File

@@ -6,7 +6,7 @@ export type SetupExecuteParams = {
guid: string
password: EncryptedWire | null
recoverySource: RecoverySource<EncryptedWire> | null
kiosk?: boolean
kiosk: boolean
name: string | null
hostname: string | null
}

View File

@@ -83,10 +83,12 @@ export abstract class Watchable<Raw, Mapped = Raw> {
const constRetry = this.effects.constRetry
const cleanup = this.onConstRegistered(value)
gen.next().then(
() => {
(a) => {
abort.abort()
cleanup?.()
constRetry()
if (!a.done) {
constRetry()
}
},
() => {
abort.abort()

View File

@@ -69,7 +69,7 @@ import { getOwnServiceInterfaces } from '../../base/lib/util/getServiceInterface
import { Volumes, createVolumes } from './util/Volume'
/** The minimum StartOS version required by this SDK release */
export const OSVersion = testTypeVersion('0.4.0-alpha.20')
export const OSVersion = testTypeVersion('0.4.0-alpha.21')
// prettier-ignore
type AnyNeverCond<T extends any[], Then, Else> =

View File

@@ -1,12 +1,12 @@
{
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.59",
"version": "0.4.0-beta.61",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.59",
"version": "0.4.0-beta.61",
"license": "MIT",
"dependencies": {
"@iarna/toml": "^3.0.0",

View File

@@ -1,6 +1,6 @@
{
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.59",
"version": "0.4.0-beta.61",
"description": "Software development kit to facilitate packaging services for StartOS",
"main": "./package/lib/index.js",
"types": "./package/lib/index.d.ts",

24
web/package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "startos-ui",
"version": "0.4.0-alpha.20",
"version": "0.4.0-alpha.21",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "startos-ui",
"version": "0.4.0-alpha.20",
"version": "0.4.0-alpha.21",
"license": "MIT",
"dependencies": {
"@angular/cdk": "^21.2.1",
@@ -836,6 +836,7 @@
"integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@babel/code-frame": "^7.29.0",
"@babel/generator": "^7.29.0",
@@ -8322,6 +8323,7 @@
"integrity": "sha512-ME4Fb83LgEgwNw96RKNvKV4VTLuXfoKudAmm2lP8Kk87KaMK0/Xrx/aAkMWmT8mDb+3MlFDspfbCs7adjRxA2g==",
"devOptional": true,
"license": "MIT",
"peer": true,
"dependencies": {
"cli-truncate": "^5.0.0",
"colorette": "^2.0.20",
@@ -12524,24 +12526,6 @@
"dev": true,
"license": "ISC"
},
"node_modules/yaml": {
"version": "2.8.2",
"resolved": "https://registry.npmjs.org/yaml/-/yaml-2.8.2.tgz",
"integrity": "sha512-mplynKqc1C2hTVYxd0PU2xQAc22TI1vShAYGksCCfxbn/dFwnHTNi1bvYsBTkhdUNtGIf5xNOg938rrSSYvS9A==",
"dev": true,
"license": "ISC",
"optional": true,
"peer": true,
"bin": {
"yaml": "bin.mjs"
},
"engines": {
"node": ">= 14.6"
},
"funding": {
"url": "https://github.com/sponsors/eemeli"
}
},
"node_modules/yargs": {
"version": "18.0.0",
"resolved": "https://registry.npmjs.org/yargs/-/yargs-18.0.0.tgz",

View File

@@ -1,6 +1,6 @@
{
"name": "startos-ui",
"version": "0.4.0-alpha.20",
"version": "0.4.0-alpha.21",
"author": "Start9 Labs, Inc",
"homepage": "https://start9.com/",
"license": "MIT",

View File

@@ -1,12 +1,30 @@
import { Component } from '@angular/core'
import { FormsModule } from '@angular/forms'
import { i18nPipe } from '@start9labs/shared'
import { TuiButton, TuiTitle } from '@taiga-ui/core'
import { TuiDialogContext } from '@taiga-ui/core'
import {
TuiButton,
TuiCheckbox,
TuiDialogContext,
TuiNotification,
TuiTitle,
} from '@taiga-ui/core'
import { TuiHeader } from '@taiga-ui/layout'
import { injectContext, PolymorpheusComponent } from '@taiga-ui/polymorpheus'
export interface PreserveOverwriteData {
isExt4: boolean
}
@Component({
imports: [TuiButton, TuiHeader, TuiTitle, i18nPipe],
imports: [
FormsModule,
TuiButton,
TuiCheckbox,
TuiHeader,
TuiNotification,
TuiTitle,
i18nPipe,
],
template: `
<header tuiHeader>
<hgroup tuiTitle>
@@ -24,6 +42,18 @@ import { injectContext, PolymorpheusComponent } from '@taiga-ui/polymorpheus'
{{ 'to discard' | i18n }}
</li>
</ul>
@if (context.data.isExt4) {
<p tuiNotification appearance="warning" size="m">
{{
'This drive uses ext4 and will be automatically converted to btrfs. A backup is strongly recommended before proceeding.'
| i18n
}}
</p>
<label>
<input tuiCheckbox type="checkbox" [(ngModel)]="backupAck" />
{{ 'I have a backup of my data' | i18n }}
</label>
}
<footer>
<button
tuiButton
@@ -36,6 +66,7 @@ import { injectContext, PolymorpheusComponent } from '@taiga-ui/polymorpheus'
tuiButton
appearance=""
[style.background]="'var(--tui-status-positive)'"
[disabled]="context.data.isExt4 && !backupAck"
(click)="context.completeWith(true)"
>
{{ 'Preserve' | i18n }}
@@ -44,7 +75,9 @@ import { injectContext, PolymorpheusComponent } from '@taiga-ui/polymorpheus'
`,
})
export class PreserveOverwriteDialog {
protected readonly context = injectContext<TuiDialogContext<boolean>>()
protected readonly context =
injectContext<TuiDialogContext<boolean, PreserveOverwriteData>>()
protected backupAck = false
}
export const PRESERVE_OVERWRITE = new PolymorpheusComponent(

View File

@@ -292,8 +292,18 @@ export default class DrivesPage {
private showPreserveOverwriteDialog() {
let selectionMade = false
const drive = this.selectedDataDrive
const filesystem =
drive?.filesystem ||
drive?.partitions.find(p => p.guid)?.filesystem ||
null
const isExt4 = filesystem === 'ext2'
this.dialogs.openComponent<boolean>(PRESERVE_OVERWRITE).subscribe({
this.dialogs
.openComponent<boolean>(PRESERVE_OVERWRITE, {
data: { isExt4 },
})
.subscribe({
next: preserve => {
selectionMade = true
this.preserveData = preserve

View File

@@ -16,7 +16,7 @@ import { StateService } from '../services/state.service'
<button tuiCell="l" (click)="startFresh()">
<span tuiAvatar="@tui.plus" appearance="positive"></span>
<div tuiTitle>
{{ 'Start Fresh' | i18n }}
<b>{{ 'Start Fresh' | i18n }}</b>
<div tuiSubtitle>{{ 'Set up a brand new server' | i18n }}</div>
</div>
</button>
@@ -24,7 +24,7 @@ import { StateService } from '../services/state.service'
<button tuiCell="l" (click)="restore()">
<span tuiAvatar="@tui.archive-restore" appearance="warning"></span>
<div tuiTitle>
{{ 'Restore from Backup' | i18n }}
<b>{{ 'Restore from Backup' | i18n }}</b>
<div tuiSubtitle>
{{ 'Restore StartOS data from an encrypted backup' | i18n }}
</div>
@@ -34,7 +34,7 @@ import { StateService } from '../services/state.service'
<button tuiCell="l" (click)="transfer()">
<span tuiAvatar="@tui.hard-drive-download" appearance="info"></span>
<div tuiTitle>
{{ 'Transfer' | i18n }}
<b>{{ 'Transfer' | i18n }}</b>
<div tuiSubtitle>
{{ 'Transfer data from an existing StartOS data drive' | i18n }}
</div>

View File

@@ -1,5 +1,4 @@
import { Component, inject } from '@angular/core'
import { Router } from '@angular/router'
import {
AbstractControl,
FormControl,
@@ -7,14 +6,15 @@ import {
ReactiveFormsModule,
Validators,
} from '@angular/forms'
import { Router } from '@angular/router'
import { ErrorService, i18nPipe, normalizeHostname } from '@start9labs/shared'
import { TuiAutoFocus, TuiMapperPipe, TuiValidator } from '@taiga-ui/cdk'
import {
TuiButton,
TuiError,
TuiIcon,
TuiTitle,
TuiInput,
TuiTitle,
tuiValidationErrorsProvider,
} from '@taiga-ui/core'
import { TuiNotificationMiddleService, TuiPassword } from '@taiga-ui/kit'
@@ -80,17 +80,6 @@ import { StateService } from '../services/state.service'
<tui-error formControlName="confirm" />
<footer>
<button
tuiButton
size="m"
[disabled]="
isFresh
? form.invalid
: form.controls.password.value && form.invalid
"
>
{{ 'Finish' | i18n }}
</button>
@if (!isFresh) {
<button
tuiButton
@@ -102,6 +91,17 @@ import { StateService } from '../services/state.service'
{{ 'Skip' | i18n }}
</button>
}
<button
tuiButton
size="m"
[disabled]="
isFresh
? form.invalid
: form.controls.password.value && form.invalid
"
>
{{ 'Finish' | i18n }}
</button>
</footer>
</form>
`,

View File

@@ -29,20 +29,20 @@ import { StateService } from '../services/state.service'
<canvas matrix></canvas>
<section tuiCardLarge>
<header tuiHeader>
<h2 tuiTitle>
<span class="inline-title">
<hgroup tuiTitle>
<h2 tuiCell="m">
<tui-icon icon="@tui.circle-check-big" class="g-positive" />
{{ 'Setup Complete!' | i18n }}
</span>
@if (!stateService.kiosk) {
<span tuiSubtitle>
{{
'http://start.local was for setup only. It will no longer work.'
| i18n
}}
</span>
}
</h2>
</h2>
</hgroup>
@if (!stateService.kiosk) {
<p tuiSubtitle>
{{
'http://start.local was for setup only. It will no longer work.'
| i18n
}}
</p>
}
</header>
@if (!result) {
@@ -52,15 +52,15 @@ import { StateService } from '../services/state.service'
@if (!stateService.kiosk) {
<button tuiCell="l" (click)="download()">
<span tuiAvatar="@tui.download" appearance="secondary"></span>
<div tuiTitle>
{{ 'Download Address Info' | i18n }}
<div tuiSubtitle>
<span tuiTitle>
<b>{{ 'Download Address Info' | i18n }}</b>
<span tuiSubtitle>
{{
"Contains your server's permanent local address and Root CA"
| i18n
}}
</div>
</div>
</span>
</span>
@if (downloaded) {
<tui-icon icon="@tui.circle-check" class="g-positive" />
}
@@ -76,15 +76,15 @@ import { StateService } from '../services/state.service'
(click)="removeMedia()"
>
<span tuiAvatar="@tui.usb" appearance="secondary"></span>
<div tuiTitle>
{{ 'Remove Installation Media' | i18n }}
<div tuiSubtitle>
<span tuiTitle>
<b>{{ 'Remove Installation Media' | i18n }}</b>
<span tuiSubtitle>
{{
'Remove USB stick or other installation media from your server'
| i18n
}}
</div>
</div>
</span>
</span>
@if (usbRemoved) {
<tui-icon icon="@tui.circle-check" class="g-positive" />
}
@@ -99,15 +99,15 @@ import { StateService } from '../services/state.service'
(click)="acknowledgeMok()"
>
<span tuiAvatar="@tui.shield-check" appearance="secondary"></span>
<div tuiTitle>
{{ 'Secure Boot Enrollment' | i18n }}
<div tuiSubtitle>
<span tuiTitle>
<b>{{ 'Secure Boot Enrollment' | i18n }}</b>
<span tuiSubtitle>
{{
'Prepare for Secure Boot key enrollment on the next reboot'
| i18n
}}
</div>
</div>
</span>
</span>
@if (mokAcknowledged) {
<tui-icon icon="@tui.circle-check" class="g-positive" />
}
@@ -126,9 +126,9 @@ import { StateService } from '../services/state.service'
(click)="reboot()"
>
<span tuiAvatar="@tui.rotate-cw" appearance="secondary"></span>
<div tuiTitle>
{{ 'Restart Server' | i18n }}
<div tuiSubtitle>
<span tuiTitle>
<b>{{ 'Restart Server' | i18n }}</b>
<span tuiSubtitle>
@if (rebooting) {
{{ 'Waiting for server to come back online' | i18n }}
} @else if (rebooted) {
@@ -136,8 +136,8 @@ import { StateService } from '../services/state.service'
} @else {
{{ 'Restart your server to complete setup' | i18n }}
}
</div>
</div>
</span>
</span>
@if (rebooting) {
<tui-loader />
} @else if (rebooted) {
@@ -147,12 +147,12 @@ import { StateService } from '../services/state.service'
} @else if (stateService.kiosk) {
<button tuiCell="l" (click)="exitKiosk()">
<span tuiAvatar="@tui.log-in" appearance="secondary"></span>
<div tuiTitle>
{{ 'Continue to Login' | i18n }}
<div tuiSubtitle>
<span tuiTitle>
<b>{{ 'Continue to Login' | i18n }}</b>
<span tuiSubtitle>
{{ 'Proceed to the StartOS login screen' | i18n }}
</div>
</div>
</span>
</span>
</button>
}
@@ -165,10 +165,10 @@ import { StateService } from '../services/state.service'
(click)="openLocalAddress()"
>
<span tuiAvatar="@tui.external-link" appearance="secondary"></span>
<div tuiTitle>
{{ 'Open Local Address' | i18n }}
<div tuiSubtitle>{{ lanAddress }}</div>
</div>
<span tuiTitle>
<b>{{ 'Open Local Address' | i18n }}</b>
<span tuiSubtitle>{{ lanAddress }}</span>
</span>
</button>
<app-documentation hidden [lanAddress]="lanAddress" />
@@ -177,12 +177,6 @@ import { StateService } from '../services/state.service'
</section>
`,
styles: `
.inline-title {
display: inline-flex;
align-items: center;
gap: 0.5rem;
}
[tuiCell].disabled {
opacity: var(--tui-disabled-opacity);
pointer-events: none;

View File

@@ -203,6 +203,7 @@ const MOCK_DISKS: DiskInfo[] = [
partitions: [],
capacity: 0,
guid: null,
filesystem: null,
},
// 10 GiB - too small for OS and data; also tests both vendor+model null
{
@@ -217,10 +218,12 @@ const MOCK_DISKS: DiskInfo[] = [
used: null,
startOs: {},
guid: null,
filesystem: null,
},
],
capacity: 10 * GiB,
guid: null,
filesystem: null,
},
// 18 GiB - exact OS boundary; tests vendor null with model present
{
@@ -235,10 +238,12 @@ const MOCK_DISKS: DiskInfo[] = [
used: null,
startOs: {},
guid: null,
filesystem: null,
},
],
capacity: 18 * GiB,
guid: null,
filesystem: null,
},
// 20 GiB - exact data boundary; tests vendor present with model null
{
@@ -253,10 +258,12 @@ const MOCK_DISKS: DiskInfo[] = [
used: null,
startOs: {},
guid: null,
filesystem: null,
},
],
capacity: 20 * GiB,
guid: null,
filesystem: null,
},
// 30 GiB - OK for OS or data alone, too small for both (< 38 GiB)
{
@@ -271,10 +278,12 @@ const MOCK_DISKS: DiskInfo[] = [
used: null,
startOs: {},
guid: null,
filesystem: null,
},
],
capacity: 30 * GiB,
guid: null,
filesystem: null,
},
// 30 GiB with existing StartOS data - tests preserve/overwrite + capacity constraint
{
@@ -298,10 +307,12 @@ const MOCK_DISKS: DiskInfo[] = [
},
},
guid: 'small-existing-guid',
filesystem: 'ext2',
},
],
capacity: 30 * GiB,
guid: 'small-existing-guid',
filesystem: 'ext2',
},
// 500 GB - large, always OK
{
@@ -316,10 +327,12 @@ const MOCK_DISKS: DiskInfo[] = [
used: null,
startOs: {},
guid: null,
filesystem: null,
},
],
capacity: 500000000000,
guid: null,
filesystem: null,
},
// 1 TB with existing StartOS data
{
@@ -343,10 +356,12 @@ const MOCK_DISKS: DiskInfo[] = [
},
},
guid: 'existing-guid',
filesystem: 'btrfs',
},
],
capacity: 1000000000000,
guid: 'existing-guid',
filesystem: 'btrfs',
},
// 2 TB
{
@@ -370,10 +385,12 @@ const MOCK_DISKS: DiskInfo[] = [
},
},
guid: null,
filesystem: null,
},
],
capacity: 2000000000000,
guid: null,
filesystem: null,
},
]

View File

@@ -72,6 +72,7 @@ export class StateService {
await this.api.attach({
guid: this.dataDriveGuid,
password: password ? await this.api.encrypt(password) : null,
kiosk: this.kiosk,
})
}
@@ -106,6 +107,7 @@ export class StateService {
name,
hostname,
recoverySource,
kiosk: this.kiosk,
})
}

View File

@@ -67,10 +67,6 @@ button:disabled {
}
}
[tuiCell]:not([tuiOption]):not(:last-of-type) {
box-shadow: 0 calc(0.5rem + 1px) 0 -0.5rem var(--tui-border-normal);
}
// TODO: Remove in Taiga v5.0
[tuiButton] {
min-block-size: var(--t-size);

View File

@@ -722,4 +722,7 @@ export default {
799: 'Nach Klick auf "Enroll MOK":',
800: 'Geben Sie bei Aufforderung Ihr StartOS-Passwort ein',
801: 'Ihr System hat Secure Boot aktiviert, was erfordert, dass alle Kernel-Module mit einem vertrauenswürdigen Schlüssel signiert sind. Einige Hardware-Treiber \u2014 wie die für NVIDIA-GPUs \u2014 sind nicht mit dem Standard-Distributionsschlüssel signiert. Die Registrierung des StartOS-Signaturschlüssels ermöglicht es Ihrer Firmware, diesen Modulen zu vertrauen, damit Ihre Hardware vollständig genutzt werden kann.',
802: 'Die Übersetzungen auf Betriebssystemebene sind bereits aktiv. Ein Neustart ist erforderlich, damit die Übersetzungen auf Dienstebene wirksam werden.',
803: 'Dieses Laufwerk verwendet ext4 und wird automatisch in btrfs konvertiert. Ein Backup wird dringend empfohlen, bevor Sie fortfahren.',
804: 'Ich habe ein Backup meiner Daten',
} satisfies i18n

View File

@@ -723,4 +723,7 @@ export const ENGLISH: Record<string, number> = {
'After clicking "Enroll MOK":': 799,
'When prompted, enter your StartOS password': 800,
'Your system has Secure Boot enabled, which requires all kernel modules to be signed with a trusted key. Some hardware drivers \u2014 such as those for NVIDIA GPUs \u2014 are not signed by the default distribution key. Enrolling the StartOS signing key allows your firmware to trust these modules so your hardware can be fully utilized.': 801,
'OS-level translations are already in effect. A restart is required for service-level translations to take effect.': 802,
'This drive uses ext4 and will be automatically converted to btrfs. A backup is strongly recommended before proceeding.': 803,
'I have a backup of my data': 804,
}

View File

@@ -722,4 +722,7 @@ export default {
799: 'Después de hacer clic en "Enroll MOK":',
800: 'Cuando se le solicite, ingrese su contraseña de StartOS',
801: 'Su sistema tiene Secure Boot habilitado, lo que requiere que todos los módulos del kernel estén firmados con una clave de confianza. Algunos controladores de hardware \u2014 como los de las GPU NVIDIA \u2014 no están firmados con la clave de distribución predeterminada. Registrar la clave de firma de StartOS permite que su firmware confíe en estos módulos para que su hardware pueda utilizarse completamente.',
802: 'Las traducciones a nivel del sistema operativo ya están en vigor. Se requiere un reinicio para que las traducciones a nivel de servicio surtan efecto.',
803: 'Esta unidad usa ext4 y se convertirá automáticamente a btrfs. Se recomienda encarecidamente hacer una copia de seguridad antes de continuar.',
804: 'Tengo una copia de seguridad de mis datos',
} satisfies i18n

View File

@@ -722,4 +722,7 @@ export default {
799: 'Après avoir cliqué sur "Enroll MOK" :',
800: 'Lorsque vous y êtes invité, entrez votre mot de passe StartOS',
801: "Votre système a Secure Boot activé, ce qui exige que tous les modules du noyau soient signés avec une clé de confiance. Certains pilotes matériels \u2014 comme ceux des GPU NVIDIA \u2014 ne sont pas signés par la clé de distribution par défaut. L'enregistrement de la clé de signature StartOS permet à votre firmware de faire confiance à ces modules afin que votre matériel puisse être pleinement utilisé.",
802: "Les traductions au niveau du système d'exploitation sont déjà en vigueur. Un redémarrage est nécessaire pour que les traductions au niveau des services prennent effet.",
803: "Ce disque utilise ext4 et sera automatiquement converti en btrfs. Il est fortement recommandé de faire une sauvegarde avant de continuer.",
804: "J'ai une sauvegarde de mes données",
} satisfies i18n

View File

@@ -722,4 +722,7 @@ export default {
799: 'Po kliknięciu "Enroll MOK":',
800: 'Po wyświetleniu monitu wprowadź swoje hasło StartOS',
801: 'Twój system ma włączony Secure Boot, co wymaga, aby wszystkie moduły jądra były podpisane zaufanym kluczem. Niektóre sterowniki sprzętowe \u2014 takie jak te dla GPU NVIDIA \u2014 nie są podpisane domyślnym kluczem dystrybucji. Zarejestrowanie klucza podpisu StartOS pozwala firmware ufać tym modułom, aby sprzęt mógł być w pełni wykorzystany.',
802: 'Tłumaczenia na poziomie systemu operacyjnego są już aktywne. Wymagane jest ponowne uruchomienie, aby tłumaczenia na poziomie usług zaczęły obowiązywać.',
803: 'Ten dysk używa ext4 i zostanie automatycznie skonwertowany na btrfs. Zdecydowanie zaleca się wykonanie kopii zapasowej przed kontynuowaniem.',
804: 'Mam kopię zapasową moich danych',
} satisfies i18n

View File

@@ -7,6 +7,7 @@ export interface DiskInfo {
partitions: PartitionInfo[]
capacity: number
guid: string | null
filesystem: string | null
}
export interface PartitionInfo {
@@ -16,6 +17,7 @@ export interface PartitionInfo {
used: number | null
startOs: Record<string, StartOSDiskInfo>
guid: string | null
filesystem: string | null
}
export type StartOSDiskInfo = {

View File

@@ -37,7 +37,7 @@ export const appConfig: ApplicationConfig = {
provideRouter(routes, withRouterConfig({ onSameUrlNavigation: 'reload' })),
provideTaiga({ mode: 'dark' }),
tuiDropdownOptionsProvider({ appearance: 'start-9' }),
tuiDialogOptionsProvider({ appearance: 'start-9 taiga' }),
tuiDialogOptionsProvider({ appearance: 'start-9 taiga', size: 's' }),
{
provide: PatchDB,
deps: [PatchDbSource, PATCH_CACHE],

View File

@@ -0,0 +1,35 @@
import { ChangeDetectionStrategy, Component, input } from '@angular/core'
import { TuiIcon } from '@taiga-ui/core'
@Component({
selector: 'app-placeholder',
template: `
@if (icon(); as icon) {
<tui-icon [icon]="icon" />
}
<ng-content />
`,
styles: `
:host {
display: flex;
flex: 1;
flex-direction: column;
gap: 0.5rem;
align-items: center;
justify-content: center;
text-align: center;
padding: 1rem;
font: var(--tui-typography-body-l);
color: var(--tui-text-tertiary);
tui-icon {
font-size: 2.5rem;
}
}
`,
changeDetection: ChangeDetectionStrategy.OnPush,
imports: [TuiIcon],
})
export class PlaceholderComponent {
readonly icon = input<string>()
}

View File

@@ -168,7 +168,9 @@ export class DevicesAdd {
ip,
})
this.dialogs.open(DEVICES_CONFIG, { data: config }).subscribe()
this.dialogs
.open(DEVICES_CONFIG, { data: config, closable: false })
.subscribe()
}
} catch (e: any) {
console.error(e)

View File

@@ -3,24 +3,28 @@ import {
Component,
computed,
inject,
Signal,
} from '@angular/core'
import { toSignal } from '@angular/core/rxjs-interop'
import { ErrorService } from '@start9labs/shared'
import { TuiResponsiveDialogService } from '@taiga-ui/addon-mobile'
import { TuiButton, TuiDataList, TuiDropdown } from '@taiga-ui/core'
import { TUI_CONFIRM, TuiNotificationMiddleService } from '@taiga-ui/kit'
import {
TUI_CONFIRM,
TuiNotificationMiddleService,
TuiSkeleton,
} from '@taiga-ui/kit'
import { PatchDB } from 'patch-db-client'
import { filter, map } from 'rxjs'
import { PlaceholderComponent } from 'src/app/routes/home/components/placeholder'
import { ApiService } from 'src/app/services/api/api.service'
import { TunnelData } from 'src/app/services/patch-db/data-model'
import { DEVICES_ADD } from './add'
import { DEVICES_CONFIG } from './config'
import { MappedDevice, MappedSubnet } from './utils'
import { MappedDevice } from './utils'
@Component({
template: `
<table class="g-table">
<table class="g-table" [tuiSkeleton]="!devices()">
<thead>
<tr>
<th>Name</th>
@@ -49,7 +53,11 @@ import { MappedDevice, MappedSubnet } from './utils'
iconStart="@tui.ellipsis-vertical"
>
Actions
<tui-data-list *tuiDropdown size="s">
<tui-data-list
*tuiDropdown="let close"
size="s"
(click)="close()"
>
<button
tuiOption
iconStart="@tui.pencil"
@@ -76,13 +84,23 @@ import { MappedDevice, MappedSubnet } from './utils'
</td>
</tr>
} @empty {
<div class="placeholder">No devices</div>
<tr>
<td colspan="4">
<app-placeholder icon="@tui.laptop">No devices</app-placeholder>
</td>
</tr>
}
</tbody>
</table>
`,
changeDetection: ChangeDetectionStrategy.OnPush,
imports: [TuiButton, TuiDropdown, TuiDataList],
imports: [
TuiButton,
TuiDropdown,
TuiDataList,
PlaceholderComponent,
TuiSkeleton,
],
})
export default class Devices {
private readonly dialogs = inject(TuiResponsiveDialogService)
@@ -90,7 +108,7 @@ export default class Devices {
private readonly loading = inject(TuiNotificationMiddleService)
private readonly errorService = inject(ErrorService)
protected readonly subnets: Signal<readonly MappedSubnet[]> = toSignal(
protected readonly subnets = toSignal(
inject<PatchDB<TunnelData>>(PatchDB)
.watch$('wg', 'subnets')
.pipe(
@@ -102,11 +120,11 @@ export default class Devices {
})),
),
),
{ initialValue: [] },
{ initialValue: null },
)
protected readonly devices = computed(() =>
this.subnets().flatMap(subnet =>
this.subnets()?.flatMap(subnet =>
Object.entries(subnet.clients).map(([ip, { name }]) => ({
subnet: {
name: subnet.name,
@@ -141,7 +159,7 @@ export default class Devices {
try {
const data = await this.api.showDeviceConfig({ subnet: subnet.range, ip })
this.dialogs.open(DEVICES_CONFIG, { data }).subscribe()
this.dialogs.open(DEVICES_CONFIG, { data, closable: false }).subscribe()
} catch (e: any) {
console.log(e)
this.errorService.handleError(e)

View File

@@ -15,6 +15,7 @@ import {
TuiCheckbox,
TuiDialogContext,
TuiError,
TuiInput,
TuiNumberFormat,
} from '@taiga-ui/core'
import {
@@ -35,7 +36,7 @@ import { MappedDevice, PortForwardsData } from './utils'
<form tuiForm [formGroup]="form">
<tui-textfield>
<label tuiLabel>Label</label>
<input tuiTextfield formControlName="label" />
<input tuiInput formControlName="label" />
</tui-textfield>
<tui-error formControlName="label" />
<tui-textfield tuiChevron>
@@ -129,6 +130,7 @@ import { MappedDevice, PortForwardsData } from './utils'
TuiCheckbox,
TuiValueChanges,
TuiElasticContainer,
TuiInput,
],
})
export class PortForwardsAdd {

View File

@@ -6,12 +6,7 @@ import {
} from '@angular/forms'
import { ErrorService } from '@start9labs/shared'
import { T } from '@start9labs/start-sdk'
import {
TuiButton,
TuiDialogContext,
TuiError,
TuiTextfield,
} from '@taiga-ui/core'
import { TuiButton, TuiDialogContext, TuiError, TuiInput } from '@taiga-ui/core'
import { TuiNotificationMiddleService } from '@taiga-ui/kit'
import { TuiForm } from '@taiga-ui/layout'
import { injectContext, PolymorpheusComponent } from '@taiga-ui/polymorpheus'
@@ -27,7 +22,7 @@ export interface EditLabelData {
<form tuiForm [formGroup]="form">
<tui-textfield>
<label tuiLabel>Label</label>
<input tuiTextfield formControlName="label" />
<input tuiInput formControlName="label" />
</tui-textfield>
<tui-error formControlName="label" />
<footer>
@@ -38,7 +33,7 @@ export interface EditLabelData {
</form>
`,
changeDetection: ChangeDetectionStrategy.OnPush,
imports: [ReactiveFormsModule, TuiButton, TuiError, TuiTextfield, TuiForm],
imports: [ReactiveFormsModule, TuiButton, TuiError, TuiInput, TuiForm],
})
export class PortForwardsEditLabel {
private readonly api = inject(ApiService)

View File

@@ -21,10 +21,12 @@ import {
import {
TUI_CONFIRM,
TuiNotificationMiddleService,
TuiSkeleton,
TuiSwitch,
} from '@taiga-ui/kit'
import { PatchDB } from 'patch-db-client'
import { filter, map } from 'rxjs'
import { PlaceholderComponent } from 'src/app/routes/home/components/placeholder'
import { PORT_FORWARDS_ADD } from 'src/app/routes/home/routes/port-forwards/add'
import { PORT_FORWARDS_EDIT_LABEL } from 'src/app/routes/home/routes/port-forwards/edit-label'
import { ApiService } from 'src/app/services/api/api.service'
@@ -34,7 +36,7 @@ import { MappedDevice, MappedForward } from './utils'
@Component({
template: `
<table class="g-table">
<table class="g-table" [tuiSkeleton]="!portForwards()">
<thead>
<tr>
<th></th>
@@ -84,11 +86,14 @@ import { MappedDevice, MappedForward } from './utils'
iconStart="@tui.ellipsis-vertical"
>
Actions
<tui-data-list *tuiDropdown size="s">
<tui-data-list
*tuiDropdown="let close"
size="s"
(click)="close()"
>
<button
tuiOption
iconStart="@tui.pencil"
new
(click)="onEditLabel(forward)"
>
{{ forward.label ? 'Rename' : 'Add label' }}
@@ -96,7 +101,6 @@ import { MappedDevice, MappedForward } from './utils'
<button
tuiOption
iconStart="@tui.trash"
new
(click)="onDelete(forward)"
>
Delete
@@ -106,7 +110,13 @@ import { MappedDevice, MappedForward } from './utils'
</td>
</tr>
} @empty {
<div class="placeholder">No port forwards</div>
<tr>
<td colspan="7">
<app-placeholder icon="@tui.globe">
No port forwards
</app-placeholder>
</td>
</tr>
}
</tbody>
</table>
@@ -120,6 +130,8 @@ import { MappedDevice, MappedForward } from './utils'
TuiLoader,
TuiSwitch,
TuiTextfield,
PlaceholderComponent,
TuiSkeleton,
],
})
export default class PortForwards {
@@ -128,8 +140,6 @@ export default class PortForwards {
private readonly loading = inject(TuiNotificationMiddleService)
private readonly patch = inject<PatchDB<TunnelData>>(PatchDB)
private readonly errorService = inject(ErrorService)
private readonly portForwards = toSignal(this.patch.watch$('portForwards'))
private readonly ips = toSignal(
this.patch.watch$('gateways').pipe(
map(g =>
@@ -157,6 +167,7 @@ export default class PortForwards {
{ initialValue: [] },
)
protected readonly portForwards = toSignal(this.patch.watch$('portForwards'))
protected readonly forwards = computed(() =>
Object.entries(this.portForwards() || {}).map(([source, entry]) => {
const sourceSplit = source.split(':')

View File

@@ -3,9 +3,14 @@ import { toSignal } from '@angular/core/rxjs-interop'
import { utils } from '@start9labs/start-sdk'
import { TuiResponsiveDialogService } from '@taiga-ui/addon-mobile'
import { TuiButton, TuiDataList, TuiDropdown } from '@taiga-ui/core'
import { TUI_CONFIRM, TuiNotificationMiddleService } from '@taiga-ui/kit'
import {
TUI_CONFIRM,
TuiNotificationMiddleService,
TuiSkeleton,
} from '@taiga-ui/kit'
import { PatchDB } from 'patch-db-client'
import { filter, map } from 'rxjs'
import { PlaceholderComponent } from 'src/app/routes/home/components/placeholder'
import { ApiService } from 'src/app/services/api/api.service'
import { TunnelData } from 'src/app/services/patch-db/data-model'
@@ -13,7 +18,7 @@ import { SUBNETS_ADD } from './add'
@Component({
template: `
<table class="g-table">
<table class="g-table" [tuiSkeleton]="!subnets()">
<thead>
<tr>
<th>Name</th>
@@ -40,7 +45,11 @@ import { SUBNETS_ADD } from './add'
iconStart="@tui.ellipsis-vertical"
>
Actions
<tui-data-list *tuiDropdown size="s">
<tui-data-list
*tuiDropdown="let close"
size="s"
(click)="close()"
>
<button
tuiOption
iconStart="@tui.pencil"
@@ -60,20 +69,30 @@ import { SUBNETS_ADD } from './add'
</td>
</tr>
} @empty {
<div class="placeholder">No subnets</div>
<tr>
<td colspan="3">
<app-placeholder icon="@tui.network">No subnets</app-placeholder>
</td>
</tr>
}
</tbody>
</table>
`,
changeDetection: ChangeDetectionStrategy.OnPush,
imports: [TuiButton, TuiDropdown, TuiDataList],
imports: [
TuiButton,
TuiDropdown,
TuiDataList,
PlaceholderComponent,
TuiSkeleton,
],
})
export default class Subnets {
private readonly dialogs = inject(TuiResponsiveDialogService)
private readonly api = inject(ApiService)
private readonly loading = inject(TuiNotificationMiddleService)
protected readonly subnets = toSignal<MappedSubnet[], []>(
protected readonly subnets = toSignal(
inject<PatchDB<TunnelData>>(PatchDB)
.watch$('wg', 'subnets')
.pipe(
@@ -85,7 +104,7 @@ export default class Subnets {
})),
),
),
{ initialValue: [] },
{ initialValue: null },
)
protected onAdd(): void {
@@ -111,7 +130,7 @@ export default class Subnets {
.open(TUI_CONFIRM, { label: 'Are you sure?' })
.pipe(filter(Boolean))
.subscribe(async () => {
const subnet = this.subnets()[index]?.range || ''
const subnet = this.subnets()?.[index]?.range || ''
const loader = this.loading.open('').subscribe()
try {
@@ -125,13 +144,13 @@ export default class Subnets {
}
private getNext(): string {
const current = this.subnets().map(s => utils.IpNet.parse(s.range))
const current = this.subnets()?.map(s => utils.IpNet.parse(s.range))
const suggestion = utils.IpNet.parse('10.59.0.1/24')
for (let i = 0; i < 256; i++) {
suggestion.octets[2] = Math.floor(Math.random() * 256)
if (
!current.some(
!current?.some(
s => s.contains(suggestion), // inverse check unnecessary since we don't allow subnets smaller than /24
)
) {

View File

@@ -1,9 +1,8 @@
import { Component, computed, inject, Injectable, signal } from '@angular/core'
import { computed, inject, Injectable, signal } from '@angular/core'
import { toObservable } from '@angular/core/rxjs-interop'
import { ErrorService } from '@start9labs/shared'
import { TuiResponsiveDialogService } from '@taiga-ui/addon-mobile'
import { TuiLoader } from '@taiga-ui/core'
import { PolymorpheusComponent } from '@taiga-ui/polymorpheus'
import { T } from '@start9labs/start-sdk'
import { TuiNotificationMiddleService } from '@taiga-ui/kit'
import {
catchError,
EMPTY,
@@ -14,25 +13,16 @@ import {
switchMap,
takeWhile,
} from 'rxjs'
import { T } from '@start9labs/start-sdk'
import { ApiService } from './api/api.service'
import { AuthService } from './auth.service'
@Component({
template: '<tui-loader size="xl" [textContent]="text" />',
imports: [TuiLoader],
})
class UpdatingDialog {
protected readonly text = 'StartTunnel is updating...'
}
@Injectable({
providedIn: 'root',
})
export class UpdateService {
private readonly api = inject(ApiService)
private readonly auth = inject(AuthService)
private readonly dialogs = inject(TuiResponsiveDialogService)
private readonly loading = inject(TuiNotificationMiddleService)
private readonly errorService = inject(ErrorService)
readonly result = signal<T.Tunnel.TunnelUpdateResult | null>(null)
@@ -106,11 +96,8 @@ export class UpdateService {
private showUpdatingDialog(): void {
if (this.updatingDialog) return
this.updatingDialog = this.dialogs
.open(new PolymorpheusComponent(UpdatingDialog), {
closable: false,
dismissible: false,
})
this.updatingDialog = this.loading
.open('StartTunnel is updating...')
.subscribe({ complete: () => (this.updatingDialog = null) })
}

View File

@@ -66,6 +66,10 @@ tui-notification-middle {
background: var(--tui-background-neutral-1);
box-shadow: inset 0 0 0 1px var(--tui-background-neutral-1);
&:has(app-placeholder) thead {
display: none;
}
thead tr {
position: sticky;
top: 0;
@@ -93,12 +97,6 @@ tui-notification-middle {
}
}
.placeholder {
padding: 1rem;
font: var(--tui-font-text-l);
color: var(--tui-text-tertiary);
}
qr-code {
display: flex;
justify-content: center;
@@ -107,3 +105,21 @@ qr-code {
tui-data-list {
--tui-text-action: var(--tui-text-primary);
}
[tuiTheme='dark'] tui-notification-middle[style] {
&.tui-enter,
&.tui-leave {
--tui-scale: 0;
animation-name: tuiScale;
}
&::before {
background: var(--tui-background-neutral-1);
backdrop-filter: blur(1rem);
box-shadow: inset 0 1px 1px var(--tui-background-neutral-2);
}
tui-loader svg {
stroke: white;
}
}

View File

@@ -7,7 +7,7 @@ import { TuiNotification } from '@taiga-ui/core'
template: `
<div
tuiNotification
[appearance]="status || 'warning'"
[appearance]="appearance"
icon=""
class="notification-wrapper"
>
@@ -78,4 +78,15 @@ export class MarketplaceNotificationComponent {
return null
}
get appearance() {
switch (this.status) {
case 'success':
return 'positive'
case 'error':
return 'negative'
default:
return 'info'
}
}
}

View File

@@ -101,7 +101,7 @@ export class ServiceHealthCheckComponent {
case 'starting':
return this.i18n.transform('Starting')!
case 'success':
return `${this.i18n.transform('Success')}: ${message || 'health check passing'}`
return message || this.i18n.transform('Success')!
case 'waiting':
return message
? `${this.i18n.transform('Waiting on')} ${message}...`

View File

@@ -98,6 +98,7 @@ import { getManifest } from 'src/app/utils/get-package-data'
padding-top: 1rem;
border-radius: 0;
cursor: pointer;
overflow: hidden;
box-shadow: 0 -1px rgba(255, 255, 255, 0.1);
}

View File

@@ -306,6 +306,7 @@ export default class SystemGeneralComponent {
onLanguageChange(language: Language) {
this.i18nService.setLang(language.name)
this.promptLanguageRestart()
}
// Expose shared utilities for template use
@@ -562,6 +563,21 @@ export default class SystemGeneralComponent {
.subscribe(() => this.restart())
}
private promptLanguageRestart() {
this.dialog
.openConfirm({
label: 'Restart to apply',
data: {
content:
'OS-level translations are already in effect. A restart is required for service-level translations to take effect.',
yes: 'Restart now',
no: 'Later',
},
})
.pipe(filter(Boolean))
.subscribe(() => this.restart())
}
private update() {
this.dialogs
.open(UPDATE, {

View File

@@ -171,8 +171,7 @@ ul {
.g-table {
width: stretch;
border: 1px solid var(--tui-background-neutral-1);
border-spacing: 0;
border-collapse: collapse;
border-collapse: collapse !important;
border-radius: var(--tui-radius-s);
overflow: hidden;
box-shadow: inset 0 0 0 1px var(--tui-background-neutral-1);
@@ -248,6 +247,10 @@ ul {
display: none;
}
&:has(app-placeholder) thead {
display: none;
}
tr:not(:last-child) {
box-shadow: inset 0 -1px var(--tui-background-neutral-1);
}